Commit 7ceafcc

[tmva][sofie] Fix test Keras parser and a bug in Squeeze
Fix a bug in the Squeeze operator when the axes to squeeze are provided explicitly: vector::erase was used incorrectly, so erasing one axis invalidated the indices of the axes still to be removed. Also fix the Keras parser tests for the case where convolutions with channels_first are not supported (e.g. on CPU implementations of TensorFlow).
1 parent 4e437ec commit 7ceafcc
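
To see why the original erase loop was wrong, here is a minimal standalone C++ sketch; the plain size_t dims and int axes are illustrative only, not the actual SOFIE types:

#include <algorithm>
#include <functional>
#include <iostream>
#include <vector>

int main() {
   // squeeze axes 0 and 2 of shape {1, 3, 1, 5}; the expected result is {3, 5}
   std::vector<size_t> shape{1, 3, 1, 5};
   std::vector<int> axes{0, 2};

   // BUGGY (what the old loop effectively did): erasing axis 0 first shifts
   // axis 2 onto the element 5, so the second erase removes the wrong entry
   // and leaves {3, 1}:
   //    for (int a : axes) shape.erase(shape.begin() + a);

   // FIXED (what this commit does): erase in decreasing axis order, so the
   // indices of the axes still to be erased stay valid
   std::sort(axes.begin(), axes.end(), std::greater<int>());
   for (int a : axes)
      shape.erase(shape.begin() + a);

   for (size_t d : shape)
      std::cout << d << " ";   // prints: 3 5
   std::cout << std::endl;
   return 0;
}

Erasing back-to-front is the standard way to delete several positions from a vector by index, since each erase only shifts the elements behind the erased position.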

7 files changed: +140 additions, -44 deletions

bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/generate_keras_functional.py

Lines changed: 28 additions & 13 deletions
@@ -1,8 +1,17 @@
 def generate_keras_functional(dst_dir):
 
-    from keras import models, layers
+    from keras import models, layers, backend
     import numpy as np
 
+    def is_channels_first_supported():
+        # channels_first is not supported on TensorFlow CPU builds
+        from keras import backend
+        if backend.backend() == "tensorflow":
+            import tensorflow as tf
+            if len(tf.config.list_physical_devices("GPU")) == 0:
+                return False
+        return True
+
     # Helper training function
     def train_and_save(model, name):
         # Handle multiple inputs dynamically
@@ -13,8 +22,11 @@ def train_and_save(model, name):
         y_train = np.random.rand(32, *model.output_shape[1:])
 
         model.compile(optimizer='adam', loss='mean_squared_error', metrics=['mae'])
+        model.summary()
         model.fit(x_train, y_train, epochs=1, verbose=0)
         model.save(f"{dst_dir}/Functional_{name}_test.keras")
+        print("generated and saved functional model", name)
+
 
     # Activation Functions
     for act in ['relu', 'elu', 'leaky_relu', 'selu', 'sigmoid', 'softmax', 'swish', 'tanh']:
@@ -58,10 +70,11 @@ def train_and_save(model, name):
     train_and_save(model, "Concat")
 
     # Conv2D channels_first
-    inp = layers.Input(shape=(3, 8, 8))
-    out = layers.Conv2D(4, (3, 3), padding='same', data_format='channels_first', activation='relu')(inp)
-    model = models.Model(inp, out)
-    train_and_save(model, "Conv2D_channels_first")
+    if is_channels_first_supported():
+        inp = layers.Input(shape=(3, 8, 8))
+        out = layers.Conv2D(4, (3, 3), padding='same', data_format='channels_first', activation='relu')(inp)
+        model = models.Model(inp, out)
+        train_and_save(model, "Conv2D_channels_first")
 
     # Conv2D channels_last
     inp = layers.Input(shape=(8, 8, 3))
@@ -100,10 +113,11 @@ def train_and_save(model, name):
     train_and_save(model, "Flatten")
 
     # GlobalAveragePooling2D channels_first
-    inp = layers.Input(shape=(3, 4, 6))
-    out = layers.GlobalAveragePooling2D(data_format='channels_first')(inp)
-    model = models.Model(inp, out)
-    train_and_save(model, "GlobalAveragePooling2D_channels_first")
+    if is_channels_first_supported():
+        inp = layers.Input(shape=(3, 4, 6))
+        out = layers.GlobalAveragePooling2D(data_format='channels_first')(inp)
+        model = models.Model(inp, out)
+        train_and_save(model, "GlobalAveragePooling2D_channels_first")
 
     # GlobalAveragePooling2D channels_last
     inp = layers.Input(shape=(4, 6, 3))
@@ -124,10 +138,11 @@ def train_and_save(model, name):
     train_and_save(model, "LeakyReLU")
 
     # MaxPooling2D channels_first
-    inp = layers.Input(shape=(3, 8, 8))
-    out = layers.MaxPooling2D(pool_size=(2, 2), data_format='channels_first')(inp)
-    model = models.Model(inp, out)
-    train_and_save(model, "MaxPool2D_channels_first")
+    if is_channels_first_supported():
+        inp = layers.Input(shape=(3, 8, 8))
+        out = layers.MaxPooling2D(pool_size=(2, 2), data_format='channels_first')(inp)
+        model = models.Model(inp, out)
+        train_and_save(model, "MaxPool2D_channels_first")
 
     # MaxPooling2D channels_last
     inp = layers.Input(shape=(8, 8, 3))

bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/generate_keras_sequential.py

Lines changed: 43 additions & 19 deletions
@@ -1,16 +1,36 @@
 def generate_keras_sequential(dst_dir):
 
-    from keras import models, layers
+    from keras import models, layers, backend
    import numpy as np
 
+    def is_channels_first_supported():
+        # channels_first is not supported on TensorFlow CPU builds
+        from keras import backend
+        if backend.backend() == "tensorflow":
+            import tensorflow as tf
+            if len(tf.config.list_physical_devices("GPU")) == 0:
+                return False
+        return True
+
     # Helper training function
     def train_and_save(model, name):
         x_train = np.random.rand(32, *model.input_shape[1:])
         y_train = np.random.rand(32, *model.output_shape[1:])
         model.compile(optimizer='adam', loss='mean_squared_error', metrics=['mae'])
         model.fit(x_train, y_train, epochs=1, verbose=0)
+        model.summary()
+        print("fitted sequential model", name)
         model.save(f"{dst_dir}/Sequential_{name}_test.keras")
 
     # Binary Ops: Add, Subtract, Multiply are not typical in Sequential - skipping those
     # Concat (not applicable in Sequential without multi-input)

@@ -21,15 +41,16 @@ def train_and_save(model, name):
         layers.Activation(act)
     ])
     train_and_save(model, f"Activation_layer_{act.capitalize()}")
-    # Along with this, Keras also allows explicit delcaration of activation layers such as:
+    # Along with this, Keras also allows explicit declaration of activation layers such as:
     # ELU, ReLU, LeakyReLU, Softmax
 
     # AveragePooling2D channels_first
-    model = models.Sequential([
-        layers.Input(shape=(3, 8, 8)),
-        layers.AveragePooling2D(pool_size=(2, 2), data_format='channels_first')
-    ])
-    train_and_save(model, "AveragePooling2D_channels_first")
+    if is_channels_first_supported():
+        model = models.Sequential([
+            layers.Input(shape=(3, 8, 8)),
+            layers.AveragePooling2D(pool_size=(2, 2), data_format='channels_first')
+        ])
+        train_and_save(model, "AveragePooling2D_channels_first")
 
     # AveragePooling2D channels_last
     model = models.Sequential([
@@ -46,11 +67,12 @@ def train_and_save(model, name):
     train_and_save(model, "BatchNorm")
 
     # Conv2D channels_first
-    model = models.Sequential([
-        layers.Input(shape=(3, 8, 8)),
-        layers.Conv2D(4, (3, 3), data_format='channels_first')
-    ])
-    train_and_save(model, "Conv2D_channels_first")
+    if is_channels_first_supported():
+        model = models.Sequential([
+            layers.Input(shape=(3, 8, 8)),
+            layers.Conv2D(4, (3, 3), data_format='channels_first')
+        ])
+        train_and_save(model, "Conv2D_channels_first")
 
     # Conv2D channels_last
     model = models.Sequential([
@@ -95,11 +117,12 @@ def train_and_save(model, name):
     train_and_save(model, "Flatten")
 
     # GlobalAveragePooling2D channels_first
-    model = models.Sequential([
-        layers.Input(shape=(3, 4, 6)),
-        layers.GlobalAveragePooling2D(data_format='channels_first')
-    ])
-    train_and_save(model, "GlobalAveragePooling2D_channels_first")
+    if is_channels_first_supported():
+        model = models.Sequential([
+            layers.Input(shape=(3, 4, 6)),
+            layers.GlobalAveragePooling2D(data_format='channels_first')
+        ])
+        train_and_save(model, "GlobalAveragePooling2D_channels_first")
 
     # GlobalAveragePooling2D channels_last
     model = models.Sequential([
@@ -123,11 +146,12 @@ def train_and_save(model, name):
     train_and_save(model, "LeakyReLU")
 
     # MaxPooling2D channels_first
-    model = models.Sequential([
-        layers.Input(shape=(3, 8, 8)),
-        layers.MaxPooling2D(pool_size=(2, 2), data_format='channels_first')
-    ])
-    train_and_save(model, "MaxPool2D_channels_first")
+    if is_channels_first_supported():
+        model = models.Sequential([
+            layers.Input(shape=(3, 8, 8)),
+            layers.MaxPooling2D(pool_size=(2, 2), data_format='channels_first')
+        ])
+        train_and_save(model, "MaxPool2D_channels_first")
 
     # MaxPooling2D channels_last
     model = models.Sequential([
@@ -176,9 +200,9 @@ def train_and_save(model, name):
     train_and_save(modelA, "Layer_Combination_1")
 
     modelB = models.Sequential([
-        layers.Input(shape=(3, 32, 32)),
-        layers.Conv2D(8, (3,3), padding='valid', data_format='channels_first', activation='relu'),
-        layers.MaxPooling2D((2,2), data_format='channels_first'),
+        layers.Input(shape=(32, 32, 3)),
+        layers.Conv2D(8, (3,3), padding='valid', data_format='channels_last', activation='relu'),
+        layers.MaxPooling2D((2,2), data_format='channels_last'),
         layers.Flatten(),
         layers.Dense(128, activation='relu'),
         layers.Reshape((16, 8)),

bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/parser.py

Lines changed: 1 addition & 1 deletion
@@ -98,7 +98,7 @@ def add_layer_into_RModel(rmodel, layer_data):
 
     fLayerType = layer_data['layerType']
 
-    print('Model: adding layer', fLayerType)
+    print('Model: parsing layer', fLayerType)
 
     # reshape and flatten layers don't have weights, but they need constant tensor for the shape
     if fLayerType == "Reshape" or fLayerType == "Flatten":

bindings/pyroot/pythonizations/test/sofie_keras_parser.py

Lines changed: 31 additions & 9 deletions
@@ -2,6 +2,16 @@
 import os
 import shutil
 
+def is_channels_first_supported():
+    # channels_first is not supported on TensorFlow CPU builds
+    from keras import backend
+    if backend.backend() == "tensorflow":
+        import tensorflow as tf
+        if len(tf.config.list_physical_devices("GPU")) == 0:
+            return False
+    return True
+
 from ROOT._pythonization._tmva._sofie._parser._keras.parser_test_function import generate_and_test_inference
 from ROOT._pythonization._tmva._sofie._parser._keras.generate_keras_functional import generate_keras_functional
 from ROOT._pythonization._tmva._sofie._parser._keras.generate_keras_sequential import generate_keras_sequential
@@ -11,35 +21,43 @@ def make_testname(test_case: str):
     test_case_name = test_case.replace("_", " ").removesuffix(".keras")
     return test_case_name
 
+
 models = [
     "AveragePooling2D_channels_first",
     "AveragePooling2D_channels_last",
     "BatchNorm",
-    # "Conv2D_channels_first",
-    # "Conv2D_channels_last",
-    # "Conv2D_padding_same",
-    # "Conv2D_padding_valid",
+    "Conv2D_channels_first",
+    "Conv2D_channels_last",
+    "Conv2D_padding_same",
+    "Conv2D_padding_valid",
     "Dense",
     "ELU",
     "Flatten",
-    ## "GlobalAveragePooling2D_channels_first", #failing
+    "GlobalAveragePooling2D_channels_first",
     "GlobalAveragePooling2D_channels_last",
     # "GRU",
     "LayerNorm",
     "LeakyReLU",
     # "LSTM",
     "MaxPool2D_channels_first",
     "MaxPool2D_channels_last",
     "Permute",
     "ReLU",
     "Reshape",
     # "SimpleRNN",
     "Softmax",
 ] + ([f"Activation_layer_{activation_function.capitalize()}" for activation_function in
       ['relu', 'elu', 'leaky_relu', 'selu', 'sigmoid', 'softmax', 'swish', 'tanh']] +
 
      [f"Layer_Combination_{i}" for i in range(1, 4)])
 
+# remove channels_first cases if not supported
+if not is_channels_first_supported():
+    models = [m for m in models if "channels_first" not in m]
+
+print(models)
+
 class SOFIE_Keras_Parser(unittest.TestCase):
 
     def setUp(self):
@@ -50,8 +68,12 @@ def setUp(self):
         os.makedirs(base_dir + "/generated_header_files_dir")
 
     def run_model_tests(self, model_type: str, generate_function, model_list):
+        print("Generating", model_type, "models for testing")
         generate_function(f"{model_type}/input_models")
         for keras_model in model_list:
+            print("**********************************")
+            print("Run test for", model_type, "model:", keras_model)
+            print("**********************************")
             keras_model_name = f"{model_type.capitalize()}_{keras_model}_test.keras"
             keras_model_path = f"{model_type}/input_models/" + keras_model_name
             with self.subTest(msg=make_testname(keras_model_name)):
@@ -71,4 +93,4 @@ def tearDownClass(self):
     shutil.rmtree("functional")
 
 if __name__ == "__main__":
-    unittest.main()
+    unittest.main()

tmva/sofie/inc/TMVA/RModel.hxx

Lines changed: 6 additions & 0 deletions
@@ -210,8 +210,14 @@ public:
    void ReadInitializedTensorsFromFile(long);
    long WriteInitializedTensorsToFile(std::string filename = "");
 
-   void PrintIntermediateTensors() const;
-   void PrintOutputTensors() const;
+   void PrintSummary();
+   void PrintIntermediateTensors();
+   void PrintOutputTensors();
    void OutputGenerated(std::string filename = "", bool append = false);
    std::vector<std::string> GetOutputTensorNames() { return fOutputTensorNames; }
    void SetFilename(std::string filename) { fName = filename; }

tmva/sofie/inc/TMVA/ROperator_Reshape.hxx

Lines changed: 12 additions & 2 deletions
@@ -70,6 +70,7 @@ public:
       fAttrAxes(attrAxes)
    {
       assert(fOpMode == Squeeze || fOpMode == Unsqueeze);
+      fInputTensorNames = { fNData };
      fOutputTensorNames = { fNOutput };
    }

@@ -199,14 +200,23 @@
            }
         }
      } else {
-        auto &axes = fAttrAxes;
+        auto axes = fAttrAxes;   // work on a copy: the axes are normalized and sorted below
         for (size_t i = 0; i < axes.size(); i++) {
            if (axes[i] < 0)
               axes[i] += input_shape.size();
            if (!(output_shape[axes[i]] == Dim{1}))
               throw std::runtime_error("TMVA Squeeze Op : Invalid axis value " + std::to_string(axes[i]) +
                                        " for " + ConvertShapeToString(output_shape));
-           output_shape.erase(output_shape.begin() + axes[i]);
+        }
+        // vector::erase shifts every element behind the erased one, so the axes
+        // must be erased in decreasing order to keep the remaining indices valid
+        std::sort(axes.begin(), axes.end(), std::greater<int>());
+        for (auto & axis : axes) {
+           output_shape.erase(output_shape.begin() + axis);
         }
      }
      ret.push_back(output_shape);
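
For reference, the corrected logic as a self-contained helper; the function name and the plain size_t/int64_t types are illustrative only, since the SOFIE operator works on Dim objects (std::sort and std::greater come from <algorithm> and <functional>):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <vector>

// Hypothetical standalone equivalent of the fixed Squeeze shape computation
std::vector<size_t> SqueezeShape(std::vector<size_t> shape, std::vector<int64_t> axes) {
   for (auto &a : axes) {
      if (a < 0)
         a += static_cast<int64_t>(shape.size());   // normalize negative axes
      assert(shape[a] == 1);                        // only size-1 dims can be squeezed
   }
   // erase from the back so earlier erases cannot shift the remaining axes
   std::sort(axes.begin(), axes.end(), std::greater<int64_t>());
   for (auto a : axes)
      shape.erase(shape.begin() + a);
   return shape;
}

int main() {
   assert((SqueezeShape({1, 3, 1, 5}, {0, -2}) == std::vector<size_t>{3, 5}));
   return 0;
}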

tmva/sofie/src/RModel.cxx

Lines changed: 19 additions & 0 deletions
@@ -1422,7 +1422,26 @@ long RModel::WriteInitializedTensorsToFile(std::string filename) {
    }
 }
 
-void RModel::PrintRequiredInputTensors() const {
+void RModel::PrintSummary() {
+   std::cout << "Summary of model " << GetName() << std::endl;
+   for (size_t op_idx = 0; op_idx < fOperators.size(); ++op_idx) {
+      auto & r = *fOperators[op_idx].get();
+      std::string raw_name = typeid(r).name();
+      // look for "ROperator_" in the type name and keep what follows it
+      std::string name = raw_name.substr(raw_name.find("ROperator_") + 10, raw_name.size());
+      std::cout << op_idx << " " << name << " : ";
+      for (auto & t_in : r.GetOpInputTensors()) std::cout << t_in << " ";
+      std::cout << " ----> ";
+      for (auto & t_out : r.GetOpOutputTensors()) std::cout << t_out << " ";
+      std::cout << std::endl;
+   }
+}
+
+void RModel::PrintRequiredInputTensors() {
    std::cout << "Model requires following inputs:\n";
    for (auto& inputInfo: fInputTensorInfos) {
       std::cout << "Parametrised Tensor name: " << inputInfo.first << "\t";
