Skip to content

Commit ffe47d9

Browse files
Fix tensorflow unit test redefinition issue (#1164)
Signed-off-by: lvliang-intel <liang1.lv@intel.com>
1 parent ea309f5 commit ffe47d9

File tree

4 files changed

+7
-123
lines changed

4 files changed

+7
-123
lines changed

test/adaptor/tensorflow_adaptor/test_tensorflow_graph_conv_fusion.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -279,7 +279,7 @@ def test_conv_squeeze_biasadd_relu_fusion(self):
         self.assertEqual(correct_conv_fusion, True)
 
     @disable_random()
-    def test_conv_biasadd_addv2_relu_fallback_fusion(self):
+    def test_conv_biasadd_addv2_relu_fallback_fusion_1(self):
         x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input")
         top_relu = tf.nn.leaky_relu(x)
         paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])
@@ -325,7 +325,7 @@ def test_conv_biasadd_addv2_relu_fallback_fusion(self):
         self.assertEqual(found_conv_fusion, True)
 
     @disable_random()
-    def test_conv_biasadd_addv2_relu_fallback_fusion(self):
+    def test_conv_biasadd_addv2_relu_fallback_fusion_2(self):
         x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input")
         top_relu = tf.nn.relu(x)
         paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])

test/tfnewapi/test_tensorflow_graph_conv_fusion.py

Lines changed: 0 additions & 116 deletions
Original file line numberDiff line numberDiff line change
@@ -755,121 +755,5 @@ def test_single_deconv3d_fusion(self):
 
         self.assertEqual(found_deconv3d_fusion, True)
 
-    @disable_random()
-    def test_conv_fusion_with_max_pooling(self):
-        x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input")
-
-        relu = tf.nn.relu(x)
-        pooling = tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME")
-        conv_weights = tf.compat.v1.get_variable("weight2", [3, 3, 16, 16],
-                                                 initializer=tf.compat.v1.random_normal_initializer())
-        conv = tf.nn.conv2d(pooling, conv_weights, strides=[1, 2, 2, 1], padding="VALID")
-        biasadd = tf.compat.v1.layers.batch_normalization(conv, name='op_to_store')
-        out_name = biasadd.name.split(':')[0]
-        with tf.compat.v1.Session() as sess:
-            sess.run(tf.compat.v1.global_variables_initializer())
-            output_graph_def = graph_util.convert_variables_to_constants(
-                sess=sess,
-                input_graph_def=sess.graph_def,
-                output_node_names=[out_name])
-
-        from neural_compressor.experimental import Quantization, common
-        quantizer = Quantization('fake_yaml.yaml')
-        dataset = quantizer.dataset('dummy', shape=(100, 56, 56, 16), label=True)
-        quantizer.eval_dataloader = common.DataLoader(dataset)
-        quantizer.calib_dataloader = common.DataLoader(dataset)
-        quantizer.model = output_graph_def
-        output_graph = quantizer.fit()
-
-        quantized_pool_data_type = None
-        quantized_conv_data_type = None
-        for i in output_graph.graph_def.node:
-            if i.op.find("QuantizedMaxPool") != -1:
-                quantized_pool_data_type = i.attr['T'].type
-            if i.op.find("QuantizedConv2D") != -1:
-                quantized_conv_data_type = i.attr['Tinput'].type
-
-        self.assertNotEqual(quantized_pool_data_type, None)
-        self.assertEqual(quantized_pool_data_type, quantized_conv_data_type)
-
-    @disable_random()
-    def test_conv3d_addv2_relu_fusion(self):
-        x = tf.compat.v1.placeholder(tf.float32, [1, 128, 64, 64, 16], name="input")
-        top_relu = tf.nn.relu(x)
-        conv3d_1_weights = tf.compat.v1.get_variable("weight_conv3d_1", [3, 3, 3, 16, 32],
-                                                     initializer=tf.compat.v1.random_normal_initializer())
-        conv3d_1 = tf.nn.conv3d(top_relu, conv3d_1_weights, strides=[1, 2, 2, 2, 1], padding="SAME")
-        add = tf.raw_ops.AddV2(x=conv3d_1, y=tf.constant(np.random.randn(32), dtype=tf.float32), name='addv2')
-        relu = tf.nn.relu(add)
-        conv3d_2_weights = tf.compat.v1.get_variable("weight_conv3d_2", [3, 3, 3, 32, 1],
-                                                     initializer=tf.compat.v1.random_normal_initializer())
-        conv3d_2 = tf.nn.conv3d(relu, conv3d_2_weights, strides=[1, 2, 2, 2, 1], padding="SAME")
-
-        out_name = conv3d_2.name.split(':')[0]
-        with tf.compat.v1.Session() as sess:
-            sess.run(tf.compat.v1.global_variables_initializer())
-            output_graph_def = graph_util.convert_variables_to_constants(
-                sess=sess,
-                input_graph_def=sess.graph_def,
-                output_node_names=[out_name])
-        from neural_compressor.experimental import Quantization, common
-        quantizer = Quantization('fake_yaml.yaml')
-        dataset = quantizer.dataset('dummy', shape=(100, 128, 64, 64, 16), label=True)
-        quantizer.eval_dataloader = common.DataLoader(dataset)
-        quantizer.calib_dataloader = common.DataLoader(dataset)
-        quantizer.model = output_graph_def
-        output_graph = quantizer.fit()
-
-        found_conv_sumadd_fusion = False
-        found_conv_biasadd_fusion = False
-        for i in output_graph.graph_def.node:
-            if i.op == '_FusedQuantizedConv3D':
-                if b'Sum' in i.attr['fused_ops'].list.s:
-                    found_conv_sumadd_fusion = True
-                if i.attr['fused_ops'].list.s == [b'BiasAdd', b'Relu', b'Requantize']:
-                    found_conv_biasadd_fusion = True
-        self.assertEqual(found_conv_sumadd_fusion, False)
-        self.assertEqual(found_conv_biasadd_fusion, True)
-
-    # conv2d + dummybiasadd + addv2 fusion
-    @disable_random()
-    def test_conv_add_addn_non_const_fusion(self):
-        x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input")
-        paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])
-        x_pad = tf.pad(x, paddings, "CONSTANT")
-        top_relu = tf.nn.relu(x_pad)
-        conv2d_1_weights = tf.compat.v1.get_variable("weight1", [3, 3, 16, 16],
-                                                     initializer=tf.compat.v1.random_normal_initializer())
-        conv2d_1 = tf.nn.conv2d(top_relu, conv2d_1_weights, strides=[1, 2, 2, 1], padding="SAME")
-        conv2d_2_weights = tf.compat.v1.get_variable("weight2", [3, 3, 16, 16],
-                                                     initializer=tf.compat.v1.random_normal_initializer())
-        conv2d_2 = tf.nn.conv2d(top_relu, conv2d_2_weights, strides=[1, 2, 2, 1], padding="SAME")
-        add_1 = tf.raw_ops.AddV2(x=conv2d_1, y=conv2d_2, name='addv2_1')
-        conv2d_3_weights = tf.compat.v1.get_variable("weight3", [3, 3, 16, 16],
-                                                     initializer=tf.compat.v1.random_normal_initializer())
-        conv2d_3 = tf.nn.conv2d(top_relu, conv2d_3_weights, strides=[1, 2, 2, 1], padding="SAME")
-        add = tf.raw_ops.AddV2(x=add_1, y=conv2d_3, name='addv2_2')
-        out_name = add.name.split(':')[0]
-        with tf.compat.v1.Session() as sess:
-            sess.run(tf.compat.v1.global_variables_initializer())
-            output_graph_def = graph_util.convert_variables_to_constants(
-                sess=sess,
-                input_graph_def=sess.graph_def,
-                output_node_names=[out_name])
-        from neural_compressor.experimental import Quantization, common
-        quantizer = Quantization('fake_yaml.yaml')
-        dataset = quantizer.dataset('dummy', shape=(100, 56, 56, 16), label=True)
-        quantizer.eval_dataloader = common.DataLoader(dataset)
-        quantizer.calib_dataloader = common.DataLoader(dataset)
-        quantizer.model = output_graph_def
-        output_graph = quantizer.fit()
-
-        found_conv_fusion = False
-        for i in output_graph.graph_def.node:
-            if i.op == '_FusedQuantizedConv2D' and \
-                    i.attr['fused_ops'].list.s == [b'BiasAdd', b'Sum', b'Requantize']:
-                found_conv_fusion = True
-        self.assertEqual(found_conv_fusion, True)
-
 if __name__ == '__main__':
     unittest.main()

test/tfnewapi/test_tensorflow_graph_conv_requantize_fusion.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -415,7 +415,7 @@ def test_conv3d_add_const_addn_relu_fusion(self):
             if str(i.attr['fused_ops'].list.s) == str([b'BiasAdd', b'Sum', b'Relu']):
                 found_conv_biasadd_fusion = True
         self.assertEqual(found_conv_sumadd_fusion, True)
-        self.assertEqual(found_conv_biasadd_fusion, True)
+        self.assertEqual(found_conv_biasadd_fusion, False)
 
     @disable_random()
     def test_conv3d_add_addn_fusion(self):
@@ -562,8 +562,8 @@ def test_conv3d_add_fusion(self):
         self.assertEqual(found_conv_fusion, True)
 
     @disable_random()
-    def test_conv3d_add_const_addn_relu_fusion(self):
-        logging.getLogger().info("test_conv3d_add_const_addn_relu_fusion")
+    def test_conv3d_add_const_addn_relu_requantize_fusion(self):
+        logging.getLogger().info("test_conv3d_add_const_addn_relu_requantize_fusion")
         x = tf.compat.v1.placeholder(tf.float32, [1, 128, 64, 64, 16], name="input")
         paddings = tf.constant([[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]])
         x_pad = tf.pad(x, paddings, "CONSTANT")

test/tfnewapi/test_tensorflow_graph_qdq_conv3d_fusion.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -521,8 +521,8 @@ def test_conv3d_add_fusion(self):
         self.assertEqual(found_conv_fusion, True)
 
     @disable_random()
-    def test_conv3d_add_const_addn_relu_fusion(self):
-        logging.getLogger().info("test_conv3d_add_const_addn_relu_fusion")
+    def test_conv3d_add_const_addn_relu_requantize_fusion(self):
+        logging.getLogger().info("test_conv3d_add_const_addn_relu_requantize_fusion")
         x = tf.compat.v1.placeholder(tf.float32, [1, 128, 64, 64, 16], name="input")
         paddings = tf.constant([[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]])
         x_pad = tf.pad(x, paddings, "CONSTANT")

0 commit comments

Comments
 (0)