@@ -1117,16 +1117,27 @@ def test_forward_squeeze():
# Pad
# ---

def _test_pad(data, quantized=False):
    """One iteration of PAD.

    Parameters
    ----------
    data : list
        Two-element list: [input ndarray, paddings ndarray]. The paddings
        array is converted to a constant tensor fed to ``array_ops.pad``.
    quantized : bool
        When True, wrap the input in a fake-quant node so the converter
        produces a quantized PAD, and compare via the quantized path.
    """
    assert len(data) == 2

    # Test with tensor and constant
    with tf.Graph().as_default():
        in_data = [array_ops.placeholder(shape=data[0].shape, dtype='float32', name='in')]

        if quantized:
            # Quantization range used by the fake-quant node below.
            # NOTE: use the named values instead of repeating the literals
            # so the range is defined in exactly one place.
            min_value, max_value = -100, 100
            # fake_quant keeps the tensors in float32 until the conversion
            # in the session.
            inq_data = [tf.quantization.fake_quant_with_min_max_args(in_data[0],
                                                                     min=min_value,
                                                                     max=max_value,
                                                                     name="inq_0")]
            out = array_ops.pad(inq_data[0], ops.convert_to_tensor(data[1], dtype=data[1].dtype))
            compare_tflite_with_tvm([data[0]], ['inq_0:0'], inq_data, [out], quantized=True)
        else:
            out = array_ops.pad(in_data[0], ops.convert_to_tensor(data[1], dtype=data[1].dtype))
            compare_tflite_with_tvm([data[0]], ['in:0'], in_data, [out])
11301141
11311142
11321143def test_forward_pad ():
@@ -1139,6 +1150,8 @@ def test_forward_pad():
11391150 np .array ([[1 , 1 ], [2 , 2 ]], dtype = np .int32 )])
11401151 _test_pad ([np .arange (1.0 , 4.0 , dtype = np .float32 ).reshape ((1 , 3 )),
11411152 np .array ([[1 , 1 ], [2 , 2 ]], dtype = np .int32 )])
1153+ _test_pad ([np .arange (0 , 256 , dtype = np .uint8 ).reshape ((1 , 256 )),
1154+ np .array ([[1 , 1 ], [2 , 2 ]], dtype = np .int32 )], quantized = True )
11421155
11431156
#######################################################################
0 commit comments