Commit 2e8133d

[TFLite] Added check for dynamic range quantization (apache#7114)
* [TFLite] Added check for dynamic range quantization

  Added a check that prevents TFLite files optimized with "dynamic range quantization" from being loaded, as the optimization is not fully supported.
  https://www.tensorflow.org/lite/performance/post_training_quantization#dynamic_range_quantization

* linter

* linter

* unit test fix
1 parent 618ef9e commit 2e8133d
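In practice the new check surfaces when such a model is imported through the Relay TFLite frontend. A minimal sketch of what a user would hit, assuming a dynamic-range-quantized model file on disk (the file path, input name, and shape below are hypothetical, not part of this commit):

import tvm
import tflite
from tvm import relay

# Hypothetical model produced with converter.optimizations = [tf.lite.Optimize.DEFAULT]
with open("model_dynamic_range.tflite", "rb") as f:
    tflite_model = tflite.Model.GetRootAsModel(f.read(), 0)

try:
    mod, params = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={"input": (1, 1024, 1024)},
        dtype_dict={"input": "float32"},
    )
except tvm.error.OpNotImplemented as err:
    # With this commit the frontend rejects the graph instead of importing it incorrectly.
    print(err)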

File tree (2 files changed: +52 -3 lines)

python/tvm/relay/frontend/tflite.py
tests/python/frontend/tflite/test_forward.py

python/tvm/relay/frontend/tflite.py

Lines changed: 31 additions & 3 deletions
@@ -176,17 +176,45 @@ def __init__(self, model, subgraph, exp_tab):
     def check_unsupported_ops(self):
         """Check unsupported TFLite ops in our converter."""
         unsupported_ops_set = set()
-
+        dynamic_range_ops_set = set()
         for op_idx in range(self.subgraph.OperatorsLength()):
             op = self.subgraph.Operators(op_idx)
             op_code_str = self.get_op_code_str(op)
             if op_code_str not in self.convert_map:
                 unsupported_ops_set.add(op_code_str)
+                continue
+
+            # Trying to exclude "dynamic range quantization" optimized ops as not supported in TVM
+            qnn_in_cnt = len(
+                [_.qnn_params for _ in self.get_input_tensors(op)[0:1] if _.qnn_params is not None]
+            )
+            qnn_weight_cnt = len(
+                [_.qnn_params for _ in self.get_input_tensors(op)[1:] if _.qnn_params is not None]
+            )
+            qnn_out_cnt = len(
+                [_.qnn_params for _ in self.get_output_tensors(op) if _.qnn_params is not None]
+            )
+
+            if qnn_in_cnt == 0 and qnn_out_cnt == 0 and qnn_weight_cnt > 0:
+                dynamic_range_ops_set.add(op_code_str)
+
+        raise_msg = ""

         if unsupported_ops_set:
-            msg = "The following operators are not supported in frontend " "TFLite: {}"
+            msg = "The following operators are not supported in frontend " "TFLite: {}\n"
             ops = str(list(unsupported_ops_set)).strip("[,]")
-            raise tvm.error.OpNotImplemented(msg.format(ops))
+            raise_msg += msg.format(ops)
+
+        if dynamic_range_ops_set:
+            msg = (
+                "The following operators are likely to have dynamic range quantization: {}. "
+                "If you are running an optimized graph, please turn off dynamic range quantization "
+                "or use full integer quantization"
+            )
+            raise_msg += msg.format(str(list(dynamic_range_ops_set)).strip("[,]"))
+
+        if len(raise_msg) > 0:
+            raise tvm.error.OpNotImplemented(raise_msg)

     def convert_op_to_relay(self):
         """Convert TFLite ops to relay ops"""

tests/python/frontend/tflite/test_forward.py

Lines changed: 21 additions & 0 deletions
@@ -4156,6 +4156,27 @@ def test_forward_mediapipe_hand_landmark():
     )


+#######################################################################
+# Test check for Tensorflow "dynamic range quantization" optimization
+# --------------
+def test_prevent_tensorflow_dynamic_range():
+    """
+    Should prevent running "dynamic range quantization" optimized TFLite graph
+    """
+    data_array = np.random.randint(0, 2, (1, 1024, 1024)).astype(dtype=np.float32)
+    filter_array = np.random.randint(0, 2, (1024, 1024)).astype(dtype=np.float32)
+    data_in = tf.keras.layers.Input(shape=data_array.shape[1:])
+    dense = tf.keras.layers.Dense(units=filter_array.shape[-1], use_bias=False)(data_in)
+    keras_model = tf.keras.models.Model(data_in, dense)
+    keras_model.layers[1].set_weights([filter_array])
+
+    converter = interpreter_wrapper.TFLiteConverter.from_keras_model(keras_model)
+    converter.optimizations = [tf.lite.Optimize.DEFAULT]
+    tflite_model = converter.convert()
+    with pytest.raises(tvm.error.OpNotImplemented):
+        tvm_output = run_tvm_graph(tflite_model, data_array, data_in.name.replace(":0", ""))
+
+
 #######################################################################
 # Main
 # ----
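The error message added in the frontend suggests full integer quantization as the supported alternative. A sketch of how the test's converter setup could be switched to full integer quantization instead; keras_model is the model built in the test above, and the representative dataset (shape and sample count) is an assumption, not part of this commit:

import numpy as np
import tensorflow as tf

def representative_dataset():
    # Calibration samples matching the model's assumed input shape.
    for _ in range(10):
        yield [np.random.rand(1, 1024, 1024).astype(np.float32)]

converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
# Restrict to int8 kernels so activations are quantized too, not just weights.
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
tflite_model = converter.convert()  # fully quantized; not rejected by the new check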
