diff --git a/python/tvm/relay/frontend/tflite.py b/python/tvm/relay/frontend/tflite.py index 85e24c6024a3..4e4092b7b387 100644 --- a/python/tvm/relay/frontend/tflite.py +++ b/python/tvm/relay/frontend/tflite.py @@ -1141,36 +1141,22 @@ def convert_abs(self, op): def convert_ceil(self, op): """Convert TFLite CEIL""" - if self.is_quantized(op): - raise tvm.error.OpNotImplemented("TFlite quantized CEIL operator is not supported yet.") return self._convert_unary_elemwise(_op.ceil, op) def convert_floor(self, op): """Convert TFLite FLOOR""" - if self.is_quantized(op): - raise tvm.error.OpNotImplemented( - "TFlite quantized FLOOR operator is not supported yet." - ) return self._convert_unary_elemwise(_op.floor, op) def convert_round(self, op): """Convert TFLite ROUND""" - if self.is_quantized(op): - raise tvm.error.OpNotImplemented( - "TFlite quantized ROUND operator is not supported yet." - ) return self._convert_unary_elemwise(_op.round, op) def convert_exp(self, op): """Convert TFLite EXP""" - if self.is_quantized(op): - raise tvm.error.OpNotImplemented("TFlite quantized EXP operator is not supported yet.") return self._convert_unary_elemwise(_op.exp, op) def convert_log(self, op): """Convert TFLite LOG""" - if self.is_quantized(op): - raise tvm.error.OpNotImplemented("TFlite quantized LOG operator is not supported yet.") return self._convert_unary_elemwise(_op.log, op) def convert_sin(self, op): @@ -1179,14 +1165,10 @@ def convert_sin(self, op): def convert_tan(self, op): """Convert TFLite TAN""" - if self.is_quantized(op): - raise tvm.error.OpNotImplemented("TFlite quantized TAN operator is not supported yet.") return self._convert_unary_elemwise(_op.tan, op) def convert_cos(self, op): """Convert TFLite COS""" - if self.is_quantized(op): - raise tvm.error.OpNotImplemented("TFlite quantized COS operator is not supported yet.") return self._convert_unary_elemwise(_op.cos, op) def convert_sqrt(self, op): diff --git a/tests/python/frontend/tflite/test_forward.py 
b/tests/python/frontend/tflite/test_forward.py index 599669e86d84..36e6ef13a5bb 100644 --- a/tests/python/frontend/tflite/test_forward.py +++ b/tests/python/frontend/tflite/test_forward.py @@ -1765,13 +1765,117 @@ def test_forward_concatenation(): # -------------- -def _test_unary_elemwise(math_op, data): +def _test_unary_elemwise(math_op, data, quantized, quant_range=[-6, 6]): """One iteration of unary elemwise""" + if quantized: + with tf.Graph().as_default(): + quant_min, quant_max = quant_range + in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0") + inq_data = tf.quantization.fake_quant_with_min_max_args( + in_data, min=quant_min, max=quant_max, name="inq_0" + ) + input_range = {"inq_0": (quant_min, quant_max)} + out = math_op(inq_data) + out = tf.quantization.fake_quant_with_min_max_args( + out, min=quant_min, max=quant_max, name="out" + ) + compare_tflite_with_tvm( + data, + "inq_0:0", + [inq_data], + [out], + quantized=True, + input_range=input_range, + experimental_new_converter=True, + ) + else: + with tf.Graph().as_default(): + in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype, name="in") + out = math_op(in_data) + compare_tflite_with_tvm(data, ["in:0"], [in_data], [out]) + + +def _unary_elewise_create_model(math_op, data, offset=0): + class Model(tf.Module): + @tf.function + def tf_function(self, x): + op = math_op(x) + return op + + dtype = "int8" + model = Model() + + # Save the model + export_dir = tempfile.gettempdir() + "/tf_model" + tf.saved_model.save( + model, + export_dir, + signatures=model.tf_function.get_concrete_function( + tf.TensorSpec(data.shape, tf.float32, name="input"), + ), + ) - with tf.Graph().as_default(): - in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype, name="in") - out = math_op(in_data) - compare_tflite_with_tvm(data, ["in:0"], [in_data], [out]) + # Convert the model + def representative_dataset(): + for _ in range(100): + tmp_data = 
np.random.rand(*tuple(data.shape)) + yield [tmp_data.astype(np.float32) * 2 - offset] + + converter = tf.lite.TFLiteConverter.from_saved_model(export_dir) + converter.optimizations = [tf.lite.Optimize.DEFAULT] + converter.representative_dataset = representative_dataset + converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] + converter.inference_input_type = tf.int8 + converter.inference_output_type = tf.int8 + tflite_model = converter.convert() + return tflite_model + + +####################################################################### +# Abs +# ---- + + +def _test_abs(data, quantized): + """One iteration of abs""" + if quantized: + tflite_model_quant = _unary_elewise_create_model(tf.math.abs, data, offset=1) + tflite_output = run_tflite_graph(tflite_model_quant, data) + + # TFLite 2.6.x upgrade support + if tf.__version__ < LooseVersion("2.6.1"): + in_node = ["serving_default_input_int8"] + else: + in_node = ["tfl.quantize"] + + tvm_output = run_tvm_graph(tflite_model_quant, data, in_node) + tvm.testing.assert_allclose( + np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-5, atol=1e-2 + ) + else: + return _test_unary_elemwise(math_ops.abs, data, quantized) + + +####################################################################### +# Rsqrt +# ---- + + +def _test_rsqrt(data, quantized): + """One iteration of rsqrt""" + + # tensorflow version upgrade support + if tf.__version__ < LooseVersion("2.6.1") or not quantized: + return _test_unary_elemwise(math_ops.rsqrt, data, quantized, quant_range=[1, 6]) + else: + tflite_model_quant = _unary_elewise_create_model(tf.math.rsqrt, data) + tflite_output = run_tflite_graph(tflite_model_quant, data) + in_node = ["tfl.quantize"] + + tvm_output = run_tvm_graph(tflite_model_quant, data, in_node) + tvm.testing.assert_allclose( + np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-5, atol=1e-2 + ) ####################################################################### @@ 
-1779,9 +1883,9 @@ def _test_unary_elemwise(math_op, data): # ---- -def _test_ceil(data): +def _test_ceil(data, quantized): """One iteration of ceil""" - return _test_unary_elemwise(math_ops.ceil, data) + return _test_unary_elemwise(math_ops.ceil, data, quantized) ####################################################################### @@ -1789,9 +1893,9 @@ def _test_ceil(data): # ----- -def _test_floor(data): +def _test_floor(data, quantized): """One iteration of floor""" - return _test_unary_elemwise(math_ops.floor, data) + return _test_unary_elemwise(math_ops.floor, data, quantized) ####################################################################### @@ -1799,9 +1903,9 @@ def _test_floor(data): # ----- -def _test_round(data): +def _test_round(data, quantized): """One iteration of round""" - return _test_unary_elemwise(math_ops.round, data) + return _test_unary_elemwise(math_ops.round, data, quantized) ####################################################################### @@ -1809,9 +1913,9 @@ def _test_round(data): # --- -def _test_exp(data): +def _test_exp(data, quantized): """One iteration of exp""" - return _test_unary_elemwise(math_ops.exp, data) + return _test_unary_elemwise(math_ops.exp, data, quantized) ####################################################################### @@ -1819,9 +1923,9 @@ def _test_exp(data): # --- -def _test_log(data): +def _test_log(data, quantized): """One iteration of log""" - return _test_unary_elemwise(math_ops.log, data) + return _test_unary_elemwise(math_ops.log, data, quantized, quant_range=[1, 6]) ####################################################################### @@ -1829,38 +1933,9 @@ def _test_log(data): # --- -def _test_sin(data, quantized=False): +def _test_sin(data, quantized): """One iteration of sin""" - with tf.Graph().as_default(): - in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0") - - if quantized: - inq_data = tf.quantization.fake_quant_with_min_max_args( - in_data, min=1, 
max=6, name="inq_0" - ) - input_range = {"inq_0": (1, 6)} - out = math_ops.sin(inq_data) - out = tf.quantization.fake_quant_with_min_max_args(out, min=1, max=6, name="out") - compare_tflite_with_tvm( - data, - "inq_0:0", - [inq_data], - [out], - quantized=True, - input_range=input_range, - experimental_new_converter=True, - ) - else: - out = math_ops.sin(in_data) - compare_tflite_with_tvm(data, "in_0:0", [in_data], [out]) - - -def test_forward_sin(): - """SIN""" - _test_sin(np.arange(-2.0, 4.0, dtype=np.float32), quantized=False) - _test_sin(np.arange(-2.0, 4.0, dtype=np.float32).reshape((2, 1, 3)), quantized=False) - _test_sin(np.arange(1, 240, 40, dtype=np.uint8), quantized=True) - _test_sin(np.arange(1, 240, 40, dtype=np.uint8).reshape((2, 1, 3)), quantized=True) + return _test_unary_elemwise(math_ops.sin, data, quantized) ####################################################################### @@ -1868,9 +1943,18 @@ def test_forward_sin(): # --- -def _test_cos(data): +def _test_cos(data, quantized): """One iteration of cos""" - return _test_unary_elemwise(math_ops.cos, data) + if quantized: + tflite_model_quant = _unary_elewise_create_model(tf.math.cos, data) + tflite_output = run_tflite_graph(tflite_model_quant, data) + in_node = ["tfl.quantize"] + tvm_output = run_tvm_graph(tflite_model_quant, data, in_node) + tvm.testing.assert_allclose( + np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-5, atol=1e-2 + ) + else: + return _test_unary_elemwise(math_ops.cos, data, quantized) ####################################################################### @@ -1878,9 +1962,9 @@ def _test_cos(data): # --- -def _test_tan(data): +def _test_tan(data, quantized): """One iteration of tan""" - return _test_unary_elemwise(math_ops.tan, data) + return _test_unary_elemwise(math_ops.tan, data, quantized) ####################################################################### @@ -1888,9 +1972,29 @@ def _test_tan(data): # ------ -def _test_square(data): +def 
_test_square(data, quantized): """One iteration of square""" - return _test_unary_elemwise(math_ops.square, data) + return _test_unary_elemwise(math_ops.square, data, quantized) + + +####################################################################### +# Neg +# ------ + + +def _test_neg(data, quantized): + """One iteration of neg""" + return _test_unary_elemwise(math_ops.neg, data, quantized) + + +####################################################################### +# Sqrt +# ------ + + +def _test_sqrt(data, quantized): + """One iteration of sqrt""" + return _test_unary_elemwise(math_ops.sqrt, data, quantized, quant_range=[1, 6]) ####################################################################### @@ -1898,35 +2002,63 @@ def _test_square(data): # --- -def _test_elu(data): +def _test_elu(data, quantized): """One iteration of elu""" - return _test_unary_elemwise(nn_ops.elu, data) + return _test_unary_elemwise(nn_ops.elu, data, quantized) -def _test_forward_unary_elemwise(test_op): - # functions that need positive input - if test_op.__name__ in {"_test_log"}: - test_op(np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 3))) +def _test_forward_unary_elemwise(test_op, quant_dtype=None, quantized=True, negtive=True): + # input data + in_data, inq_data = [], [] + + # quantized input data + if quantized: + quant_dtype = quant_dtype or np.uint8 + inq_data.append(np.arange(1, 240, 40, dtype=quant_dtype)) + inq_data.append(np.arange(1, 240, 40, dtype=quant_dtype).reshape((2, 1, 3))) + if quant_dtype == np.int8: + inq_data.append(np.arange(-128, 127, 45, dtype=np.int8)) + + for data in inq_data: + test_op(data, quantized=True) + + # normal input data + if negtive: + in_data.append(np.arange(-2.0, 4.0, dtype=np.float32)) + in_data.append(np.arange(-2.0, 4.0, dtype=np.float32).reshape((2, 1, 3))) else: - test_op(np.random.uniform(-10, 10, (3, 2)).astype(np.float32)) + in_data.append(np.arange(1.0, 7.0, dtype=np.float32)) + in_data.append(np.arange(1.0, 7.0, 
dtype=np.float32).reshape((2, 1, 3))) + + for data in in_data: + test_op(data, quantized=False) def test_all_unary_elemwise(): + _test_forward_unary_elemwise(_test_abs, quant_dtype=np.int8) _test_forward_unary_elemwise(_test_floor) _test_forward_unary_elemwise(_test_exp) - _test_forward_unary_elemwise(_test_log) + _test_forward_unary_elemwise(_test_log, negtive=False) _test_forward_unary_elemwise(_test_square) + _test_forward_unary_elemwise(_test_sin) + _test_forward_unary_elemwise(_test_neg) + _test_forward_unary_elemwise(_test_sqrt, negtive=False) + # tensorflow version upgrade support + if tf.__version__ < LooseVersion("2.6.1"): + _test_forward_unary_elemwise(_test_rsqrt, negtive=False, quant_dtype=np.uint8) + else: + _test_forward_unary_elemwise(_test_rsqrt, negtive=False, quant_dtype=np.int8) # ceil and cos come with TFLite 1.14.0.post1 fbs schema if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"): _test_forward_unary_elemwise(_test_ceil) - _test_forward_unary_elemwise(_test_cos) + _test_forward_unary_elemwise(_test_cos, quant_dtype=np.int8) _test_forward_unary_elemwise(_test_round) # This fails with TF and Tflite 1.15.2, this could not have been tested # in CI or anywhere else. The failure mode is that we see a backtrace # from the converter that we need to provide a custom Tan operator # implementation. 
# _test_forward_unary_elemwise(_test_tan) - _test_forward_unary_elemwise(_test_elu) + _test_forward_unary_elemwise(_test_elu, quantized=False) ####################################################################### @@ -3359,253 +3491,6 @@ def test_forward_tanh(): _test_tanh(np.arange(0, 256, 30, dtype=np.uint8), quantized=True) -####################################################################### -# RSQRT -# ---- - - -def _test_quant_rsqrt(data): - """Test RSQRT with quantized data""" - - # tensorflow version upgrade support - if tf.__version__ < LooseVersion("2.6.1"): - with tf.Graph().as_default(): - in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0") - inq_data = tf.quantization.fake_quant_with_min_max_args( - in_data, min=1, max=6, name="inq_0" - ) - input_range = {"inq_0": (1, 6)} - out = math_ops.rsqrt(inq_data) - out = tf.quantization.fake_quant_with_min_max_args(out, min=1, max=6, name="out") - compare_tflite_with_tvm( - data, - "inq_0:0", - [inq_data], - [out], - quantized=True, - input_range=input_range, - experimental_new_converter=True, - ) - else: - - def _create_model(): - class Model(tf.Module): - @tf.function - def tf_function(self, x): - op = tf.math.rsqrt(x) - return op - - dtype = "int8" - model = Model() - - # Save the model - export_dir = tempfile.gettempdir() + "/tf_model" - tf.saved_model.save( - model, - export_dir, - signatures=model.tf_function.get_concrete_function( - tf.TensorSpec(data.shape, tf.float32, name="input"), - ), - ) - - # Convert the model - def representative_dataset(): - for _ in range(100): - tmp_data = np.random.rand(*tuple(data.shape)) - yield [tmp_data.astype(np.float32) * 2] - - converter = tf.lite.TFLiteConverter.from_saved_model(export_dir) - converter.optimizations = [tf.lite.Optimize.DEFAULT] - converter.representative_dataset = representative_dataset - converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] - converter.inference_input_type = tf.int8 - 
converter.inference_output_type = tf.int8 - tflite_model = converter.convert() - return tflite_model - - tflite_model_quant = _create_model() - tflite_output = run_tflite_graph(tflite_model_quant, data) - in_node = ["tfl.quantize"] - - tvm_output = run_tvm_graph(tflite_model_quant, data, in_node) - tvm.testing.assert_allclose( - np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-5, atol=1e-2 - ) - - -def _test_rsqrt(data, quantized=False): - """One iteration of RSQRT""" - if quantized: - _test_quant_rsqrt(data) - else: - with tf.Graph().as_default(): - in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype, name="in_0") - out = math_ops.rsqrt(in_data) - compare_tflite_with_tvm(data, "in_0:0", [in_data], [out]) - - -def test_forward_rsqrt(): - """RSQRT""" - _test_rsqrt(np.arange(1.0, 7.0, dtype=np.float32), quantized=False) - _test_rsqrt(np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 3)), quantized=False) - # tensorflow version upgrade support - if tf.__version__ < LooseVersion("2.6.1"): - _test_rsqrt(np.arange(1, 240, 40, dtype=np.uint8), quantized=True) - _test_rsqrt(np.arange(1, 240, 40, dtype=np.uint8).reshape((2, 1, 3)), quantized=True) - else: - _test_rsqrt(np.arange(1, 240, 40, dtype=np.int8), quantized=True) - _test_rsqrt(np.arange(1, 240, 40, dtype=np.int8).reshape((2, 1, 3)), quantized=True) - - -####################################################################### -# SQRT -# ---- - - -def _test_sqrt(data, quantized=False): - """One iteration of SQRT""" - with tf.Graph().as_default(): - in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0") - - if quantized: - inq_data = tf.quantization.fake_quant_with_min_max_args( - in_data, min=1, max=6, name="inq_0" - ) - input_range = {"inq_0": (1, 6)} - out = math_ops.sqrt(inq_data) - out = tf.quantization.fake_quant_with_min_max_args(out, min=1, max=6, name="out") - compare_tflite_with_tvm( - data, - "inq_0:0", - [inq_data], - [out], - quantized=True, - 
input_range=input_range, - experimental_new_converter=True, - ) - else: - out = math_ops.sqrt(in_data) - compare_tflite_with_tvm(data, "in_0:0", [in_data], [out]) - - -def test_forward_sqrt(): - """SQRT""" - _test_sqrt(np.arange(1.0, 7.0, dtype=np.float32), quantized=False) - _test_sqrt(np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 3)), quantized=False) - _test_sqrt(np.arange(1, 240, 40, dtype=np.uint8), quantized=True) - _test_sqrt(np.arange(1, 240, 40, dtype=np.uint8).reshape((2, 1, 3)), quantized=True) - - -####################################################################### -# NEG -# ---- - - -def _test_neg(data, quantized=False): - """One iteration of NEG""" - with tf.Graph().as_default(): - in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0") - - if quantized: - inq_data = tf.quantization.fake_quant_with_min_max_args( - in_data, min=1, max=6, name="inq_0" - ) - input_range = {"inq_0": (1, 6)} - out = math_ops.neg(inq_data) - out = tf.quantization.fake_quant_with_min_max_args(out, min=1, max=6, name="out") - compare_tflite_with_tvm( - data, - "inq_0:0", - [inq_data], - [out], - quantized=True, - input_range=input_range, - experimental_new_converter=True, - ) - else: - out = math_ops.neg(in_data) - compare_tflite_with_tvm(data, "in_0:0", [in_data], [out]) - - -def test_forward_neg(): - """NEG""" - _test_neg(np.arange(-2.0, 4.0, dtype=np.float32), quantized=False) - _test_neg(np.arange(-2.0, 4.0, dtype=np.float32).reshape((2, 1, 3)), quantized=False) - _test_neg(np.arange(1, 240, 40, dtype=np.uint8), quantized=True) - _test_neg(np.arange(1, 240, 40, dtype=np.uint8).reshape((2, 1, 3)), quantized=True) - - -####################################################################### -# ABS -# ---- - - -def _test_abs(data, quantized=False): - """One iteration of ABS""" - if quantized: - - def _create_model(): - class Model(tf.Module): - @tf.function - def tf_function(self, x): - op = tf.math.abs(x) - return op - - dtype = "int8" - 
model = Model() - - # Save the model - export_dir = tempfile.gettempdir() + "/tf_model" - tf.saved_model.save( - model, - export_dir, - signatures=model.tf_function.get_concrete_function( - tf.TensorSpec(data.shape, tf.float32, name="input"), - ), - ) - - # Convert the model - def representative_dataset(): - for _ in range(100): - tmp_data = np.random.rand(*tuple(data.shape)) - yield [tmp_data.astype(np.float32) * 2 - 1] - - converter = tf.lite.TFLiteConverter.from_saved_model(export_dir) - converter.optimizations = [tf.lite.Optimize.DEFAULT] - converter.representative_dataset = representative_dataset - converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] - converter.inference_input_type = tf.int8 - converter.inference_output_type = tf.int8 - tflite_model = converter.convert() - return tflite_model - - tflite_model_quant = _create_model() - tflite_output = run_tflite_graph(tflite_model_quant, data) - - # TFLite 2.6.x upgrade support - if tf.__version__ < LooseVersion("2.6.1"): - in_node = ["serving_default_input_int8"] - else: - in_node = ["tfl.quantize"] - - tvm_output = run_tvm_graph(tflite_model_quant, data, in_node) - tvm.testing.assert_allclose( - np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-5, atol=1e-2 - ) - else: - with tf.Graph().as_default(): - in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype, name="in_0") - out = math_ops.abs(in_data) - compare_tflite_with_tvm(data, "in_0:0", [in_data], [out]) - - -def test_forward_abs(): - """ABS""" - _test_abs(np.arange(-3.0, 3.0, dtype=np.float32), quantized=False) - _test_abs(np.arange(-3.0, 3.0, dtype=np.float32).reshape((2, 1, 3)), quantized=False) - _test_abs(np.arange(-128, 127, 45, dtype=np.int8), quantized=True) - - ####################################################################### # ReLu # ---- @@ -4916,11 +4801,6 @@ def test_prevent_tensorflow_dynamic_range(): test_forward_l2_pool2d() test_forward_softmax() test_forward_tanh() - 
test_forward_rsqrt() - test_forward_neg() - test_forward_sin() - test_forward_abs() - test_forward_sqrt() test_forward_relu() test_forward_relu6() test_forward_leaky_relu()