[ONNX] [apache#8838] QLinearLeakyRelu contrib op (apache#9063)
* [ONNX] QLinearLeakyRelu contrib op

* Add comment

* jostle ci

* jostle ci
gayatripk1 authored and ylc committed Jan 7, 2022
1 parent 8382d3d commit dea4b36
Showing 2 changed files with 44 additions and 0 deletions.
23 changes: 23 additions & 0 deletions python/tvm/relay/frontend/onnx.py
@@ -3531,6 +3531,28 @@ def _impl_v10(cls, inputs, attr, params):
        return _qnn.op.quantize(out, y_scale, y_zero_point, out_dtype=dtype)


class QLinearLeakyRelu(OnnxOpConverter):
    """Operator converter for QLinearLeakyRelu from Microsoft onnxruntime contrib opset."""

    @classmethod
    def _impl_v10(cls, inputs, attr, params):
        a_scale = get_scalar(inputs[1], params)
        a_zero_point = get_scalar(inputs[2], params, "int32")
        y_scale = fold_constant(get_scalar(inputs[3], params))
        y_zero_point = get_scalar(inputs[4], params, "int32")
        alpha = float(attr.get("alpha", 1.0))

        dtype = infer_type(inputs[0]).checked_type.dtype

        # Onnxruntime doesn't actually do this op in integer; it dequantizes to fp32,
        # applies leaky relu, and requantizes after (according to the documentation below).
        # https://github.com/microsoft/onnxruntime/blob/master/docs/ContribOperators.md#com.microsoft.QLinearLeakyRelu
        a = _qnn.op.dequantize(inputs[0], a_scale, a_zero_point)
        out = _op.nn.leaky_relu(a, alpha)
        return _qnn.op.quantize(out, y_scale, y_zero_point, out_dtype=dtype)


class QLinearSigmoid(OnnxOpConverter):
"""Operator converter for QLinearSigmoid from Microsoft onnxruntime contrib opset."""

@@ -4217,6 +4239,7 @@ def _get_convert_map(opset):
"ConvInteger": ConvInteger.get_converter(opset),
"QLinearAveragePool": QLinearAveragePool.get_converter(opset),
"QLinearGlobalAveragePool": QLinearGlobalAveragePool.get_converter(opset),
"QLinearLeakyRelu": QLinearLeakyRelu.get_converter(opset),
# Random number generation.
"RandomUniform": RandomUniform.get_converter(opset),
# Loss functions / training
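
As the comment in the converter notes, onnxruntime evaluates this op by dequantizing to fp32, applying leaky relu, and requantizing. A minimal NumPy sketch of that arithmetic, using illustrative (not model-derived) scales and zero points:

import numpy as np

# Illustrative quantization parameters; in a real model these come from
# inputs 1-4 of the QLinearLeakyRelu node.
a_scale, a_zero_point = 0.02, 128
y_scale, y_zero_point = 0.03, 128
alpha = 0.25

x_q = np.array([100, 128, 200], dtype=np.uint8)

# Dequantize to fp32 (the _qnn.op.dequantize step).
x_fp = (x_q.astype(np.int32) - a_zero_point) * a_scale

# LeakyRelu in floating point: x if x >= 0 else alpha * x.
y_fp = np.where(x_fp >= 0, x_fp, alpha * x_fp)

# Requantize to uint8 (the _qnn.op.quantize step).
y_q = np.clip(np.round(y_fp / y_scale) + y_zero_point, 0, 255).astype(np.uint8)
print(y_q)  # [123 128 176]
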
21 changes: 21 additions & 0 deletions tests/python/frontend/onnx/test_forward.py
@@ -5525,6 +5525,27 @@ def verify_qlinearmul(a_shape, b_shape, c_shape):
    verify_qlinearmul([5, 1, 7], [2, 7], [5, 2, 7])


@tvm.testing.parametrize_targets
def test_qlinearleakyrelu(target, dev):
    def verify_qlinearleakyrelu(inshape, kwargs):
        in_array = np.random.random(inshape).astype("float32")
        node = helper.make_node("LeakyRelu", ["X"], ["Y"], **kwargs)

        graph = helper.make_graph(
            [node],
            "qlinearRelu_test",
            inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, list(in_array.shape))],
            outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, list(in_array.shape))],
        )
        model = helper.make_model(graph, producer_name="qlinearRelu_test")
        quantize_and_verify_with_ort(model, ["X"], [in_array.shape], target, dev)

    verify_qlinearleakyrelu([2, 4, 5, 6], {"alpha": 0.25})
    verify_qlinearleakyrelu([6, 5, 6, 7], {"alpha": 0.35})
    verify_qlinearleakyrelu([5, 1, 4, 6], {"alpha": 0.65})


@tvm.testing.parametrize_targets
def test_qlinearsigmoid(target, dev):
    def verify_qlinearsigmoid(a_shape):
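
The test builds a plain float LeakyRelu model and relies on the quantize_and_verify_with_ort helper to quantize it via onnxruntime and compare onnxruntime's output against TVM's. For reference, a hedged sketch of importing a model that already contains the contrib op through the ONNX frontend; the file name and input shape here are hypothetical:

import onnx
import tvm
from tvm import relay

# Hypothetical file name; any ONNX graph containing QLinearLeakyRelu works.
model = onnx.load("qlinear_leakyrelu.onnx")

# The frontend dispatches QLinearLeakyRelu through the converter added above.
mod, params = relay.frontend.from_onnx(model, shape={"X": (2, 4, 5, 6)})

with tvm.transform.PassContext(opt_level=3):
    lib = relay.build(mod, target="llvm", params=params)
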
