From d330473fa4e840a71b4404b78e4c21eaf2cf83ed Mon Sep 17 00:00:00 2001
From: wjj19950828
Date: Tue, 14 Sep 2021 14:45:23 +0800
Subject: [PATCH 1/3] fixed code format

---
 python/tvm/relay/frontend/paddlepaddle.py | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/python/tvm/relay/frontend/paddlepaddle.py b/python/tvm/relay/frontend/paddlepaddle.py
index ca96ef779e6f..47b20c8af84e 100644
--- a/python/tvm/relay/frontend/paddlepaddle.py
+++ b/python/tvm/relay/frontend/paddlepaddle.py
@@ -486,7 +486,7 @@ def convert_elementwise_op(g, op, block):
         "elementwise_sub": lambda x, y: x - y,
         "elementwise_mod": _op.mod,
         "elementwise_pow": _op.power,
-        "elementwise_floordiv": _op.floor_divide
+        "elementwise_floordiv": _op.floor_divide,
     }
     op_func = op_map[op.type]
     ipt0 = g.get_node(op.input("X")[0])
@@ -602,10 +602,12 @@ def convert_fill_constant_batch_size_like(g, op, block):
     dtype = block.var(op.output("Out")[0]).dtype
     dtype = str(dtype).strip().split(".")[1]
     input_shape = shape_of(x)
-    batch = _op.strided_slice(input_shape, begin=[input_dim_idx], end=[input_dim_idx+1]).astype("int32")
+    batch = _op.strided_slice(input_shape, begin=[input_dim_idx], end=[input_dim_idx + 1]).astype(
+        "int32"
+    )
     shape_before = shape[:output_dim_idx]
     shape_before = _expr.const(shape_before, dtype="int32")
-    shape_after = shape[output_dim_idx+1:]
+    shape_after = shape[output_dim_idx + 1 :]
     shape_after = _expr.const(shape_after, dtype="int32")
 
     out_shape = _op.concatenate([shape_before, batch, shape_after], axis=0)
@@ -1413,7 +1415,7 @@ def convert_topk(g, op, block):
 
     g.add_node(op.output("Out")[0], outs[0])
     g.add_node(op.output("Indices")[0], outs[1])
-    
+
 
 def convert_stack(g, op, block):
     """Operator converter for stack."""
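A quick sketch of what the reformatted convert_fill_constant_batch_size_like hunk computes: the output shape is the target shape with the batch axis replaced by a size sliced out of the input's shape. In plain numpy terms (illustrative only; the real converter builds Relay expressions, and the shapes and indices below are assumed examples, not values from the patch):

    import numpy as np

    shape = [2, 3, 4]        # target shape from the op attributes (assumed example)
    input_shape = [8, 3, 4]  # what shape_of(x) would yield (assumed example)
    input_dim_idx = 0        # input axis that carries the batch size
    output_dim_idx = 0       # output axis that receives it

    # strided_slice(input_shape, [idx], [idx + 1]) is a one-element slice:
    batch = input_shape[input_dim_idx : input_dim_idx + 1]

    out_shape = np.concatenate(
        [shape[:output_dim_idx], batch, shape[output_dim_idx + 1 :]]
    ).astype("int32")
    print(out_shape)  # [8 3 4]: the batch dim is copied from the input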
From 0590833240a04497deb9aa561abcd81da7e7358e Mon Sep 17 00:00:00 2001
From: wjj19950828
Date: Tue, 14 Sep 2021 16:53:48 +0800
Subject: [PATCH 2/3] add greater_equal, isfinite and isnan ops

---
 python/tvm/relay/frontend/paddlepaddle.py | 20 +++
 .../frontend/paddlepaddle/test_forward.py | 57 ++++++++
 2 files changed, 77 insertions(+)

diff --git a/python/tvm/relay/frontend/paddlepaddle.py b/python/tvm/relay/frontend/paddlepaddle.py
index 665c88629517..b865fefa2b1e 100644
--- a/python/tvm/relay/frontend/paddlepaddle.py
+++ b/python/tvm/relay/frontend/paddlepaddle.py
@@ -85,6 +85,8 @@ def convert_unary_op(g, op, block):
 
     op_map = {
         "isinf_v2": _op.isinf,
+        "isfinite_v2": _op.isfinite,
+        "isnan_v2": _op.isnan,
     }
     if op.type in op_map:
         unary_func = op_map[op.type]
@@ -491,6 +493,7 @@ def convert_elementwise_op(g, op, block):
         "elementwise_floordiv": "floor_divide",
         "floor_mod": "floor_mod",
         "equal": "equal",
+        "greater_equal": "greater_equal",
         "greater_than": "greater",
         "less_equal": "less_equal",
         "less_than": "less",
@@ -782,6 +785,16 @@ def convert_log1p(g, op, block):
     g.add_node(op.output("Out")[0], out)
 
 
+def convert_logical_op(g, op, block):
+    """Operator converter for logical op."""
+
+    ipt0 = g.get_node(op.input("X")[0])
+    ipt1 = g.get_node(op.input("Y")[0])
+    op_func = get_relay_op(op.type)
+    out = op_func(ipt0, ipt1)
+    g.add_node(op.output("Out")[0], out)
+
+
 def convert_logsumexp(g, op, block):
     """Operator converter for logsumexp."""
 
@@ -1602,12 +1615,17 @@ def convert_unsqueeze(g, op, block):
    "gather": convert_gather,
    "gather_nd": convert_gather_nd,
    "gelu": convert_gelu,
+   "greater_equal": convert_elementwise_op,
    "greater_than": convert_elementwise_op,
    "hard_sigmoid": convert_hard_sigmoid,
    "hard_swish": convert_hard_swish,
    "index_select": convert_index_select,
+   "isfinite": convert_unary_op,
+   "isfinite_v2": convert_unary_op,
    "isinf": convert_unary_op,
    "isinf_v2": convert_unary_op,
+   "isnan": convert_unary_op,
+   "isnan_v2": convert_unary_op,
    "layer_norm": convert_layer_norm,
    "leaky_relu": convert_leaky_relu,
    "less_equal": convert_elementwise_op,
@@ -1619,6 +1637,8 @@ def convert_unsqueeze(g, op, block):
    "log10": convert_unary_op,
    "log1p": convert_log1p,
    "logsumexp": convert_logsumexp,
+   "logical_and": convert_logical_op,
+   "logical_or": convert_logical_op,
    "matmul": convert_matmul,
    "matmul_v2": convert_matmul,
    "mul": convert_mul,
diff --git a/tests/python/frontend/paddlepaddle/test_forward.py b/tests/python/frontend/paddlepaddle/test_forward.py
index cc63da231804..925a8329a0e5 100644
--- a/tests/python/frontend/paddlepaddle/test_forward.py
+++ b/tests/python/frontend/paddlepaddle/test_forward.py
@@ -672,6 +672,7 @@ def forward(self, input1, input2):
         "maximum",
         "minimum",
         "equal",
+        "greater_equal",
         "greater_than",
         "less_equal",
         "less_than",
@@ -777,6 +778,17 @@ def index_select2(x, index):
     verify_model(index_select2, input_data=[input_data, index])
 
 
+@tvm.testing.uses_gpu
+def test_forward_isfinite():
+    @paddle.jit.to_static
+    def isfinite(inputs):
+        return paddle.cast(paddle.isfinite(inputs), "int32")
+
+    input_shape = [5, 5]
+    input_data = paddle.rand(input_shape, dtype="float32")
+    verify_model(isfinite, input_data=input_data)
+
+
 @tvm.testing.uses_gpu
 def test_forward_isinf():
     @paddle.jit.to_static
@@ -788,6 +800,17 @@ def isinf(inputs):
     verify_model(isinf, input_data=input_data)
 
 
+@tvm.testing.uses_gpu
+def test_forward_isnan():
+    @paddle.jit.to_static
+    def isnan(inputs):
+        return paddle.cast(paddle.isnan(inputs), "int32")
+
+    input_shape = [5, 5]
+    input_data = paddle.rand(input_shape, dtype="float32")
+    verify_model(isnan, input_data=input_data)
+
+
 @tvm.testing.uses_gpu
 def test_forward_interpolate():
     class TestBilinear(nn.Layer):
@@ -863,6 +886,37 @@ def leaky_relu(inputs):
     verify_model(leaky_relu, input_data=input_data)
 
 
+@tvm.testing.uses_gpu
+def test_forward_logical_op():
+    class LogicalOp(nn.Layer):
+        def __init__(self, op_name, out=False):
+            super(LogicalOp, self).__init__()
+            self.out = out
+            for candidate in (paddle, paddle.nn.functional):
+                self.func = getattr(candidate, op_name, None)
+                if self.func:
+                    break
+
+        @paddle.jit.to_static
+        def forward(self, x, y):
+            if self.out:
+                out = paddle.to_tensor([True, True, True])
+                z = self.func(x, y, out=out)
+            else:
+                z = self.func(x, y)
+            return paddle.cast(z, "int32")
+
+    op_list = [
+        "logical_and",
+        "logical_or",
+    ]
+    x = paddle.to_tensor([True])
+    y = paddle.to_tensor([True, False, True, False])
+    for op_name in op_list:
+        verify_model(LogicalOp(op_name, False), [x, y])
+        verify_model(LogicalOp(op_name, True), [x, y])
+
+
 @tvm.testing.uses_gpu
 def test_forward_look_up():
     @paddle.jit.to_static
@@ -1444,53 +1498,56 @@ def zeros2(inputs):
 
 
 if __name__ == "__main__":
     test_forward_add_subtract()
     test_forward_addmm()
     test_forward_arange()
     test_forward_argmax()
     test_forward_argmin()
     test_forward_assign()
     test_forward_batch_norm()
     test_forward_cast()
     test_forward_concat_unsqueeze()
     test_forward_conv()
     test_forward_crop()
     test_forward_cumsum()
     test_forward_dot()
     test_forward_dropout()
     test_forward_elemwise()
     test_forward_expand()
     test_forward_flatten()
     test_forward_shape_full()
     test_forward_ones()
     test_forward_ones_like()
     test_forward_gather_assign_value()
     test_forward_gather_nd()
     test_forward_gelu()
     test_forward_hard_sigmoid()
     test_forward_hard_swish()
     test_forward_index_select()
     test_forward_interpolate()
+    test_forward_isfinite()
     test_forward_isinf()
+    test_forward_isnan()
     test_forward_layer_norm()
     test_forward_leaky_relu()
+    test_forward_logical_op()
     test_forward_look_up()
     test_forward_lstm()
     test_forward_matmul()
     test_forward_multiply()
     test_forward_nonzero()
     test_forward_norm()
     test_forward_pool2d()
     test_forward_pad()
     test_forward_pow()
     test_forward_reduce_op()
     test_forward_reshape()
     test_forward_scale()
     test_forward_slice()
     test_forward_split()
     test_forward_squeeze2()
     test_forward_topk()
     test_forward_tile()
     test_forward_conv_transpose()
     test_forward_unary_op()
     test_forward_zeros()
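Both convert_unary_op and convert_elementwise_op in this patch use the same table-driven dispatch: the Paddle op type string selects a Relay builder, which is then applied to the looked-up input expressions. A minimal standalone sketch of that pattern (this is not the frontend code itself; it assumes a TVM install, and the variable name and 5x5 shape simply mirror the tests above):

    from tvm import relay

    # Paddle op type -> Relay builder, as in convert_unary_op's op_map.
    op_map = {
        "isinf_v2": relay.isinf,
        "isfinite_v2": relay.isfinite,
        "isnan_v2": relay.isnan,
    }

    def convert_unary(op_type, value):
        # Dispatch on the op type string and build the Relay call.
        return op_map[op_type](value)

    x = relay.var("x", shape=(5, 5), dtype="float32")
    print(convert_unary("isnan_v2", x))  # prints the Relay expression isnan(%x)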
From 4f96c08158733f2301fcecf15e2eae835fe56044 Mon Sep 17 00:00:00 2001
From: wjj19950828
Date: Wed, 15 Sep 2021 21:26:10 +0800
Subject: [PATCH 3/3] add logical op

---
 python/tvm/relay/frontend/paddlepaddle.py          | 11 +++++++++++
 tests/python/frontend/paddlepaddle/test_forward.py | 13 +++++++++++++
 2 files changed, 24 insertions(+)

diff --git a/python/tvm/relay/frontend/paddlepaddle.py b/python/tvm/relay/frontend/paddlepaddle.py
index d79367fb120b..bc1fb682eaa2 100644
--- a/python/tvm/relay/frontend/paddlepaddle.py
+++ b/python/tvm/relay/frontend/paddlepaddle.py
@@ -981,6 +981,15 @@ def convert_logical_op(g, op, block):
     g.add_node(op.output("Out")[0], out)
 
 
+def convert_logical_not(g, op, block):
+    """Operator converter for logical_not op."""
+
+    ipt0 = g.get_node(op.input("X")[0])
+    op_func = get_relay_op(op.type)
+    out = op_func(ipt0)
+    g.add_node(op.output("Out")[0], out)
+
+
 def convert_logsumexp(g, op, block):
     """Operator converter for logsumexp."""
 
@@ -1887,7 +1896,9 @@ def convert_where(g, op, block):
    "log10": convert_unary_op,
    "log1p": convert_log1p,
    "logical_and": convert_logical_op,
+   "logical_not": convert_logical_not,
    "logical_or": convert_logical_op,
+   "logical_xor": convert_logical_op,
    "logsumexp": convert_logsumexp,
    "matmul": convert_matmul,
    "matmul_v2": convert_matmul,
diff --git a/tests/python/frontend/paddlepaddle/test_forward.py b/tests/python/frontend/paddlepaddle/test_forward.py
index 39ca424002e8..8d2bd742ba7f 100644
--- a/tests/python/frontend/paddlepaddle/test_forward.py
+++ b/tests/python/frontend/paddlepaddle/test_forward.py
@@ -1036,8 +1036,19 @@ def forward(self, x, y):
                 z = self.func(x, y)
             return paddle.cast(z, "int32")
 
+    class LogicalOp_not(LogicalOp):
+        @paddle.jit.to_static
+        def forward(self, x):
+            if self.out:
+                out = paddle.to_tensor([True, True, True])
+                z = self.func(x, out=out)
+            else:
+                z = self.func(x)
+            return paddle.cast(z, "int32")
+
     op_list = [
         "logical_or",
+        "logical_xor",
         "logical_and",
     ]
     x = paddle.to_tensor([True])
@@ -1045,6 +1056,8 @@ def forward(self, x, y):
     for op_name in op_list:
         verify_model(LogicalOp(op_name, False), [x, y])
         verify_model(LogicalOp(op_name, True), [x, y])
+    verify_model(LogicalOp_not("logical_not", False), [y])
+    verify_model(LogicalOp_not("logical_not", True), [y])
 
 
 @tvm.testing.uses_gpu
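The logical-op tests above rely on Paddle broadcasting the rank-1 inputs against each other, and logical_not is the odd one out because it is unary, which is why this patch routes it through a dedicated convert_logical_not instead of convert_logical_op. A quick interactive check of that behaviour (assumes a working paddlepaddle install; expected outputs in comments, not part of the patch itself):

    import paddle

    x = paddle.to_tensor([True])
    y = paddle.to_tensor([True, False, True, False])

    # Binary logical ops broadcast x (shape [1]) against y (shape [4]).
    print(paddle.logical_and(x, y))  # [True, False, True, False]
    print(paddle.logical_xor(x, y))  # [False, True, False, True]

    # logical_not takes a single input, matching convert_logical_not.
    print(paddle.logical_not(y))     # [False, True, False, True]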