From 359b2ab7c5c7b188509ecbacb46b50a4fb38d4b2 Mon Sep 17 00:00:00 2001
From: heliqi <1101791222@qq.com>
Date: Fri, 10 Sep 2021 17:01:23 +0800
Subject: [PATCH 1/2] add maximum minimum nonzero op

---
 python/tvm/relay/frontend/paddlepaddle.py          | 37 ++++++++++++++++++-
 .../frontend/paddlepaddle/test_forward.py          | 30 +++++++++++++--
 2 files changed, 63 insertions(+), 4 deletions(-)

diff --git a/python/tvm/relay/frontend/paddlepaddle.py b/python/tvm/relay/frontend/paddlepaddle.py
index 5c3f50a12caa..a402eaa2b06c 100644
--- a/python/tvm/relay/frontend/paddlepaddle.py
+++ b/python/tvm/relay/frontend/paddlepaddle.py
@@ -485,6 +485,8 @@ def convert_elementwise_op(g, op, block):
         "elementwise_mul": "multiply",
         "elementwise_sub": "subtract",
         "elementwise_mod": "mod",
+        "elementwise_max": "maximum",
+        "elementwise_min": "minimum",
         "elementwise_pow": "power",
         "elementwise_floordiv": "floor_divide",
         "floor_mod": "floor_mod",
@@ -937,6 +939,25 @@ def convert_mul(g, op, block):
     g.add_node(op.output("Out")[0], out)
 
 
+def convert_numel(g, op, block):
+    """Operator converter for numel."""
+
+    input_x = g.get_node(op.input("Input")[0])
+    out = _op.ndarray_size(input_x)
+    out = _op.expand_dims(out, axis=0)
+    g.add_node(op.output("Out")[0], out)
+
+
+def convert_nonzero(g, op, block):
+    """Operator converter for nonzero."""
+
+    input_x = g.get_node(op.input("Condition")[0])
+    out = _op.transform.argwhere(input_x)
+    # Paddle NonZero always outputs int64
+    out = _op.cast(out, "int64")
+    g.add_node(op.output("Out")[0], out)
+
+
 def convert_pool2d(g, op, block):
     """Operator converter for pool2d."""
 
@@ -1055,6 +1076,15 @@ def convert_range(g, op, block):
     g.add_node(op.output("Out")[0], out)
 
 
+def convert_reciprocal(g, op, block):
+    """Operator converter for reciprocal."""
+
+    x = g.get_node(op.input("X")[0])
+    dtype = infer_type(x).checked_type.dtype
+    out = _expr.const(1.0, dtype) / x
+    g.add_node(op.output("Out")[0], out)
+
+
 def convert_reduce(g, op, block):
     """Operator converter for reduce."""
 
@@ -1250,7 +1280,7 @@ def convert_scale(g, op, block):
     bias_after_scale = op.attr("bias_after_scale")
     x = g.get_node(op.input("X")[0])
     if np.isclose(scale, 1.0) and np.isclose(bias, 0.0):
-        out = _op.copy(x)
+        out = x
     else:
         if np.isclose(bias, 0.0):
             out = x * _expr.const(np.array(scale).astype("float32"))
@@ -1516,6 +1546,8 @@ def convert_unsqueeze(g, op, block):
     "elementwise_mul": convert_elementwise_op,
     "elementwise_sub": convert_elementwise_op,
     "elementwise_mod": convert_elementwise_op,
+    "elementwise_max": convert_elementwise_op,
+    "elementwise_min": convert_elementwise_op,
     "elementwise_pow": convert_elementwise_op,
     "elementwise_floordiv": convert_elementwise_op,
     "equal": convert_elementwise_op,
@@ -1557,6 +1589,7 @@ def convert_unsqueeze(g, op, block):
     "pad3d": convert_padding,
     "pow": convert_pow,
     "range": convert_range,
+    "reciprocal": convert_reciprocal,
     "reduce_all": convert_reduce,
     "reduce_any": convert_reduce,
     "reduce_max": convert_reduce,
@@ -1572,6 +1605,7 @@ def convert_unsqueeze(g, op, block):
     "shape": convert_shape,
     "sigmoid": convert_unary_op,
     "sin": convert_unary_op,
+    "size": convert_numel,
     "slice": convert_slice,
     "softmax": convert_softmax,
     "split": convert_split,
@@ -1584,6 +1618,7 @@ def convert_unsqueeze(g, op, block):
     "tile": convert_tile,
     "transpose2": convert_transpose,
     "unsqueeze2": convert_unsqueeze,
+    "where_index": convert_nonzero,
 }
 
 
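The converters above lean entirely on existing Relay primitives: Paddle's where_index becomes argwhere plus an int64 cast, and size becomes ndarray_size reshaped to a 1-D tensor. A minimal standalone sketch of the equivalent Relay construction, assuming an illustrative [3, 4] float32 input (the variable names below are not part of the patch):

    import tvm
    from tvm import relay

    # Illustrative input; any tensor accepted by argwhere works here.
    x = relay.var("x", shape=(3, 4), dtype="float32")

    # where_index -> argwhere, then cast because Paddle's NonZero always returns int64.
    nonzero = relay.cast(relay.argwhere(x), "int64")

    # size -> ndarray_size, expanded to shape (1,) to mirror what convert_numel emits.
    numel = relay.expand_dims(relay.ndarray_size(x), axis=0)

    mod = tvm.IRModule.from_expr(relay.Tuple([nonzero, numel]))
    print(mod)
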
diff --git a/tests/python/frontend/paddlepaddle/test_forward.py b/tests/python/frontend/paddlepaddle/test_forward.py
index f7ced14d7c69..8ba52627ccdc 100644
--- a/tests/python/frontend/paddlepaddle/test_forward.py
+++ b/tests/python/frontend/paddlepaddle/test_forward.py
@@ -89,10 +89,9 @@ def verify_model(func, input_data, rtol=1e-5, atol=1e-5, input_shape=None):
         input_name = "input{}".format(idx)
         if input_shape:
             shape = input_shape[idx]
-            input_shape_dict[input_name] = [relay.Any()] * len(shape)
         else:
             shape = data.shape
-            input_shape_dict[input_name] = shape
+        input_shape_dict[input_name] = shape
         input_spec.append(paddle.static.InputSpec(dtype=data.dtype, shape=shape, name=input_name))
         input_names.append(input_name)
         if isinstance(data, np.ndarray):
@@ -158,6 +157,8 @@ def forward(self, inputs):
         "log",
         "log10",
         "log1p",
+        "numel",
+        "reciprocal",
         "relu",
         "rsqrt",
         "sigmoid",
@@ -631,6 +632,7 @@ def test_forward_elemwise():
     class ElemwiseOp(nn.Layer):
         def __init__(self, op_name):
             super(ElemwiseOp, self).__init__()
+            self.op_name_ = op_name
             for candidate in (paddle, paddle.nn.functional):
                 self.func = getattr(candidate, op_name, None)
                 if self.func:
@@ -639,11 +641,15 @@ def __init__(self, op_name):
         @paddle.jit.to_static
         def forward(self, input1, input2):
             y = self.func(input1, input2)
-            return paddle.cast(y, "int32")
+            if "equal" in self.op_name_ or "than" in self.op_name_:
+                y = paddle.cast(y, "int32")
+            return y
 
     op_list = [
         "floor_divide",
         "floor_mod",
+        "maximum",
+        "minimum",
         "equal",
         "greater_than",
         "less_equal",
@@ -911,6 +917,23 @@ def forward(self, input1, input2):
     verify_model(MatMul1(), input_data=[input_data1, input_data2])
 
 
+@tvm.testing.uses_gpu
+def test_forward_nonzero():
+    class Nonzero(nn.Layer):
+        def __init__(self, as_tuple=False):
+            super().__init__()
+            self.as_tuple = as_tuple
+
+        def forward(self, inputs):
+            return paddle.nonzero(inputs, self.as_tuple)
+
+    x1 = paddle.to_tensor([[1.0, 0.0, 0.0, 2.0], [0.0, 2.0, 0.0, 1.1], [0.0, 0.0, 3.0, 0.0]])
+    verify_model(Nonzero(), x1, input_shape=[[3, 4]])
+    verify_model(Nonzero(True), x1, input_shape=[[3, 4]])
+    x2 = paddle.to_tensor([0, 1, 0, 3])
+    verify_model(Nonzero(), x2, input_shape=[[3, 4]])
+
+
 @tvm.testing.uses_gpu
 def test_forward_pool2d():
     @paddle.jit.to_static
@@ -1320,6 +1343,7 @@ def tile3(inputs, inputs2):
     test_forward_lstm()
     test_forward_matmul()
     test_forward_multiply()
+    test_forward_nonzero()
     test_forward_pool2d()
     test_forward_pad()
     test_forward_pow()
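The new tests all funnel through verify_model, which scripts the layer with paddle.jit and hands the saved model to relay.frontend.from_paddle. A rough standalone equivalent for the new elementwise maximum mapping, assuming a placeholder save path and input names (the actual verify_model plumbing in the test file differs in detail):

    import paddle
    from tvm import relay

    class MaxLayer(paddle.nn.Layer):
        @paddle.jit.to_static
        def forward(self, a, b):
            # Lowers to Paddle's elementwise_max, which the frontend now maps to maximum.
            return paddle.maximum(a, b)

    spec = [paddle.static.InputSpec([2, 3], "float32", name=n) for n in ("a", "b")]
    paddle.jit.save(MaxLayer(), "/tmp/max_layer", input_spec=spec)  # placeholder path
    model = paddle.jit.load("/tmp/max_layer")

    mod, params = relay.frontend.from_paddle(model, {"a": [2, 3], "b": [2, 3]})
    print(mod)
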
From fe26401be4027d9a836b386e4025bc118f8358a8 Mon Sep 17 00:00:00 2001
From: heliqi <1101791222@qq.com>
Date: Fri, 10 Sep 2021 17:49:11 +0800
Subject: [PATCH 2/2] add slice strides params

---
 python/tvm/relay/frontend/paddlepaddle.py          | 11 ++++++-----
 tests/python/frontend/paddlepaddle/test_forward.py | 11 ++++++++++-
 2 files changed, 16 insertions(+), 6 deletions(-)

diff --git a/python/tvm/relay/frontend/paddlepaddle.py b/python/tvm/relay/frontend/paddlepaddle.py
index a402eaa2b06c..8bc4d77a20a3 100644
--- a/python/tvm/relay/frontend/paddlepaddle.py
+++ b/python/tvm/relay/frontend/paddlepaddle.py
@@ -1310,7 +1310,7 @@ def convert_slice(g, op, block):
     """Operator converter for slice."""
 
     data = g.get_node(op.input("Input")[0])
-    dims = len(block.var(op.input("Input")[0]).shape)
+    dims = len(infer_shape(data))
 
     dtype = "int64"
     axes = op.attr("axes")
@@ -1335,21 +1335,20 @@ def convert_slice(g, op, block):
     else:
         starts = op.attr("starts")
         starts = _expr.const(starts)
+    start_dtype = infer_type(starts).checked_type.dtype
     if isinstance(starts, _expr.Expr):
         starts = _op.scatter(
-            _op.const([0] * dims, dtype=infer_type(starts).checked_type.dtype),
+            _op.const([0] * dims, dtype=start_dtype),
             axes,
             starts,
             axis=0,
         )
 
-    data_shape = shape_of(data)
     ends = op.input("EndsTensor")
     if ends:
         ends = g.get_node(ends[0])
     elif op.input("EndsTensorList"):
         ends = []
-        data_shape = data_shape.astype(dtype)
         for end_index in op.input("EndsTensorList"):
             end_index = g.get_node(end_index)
             if not isinstance(end_index, _expr.Expr):
@@ -1362,9 +1361,11 @@ def convert_slice(g, op, block):
         ends = op.attr("ends")
         ends = _expr.const(ends)
     if isinstance(ends, _expr.Expr):
+        data_shape = shape_of(data, infer_type(ends).checked_type.dtype)
         ends = _op.scatter(data_shape, axes, ends, axis=0)
 
-    out = _op.strided_slice(data, begin=starts, end=ends)
+    strides = _op.const([1] * dims, dtype=start_dtype)
+    out = _op.strided_slice(data, begin=starts, end=ends, strides=strides)
     if decrease_axis:
         out = _op.squeeze(out, axis=decrease_axis)
     g.add_node(op.output("Out")[0], out)
diff --git a/tests/python/frontend/paddlepaddle/test_forward.py b/tests/python/frontend/paddlepaddle/test_forward.py
index 8ba52627ccdc..655e12de9330 100644
--- a/tests/python/frontend/paddlepaddle/test_forward.py
+++ b/tests/python/frontend/paddlepaddle/test_forward.py
@@ -924,6 +924,7 @@ def __init__(self, as_tuple=False):
             super().__init__()
             self.as_tuple = as_tuple
 
+        @paddle.jit.to_static
         def forward(self, inputs):
             return paddle.nonzero(inputs, self.as_tuple)
 
@@ -931,7 +932,15 @@ def forward(self, inputs):
     verify_model(Nonzero(), x1, input_shape=[[3, 4]])
     verify_model(Nonzero(True), x1, input_shape=[[3, 4]])
     x2 = paddle.to_tensor([0, 1, 0, 3])
-    verify_model(Nonzero(), x2, input_shape=[[3, 4]])
+    verify_model(
+        Nonzero(),
+        x2,
+        input_shape=[
+            [
+                4,
+            ]
+        ],
+    )
 
 
 @tvm.testing.uses_gpu
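With explicit strides, convert_slice now hands strided_slice three expressions of equal length, one entry per input dimension. An illustrative Relay-level sketch of the resulting call, with shapes and slice bounds chosen arbitrarily rather than taken from the patch:

    import numpy as np
    import tvm
    from tvm import relay

    data = relay.var("x", shape=(3, 4), dtype="float32")

    # One entry per dimension, mirroring how convert_slice scatters the slice
    # attributes into full-length begin/end vectors and builds an all-ones strides vector.
    begin = relay.const(np.array([0, 1], dtype="int64"))
    end = relay.const(np.array([3, 4], dtype="int64"))
    strides = relay.const(np.array([1, 1], dtype="int64"))

    out = relay.strided_slice(data, begin=begin, end=end, strides=strides)
    mod = tvm.IRModule.from_expr(out)
    print(mod)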