Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[CINN] Enable CINN unittest on atan2, tile, top_k, where #54280

Merged
merged 16 commits into from
Jun 13, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions test/legacy_test/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -1190,12 +1190,17 @@ set(TEST_CINN_OPS
test_roll_op
test_sum_op
test_elementwise_min_op
test_atan2_op
test_top_k_op
test_where_op
test_take_along_axis_op
test_arg_min_max_op
test_reverse_op
test_flip
test_triangular_solve_op
test_scatter_nd_op
test_strided_slice_op
test_pool2d_op
test_instance_norm_op
test_cumsum_op
test_pad_op
Expand Down
13 changes: 9 additions & 4 deletions test/legacy_test/test_atan2_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ class TestAtan2(OpTest):
def setUp(self):
self.op_type = "atan2"
self.python_api = paddle.atan2
self.check_cinn = True
self.init_dtype()

x1 = np.random.uniform(-1, -0.1, [15, 17]).astype(self.dtype)
Expand All @@ -44,10 +45,10 @@ def setUp(self):
self.outputs = {'Out': out}

def test_check_grad(self):
self.check_grad(['X1', 'X2'], 'Out')
self.check_grad(['X1', 'X2'], 'Out', check_cinn=self.check_cinn)

def test_check_output(self):
self.check_output()
self.check_output(check_cinn=self.check_cinn)

def init_dtype(self):
self.dtype = np.float64
Expand All @@ -67,6 +68,7 @@ def test_check_grad(self):
self.inputs['X2'],
1 / self.inputs['X1'].size,
),
check_cinn=self.check_cinn,
)


Expand Down Expand Up @@ -139,6 +141,7 @@ def setUp(self):
self.op_type = 'atan2'
self.python_api = paddle.atan2
self.dtype = np.uint16
self.check_cinn = True
x1 = np.random.uniform(-1, -0.1, [15, 17]).astype('float32')
x2 = np.random.uniform(0.1, 1, [15, 17]).astype('float32')
out = np.arctan2(x1, x2)
Expand All @@ -151,11 +154,13 @@ def setUp(self):

def test_check_output(self):
place = core.CUDAPlace(0)
self.check_output_with_place(place)
self.check_output_with_place(place, check_cinn=self.check_cinn)

def test_check_grad(self):
place = core.CUDAPlace(0)
self.check_grad_with_place(place, ['X1', 'X2'], 'Out')
self.check_grad_with_place(
place, ['X1', 'X2'], 'Out', check_cinn=self.check_cinn
)


class TestAtan2Error(unittest.TestCase):
Expand Down
18 changes: 15 additions & 3 deletions test/legacy_test/test_pool2d_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -422,7 +422,10 @@ def test_check_output(self):
if self.has_cudnn():
place = core.CUDAPlace(0)
self.check_output_with_place(
place, atol=1e-5, check_dygraph=(not self.use_mkldnn)
place,
atol=1e-5,
check_dygraph=(not self.use_mkldnn),
check_cinn=True,
)
else:
self.check_output(check_dygraph=(not self.use_mkldnn))
Expand All @@ -438,6 +441,7 @@ def test_check_grad(self):
{'X'},
'Out',
check_dygraph=(not self.use_mkldnn),
check_cinn=True,
)
elif self.pool_type != "max":
self.check_grad(
Expand Down Expand Up @@ -587,6 +591,7 @@ def test_check_output(self):
self.check_output_with_place(
place,
check_dygraph=(not self.use_mkldnn),
check_cinn=True,
)

def test_check_grad(self):
Expand All @@ -602,6 +607,7 @@ def test_check_grad(self):
{'X'},
'Out',
check_dygraph=(not self.use_mkldnn),
check_cinn=True,
)

cls_name = "{}_{}".format(parent.__name__, "CUDNNFp16Op")
Expand All @@ -626,6 +632,7 @@ def test_check_output(self):
self.check_output_with_place(
place,
check_dygraph=(not self.use_mkldnn),
check_cinn=True,
)

def test_check_grad(self):
Expand All @@ -641,6 +648,7 @@ def test_check_grad(self):
{'X'},
'Out',
check_dygraph=(not self.use_mkldnn),
check_cinn=True,
)

cls_name = "{}_{}".format(parent.__name__, "Fp16Op")
Expand All @@ -663,6 +671,7 @@ def test_check_output(self):
self.check_output_with_place(
place,
check_dygraph=(not self.use_mkldnn),
check_cinn=True,
)

def test_check_grad(self):
Expand All @@ -673,6 +682,7 @@ def test_check_grad(self):
{'X'},
'Out',
check_dygraph=(not self.use_mkldnn),
check_cinn=True,
)

cls_name = "{}_{}".format(parent.__name__, "Bf16Op")
Expand Down Expand Up @@ -1002,10 +1012,12 @@ def test_check_grad(self):
if self.has_cudnn() and self.pool_type == "max":
place = core.CUDAPlace(0)
self.check_grad_with_place(
place, {'X'}, 'Out', max_relative_error=1.00
place, {'X'}, 'Out', max_relative_error=1.00, check_cinn=True
)
elif self.pool_type == "max":
self.check_grad({'X'}, 'Out', max_relative_error=1.00)
self.check_grad(
{'X'}, 'Out', max_relative_error=1.00, check_cinn=True
)


class TestCase5_channel_last_Max(TestCase5_Max):
Expand Down
12 changes: 8 additions & 4 deletions test/legacy_test/test_take_along_axis_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ def setUp(self):
self.init_data()
self.op_type = "take_along_axis"
self.python_api = paddle.tensor.take_along_axis
self.check_cinn = True
self.xnp = np.random.random(self.x_shape).astype(self.x_type)
self.target = np.take_along_axis(self.xnp, self.index, self.axis)
broadcast_shape_list = list(self.x_shape)
Expand All @@ -42,10 +43,10 @@ def setUp(self):
self.outputs = {'Result': self.target}

def test_check_output(self):
self.check_output()
self.check_output(check_cinn=self.check_cinn)

def test_check_grad(self):
self.check_grad(['Input'], 'Result')
self.check_grad(['Input'], 'Result', check_cinn=self.check_cinn)

def init_data(self):
self.x_type = "float64"
Expand Down Expand Up @@ -81,6 +82,7 @@ def setUp(self):
self.init_data()
self.op_type = "take_along_axis"
self.python_api = paddle.tensor.take_along_axis
self.check_cinn = True
self.xnp = np.random.random(self.x_shape).astype(self.x_type)
self.target = np.take_along_axis(self.xnp, self.index, self.axis)
broadcast_shape_list = list(self.x_shape)
Expand All @@ -99,10 +101,12 @@ def setUp(self):
self.place = core.CUDAPlace(0)

def test_check_output(self):
self.check_output_with_place(self.place)
self.check_output_with_place(self.place, check_cinn=self.check_cinn)

def test_check_grad(self):
self.check_grad_with_place(self.place, ['Input'], 'Result')
self.check_grad_with_place(
self.place, ['Input'], 'Result', check_cinn=self.check_cinn
)

def init_data(self):
self.dtype = np.uint16
Expand Down
58 changes: 50 additions & 8 deletions test/legacy_test/test_tile_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,14 +40,14 @@ def setUp(self):
self.outputs = {'Out': output}

def if_enable_cinn(self):
pass
self.check_cinn = True

def init_data(self):
self.ori_shape = [100]
self.repeat_times = [2]

def test_check_output(self):
self.check_output()
self.check_output(check_cinn=self.check_cinn)

def test_check_grad(self):
self.check_grad(['X'], 'Out', check_prim=True)
Expand All @@ -59,6 +59,7 @@ def init_data(self):
self.repeat_times = []

def if_enable_cinn(self):
self.check_cinn = False
self.enable_cinn = False


Expand All @@ -68,6 +69,7 @@ def init_data(self):
self.repeat_times = [2]

def if_enable_cinn(self):
self.check_cinn = False
self.enable_cinn = False


Expand All @@ -77,6 +79,7 @@ def init_data(self):
self.repeat_times = [2, 3]

def if_enable_cinn(self):
self.check_cinn = False
self.enable_cinn = False


Expand All @@ -86,38 +89,57 @@ def init_data(self):
self.ori_shape = [120]
self.repeat_times = [2, 2]

def if_enable_cinn(self):
self.check_cinn = True


class TestTileOpRank2(TestTileOpRank1):
def init_data(self):
self.ori_shape = [12, 14]
self.repeat_times = [2, 3]

def if_enable_cinn(self):
self.check_cinn = True


class TestTileOpRank3_Corner(TestTileOpRank1):
def init_data(self):
self.ori_shape = (2, 10, 5)
self.repeat_times = (1, 1, 1)

def if_enable_cinn(self):
self.check_cinn = True


class TestTileOpRank3_Corner2(TestTileOpRank1):
def init_data(self):
self.ori_shape = (2, 10, 5)
self.repeat_times = (2, 2)

def if_enable_cinn(self):
self.check_cinn = True


class TestTileOpRank3(TestTileOpRank1):
def init_data(self):
self.ori_shape = (2, 4, 15)
self.repeat_times = (2, 1, 4)

def if_enable_cinn(self):
self.check_cinn = True


class TestTileOpRank4(TestTileOpRank1):
def init_data(self):
self.ori_shape = (2, 4, 5, 7)
self.repeat_times = (3, 2, 1, 2)

def if_enable_cinn(self):
self.check_cinn = True


# Situation 2: repeat_times is a list (with tensor)
# CINN does not support repeat_times as a tensor yet
class TestTileOpRank1_tensor_attr(OpTest):
def setUp(self):
self.op_type = "tile"
Expand Down Expand Up @@ -164,6 +186,7 @@ def init_data(self):


# Situation 3: repeat_times is a tensor
# CINN does not support repeat_times as a tensor yet
class TestTileOpRank1_tensor(OpTest):
def setUp(self):
self.op_type = "tile"
Expand Down Expand Up @@ -206,9 +229,13 @@ def setUp(self):
self.attrs = {'repeat_times': [2, 1, 4]}
output = np.tile(self.inputs['X'], (2, 1, 4))
self.outputs = {'Out': output}
self.if_enable_cinn()

def if_enable_cinn(self):
self.check_cinn = True

def test_check_output(self):
self.check_output()
self.check_output(check_cinn=self.check_cinn)


class TestTileFP16OP(OpTest):
Expand All @@ -217,22 +244,25 @@ def setUp(self):
self.dtype = np.float16
self.python_api = paddle.tile
self.prim_op_type = "prim"
self.enable_cinn = True
self.public_python_api = paddle.tile
self.init_data()
x = np.random.uniform(10, size=self.ori_shape).astype(self.dtype)
output = np.tile(x, self.repeat_times)
self.inputs = {'X': x}
self.attrs = {'repeat_times': self.repeat_times}
self.outputs = {'Out': output}
self.if_enable_cinn()

def if_enable_cinn(self):
self.check_cinn = True

def init_data(self):
self.dtype = np.float16
self.ori_shape = [100, 4, 5]
self.repeat_times = [2, 1, 4]

def test_check_output(self):
self.check_output()
self.check_output(check_cinn=self.check_cinn)

def test_check_grad(self):
self.check_grad(['X'], 'Out', check_prim=True)
Expand All @@ -256,10 +286,14 @@ def setUp(self):
self.inputs = {'X': convert_float_to_uint16(x)}
self.attrs = {'repeat_times': self.repeat_times}
self.outputs = {'Out': convert_float_to_uint16(output)}
self.if_enable_cinn()

def if_enable_cinn(self):
self.check_cinn = True

def test_check_output(self):
place = core.CUDAPlace(0)
self.check_output_with_place(place)
self.check_output_with_place(place, check_cinn=self.check_cinn)

def init_data(self):
self.dtype = np.uint16
Expand All @@ -280,9 +314,13 @@ def setUp(self):
self.attrs = {'repeat_times': [2, 1, 4]}
output = np.tile(self.inputs['X'], (2, 1, 4))
self.outputs = {'Out': output}
self.if_enable_cinn()

def if_enable_cinn(self):
self.check_cinn = True

def test_check_output(self):
self.check_output()
self.check_output(check_cinn=self.check_cinn)


# Situation 6: input x is Integer
Expand All @@ -296,9 +334,13 @@ def setUp(self):
self.attrs = {'repeat_times': [2, 1, 4]}
output = np.tile(self.inputs['X'], (2, 1, 4))
self.outputs = {'Out': output}
self.if_enable_cinn()

def if_enable_cinn(self):
self.check_cinn = True

def test_check_output(self):
self.check_output()
self.check_output(check_cinn=self.check_cinn)


class TestTileError(unittest.TestCase):
Expand Down
Loading