Skip to content

Commit

Permalink
[CINN] Enable CINN unittest on atan2, tile, top_k, where (#54280)
Browse files Browse the repository at this point in the history
* Enable check_cinn on atan2, tile, top_k and where

* Update cmakelists in legacy_test

* Reformat code

* Enable check_cinn on op take_along_axis legacy test

* Enable check_cinn on pool2d

* Remove check_cinn=False

* Try to fix tile test error

* Rename enable_cinn to test_cinn

* Refactor test_tile_op

* Replace all enable_cinn with check_cinn

* Revert pool2d test timeout

* Remove check_prim and use enable_cinn
  • Loading branch information
FisherWY authored Jun 13, 2023
1 parent 1a30fe5 commit cf7cd24
Show file tree
Hide file tree
Showing 7 changed files with 100 additions and 25 deletions.
5 changes: 5 additions & 0 deletions test/legacy_test/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -1191,12 +1191,17 @@ set(TEST_CINN_OPS
test_roll_op
test_sum_op
test_elementwise_min_op
test_atan2_op
test_top_k_op
test_where_op
test_take_along_axis_op
test_arg_min_max_op
test_reverse_op
test_flip
test_triangular_solve_op
test_scatter_nd_op
test_strided_slice_op
test_pool2d_op
test_instance_norm_op
test_cumsum_op
test_pad_op
Expand Down
13 changes: 9 additions & 4 deletions test/legacy_test/test_atan2_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ class TestAtan2(OpTest):
def setUp(self):
self.op_type = "atan2"
self.python_api = paddle.atan2
self.check_cinn = True
self.init_dtype()

x1 = np.random.uniform(-1, -0.1, [15, 17]).astype(self.dtype)
Expand All @@ -44,10 +45,10 @@ def setUp(self):
self.outputs = {'Out': out}

def test_check_grad(self):
self.check_grad(['X1', 'X2'], 'Out')
self.check_grad(['X1', 'X2'], 'Out', check_cinn=self.check_cinn)

def test_check_output(self):
self.check_output()
self.check_output(check_cinn=self.check_cinn)

def init_dtype(self):
self.dtype = np.float64
Expand All @@ -67,6 +68,7 @@ def test_check_grad(self):
self.inputs['X2'],
1 / self.inputs['X1'].size,
),
check_cinn=self.check_cinn,
)


Expand Down Expand Up @@ -139,6 +141,7 @@ def setUp(self):
self.op_type = 'atan2'
self.python_api = paddle.atan2
self.dtype = np.uint16
self.check_cinn = True
x1 = np.random.uniform(-1, -0.1, [15, 17]).astype('float32')
x2 = np.random.uniform(0.1, 1, [15, 17]).astype('float32')
out = np.arctan2(x1, x2)
Expand All @@ -151,11 +154,13 @@ def setUp(self):

def test_check_output(self):
place = core.CUDAPlace(0)
self.check_output_with_place(place)
self.check_output_with_place(place, check_cinn=self.check_cinn)

def test_check_grad(self):
place = core.CUDAPlace(0)
self.check_grad_with_place(place, ['X1', 'X2'], 'Out')
self.check_grad_with_place(
place, ['X1', 'X2'], 'Out', check_cinn=self.check_cinn
)


class TestAtan2Error(unittest.TestCase):
Expand Down
18 changes: 15 additions & 3 deletions test/legacy_test/test_pool2d_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -421,7 +421,10 @@ def test_check_output(self):
if self.has_cudnn():
place = core.CUDAPlace(0)
self.check_output_with_place(
place, atol=1e-5, check_dygraph=(not self.use_mkldnn)
place,
atol=1e-5,
check_dygraph=(not self.use_mkldnn),
check_cinn=True,
)
else:
self.check_output(check_dygraph=(not self.use_mkldnn))
Expand All @@ -437,6 +440,7 @@ def test_check_grad(self):
{'X'},
'Out',
check_dygraph=(not self.use_mkldnn),
check_cinn=True,
)
elif self.pool_type != "max":
self.check_grad(
Expand Down Expand Up @@ -586,6 +590,7 @@ def test_check_output(self):
self.check_output_with_place(
place,
check_dygraph=(not self.use_mkldnn),
check_cinn=True,
)

def test_check_grad(self):
Expand All @@ -601,6 +606,7 @@ def test_check_grad(self):
{'X'},
'Out',
check_dygraph=(not self.use_mkldnn),
check_cinn=True,
)

cls_name = "{}_{}".format(parent.__name__, "CUDNNFp16Op")
Expand All @@ -625,6 +631,7 @@ def test_check_output(self):
self.check_output_with_place(
place,
check_dygraph=(not self.use_mkldnn),
check_cinn=True,
)

def test_check_grad(self):
Expand All @@ -640,6 +647,7 @@ def test_check_grad(self):
{'X'},
'Out',
check_dygraph=(not self.use_mkldnn),
check_cinn=True,
)

cls_name = "{}_{}".format(parent.__name__, "Fp16Op")
Expand All @@ -662,6 +670,7 @@ def test_check_output(self):
self.check_output_with_place(
place,
check_dygraph=(not self.use_mkldnn),
check_cinn=True,
)

def test_check_grad(self):
Expand All @@ -672,6 +681,7 @@ def test_check_grad(self):
{'X'},
'Out',
check_dygraph=(not self.use_mkldnn),
check_cinn=True,
)

cls_name = "{}_{}".format(parent.__name__, "Bf16Op")
Expand Down Expand Up @@ -1001,10 +1011,12 @@ def test_check_grad(self):
if self.has_cudnn() and self.pool_type == "max":
place = core.CUDAPlace(0)
self.check_grad_with_place(
place, {'X'}, 'Out', max_relative_error=1.00
place, {'X'}, 'Out', max_relative_error=1.00, check_cinn=True
)
elif self.pool_type == "max":
self.check_grad({'X'}, 'Out', max_relative_error=1.00)
self.check_grad(
{'X'}, 'Out', max_relative_error=1.00, check_cinn=True
)


class TestCase5_channel_last_Max(TestCase5_Max):
Expand Down
12 changes: 8 additions & 4 deletions test/legacy_test/test_take_along_axis_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ def setUp(self):
self.init_data()
self.op_type = "take_along_axis"
self.python_api = paddle.tensor.take_along_axis
self.check_cinn = True
self.xnp = np.random.random(self.x_shape).astype(self.x_type)
self.target = np.take_along_axis(self.xnp, self.index, self.axis)
broadcast_shape_list = list(self.x_shape)
Expand All @@ -42,10 +43,10 @@ def setUp(self):
self.outputs = {'Result': self.target}

def test_check_output(self):
self.check_output()
self.check_output(check_cinn=self.check_cinn)

def test_check_grad(self):
self.check_grad(['Input'], 'Result')
self.check_grad(['Input'], 'Result', check_cinn=self.check_cinn)

def init_data(self):
self.x_type = "float64"
Expand Down Expand Up @@ -81,6 +82,7 @@ def setUp(self):
self.init_data()
self.op_type = "take_along_axis"
self.python_api = paddle.tensor.take_along_axis
self.check_cinn = True
self.xnp = np.random.random(self.x_shape).astype(self.x_type)
self.target = np.take_along_axis(self.xnp, self.index, self.axis)
broadcast_shape_list = list(self.x_shape)
Expand All @@ -99,10 +101,12 @@ def setUp(self):
self.place = core.CUDAPlace(0)

def test_check_output(self):
self.check_output_with_place(self.place)
self.check_output_with_place(self.place, check_cinn=self.check_cinn)

def test_check_grad(self):
self.check_grad_with_place(self.place, ['Input'], 'Result')
self.check_grad_with_place(
self.place, ['Input'], 'Result', check_cinn=self.check_cinn
)

def init_data(self):
self.dtype = np.uint16
Expand Down
58 changes: 50 additions & 8 deletions test/legacy_test/test_tile_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,14 +40,14 @@ def setUp(self):
self.outputs = {'Out': output}

def if_enable_cinn(self):
pass
self.check_cinn = True

def init_data(self):
self.ori_shape = [100]
self.repeat_times = [2]

def test_check_output(self):
self.check_output()
self.check_output(check_cinn=self.check_cinn)

def test_check_grad(self):
self.check_grad(['X'], 'Out', check_prim=True)
Expand All @@ -59,6 +59,7 @@ def init_data(self):
self.repeat_times = []

def if_enable_cinn(self):
self.check_cinn = False
self.enable_cinn = False


Expand All @@ -68,6 +69,7 @@ def init_data(self):
self.repeat_times = [2]

def if_enable_cinn(self):
self.check_cinn = False
self.enable_cinn = False


Expand All @@ -77,6 +79,7 @@ def init_data(self):
self.repeat_times = [2, 3]

def if_enable_cinn(self):
self.check_cinn = False
self.enable_cinn = False


Expand All @@ -86,38 +89,57 @@ def init_data(self):
self.ori_shape = [120]
self.repeat_times = [2, 2]

def if_enable_cinn(self):
self.check_cinn = True


class TestTileOpRank2(TestTileOpRank1):
def init_data(self):
self.ori_shape = [12, 14]
self.repeat_times = [2, 3]

def if_enable_cinn(self):
self.check_cinn = True


class TestTileOpRank3_Corner(TestTileOpRank1):
def init_data(self):
self.ori_shape = (2, 10, 5)
self.repeat_times = (1, 1, 1)

def if_enable_cinn(self):
self.check_cinn = True


class TestTileOpRank3_Corner2(TestTileOpRank1):
def init_data(self):
self.ori_shape = (2, 10, 5)
self.repeat_times = (2, 2)

def if_enable_cinn(self):
self.check_cinn = True


class TestTileOpRank3(TestTileOpRank1):
def init_data(self):
self.ori_shape = (2, 4, 15)
self.repeat_times = (2, 1, 4)

def if_enable_cinn(self):
self.check_cinn = True


class TestTileOpRank4(TestTileOpRank1):
def init_data(self):
self.ori_shape = (2, 4, 5, 7)
self.repeat_times = (3, 2, 1, 2)

def if_enable_cinn(self):
self.check_cinn = True


# Situation 2: repeat_times is a list (with tensor)
# CINN not support repeat_times is a tensor now
class TestTileOpRank1_tensor_attr(OpTest):
def setUp(self):
self.op_type = "tile"
Expand Down Expand Up @@ -164,6 +186,7 @@ def init_data(self):


# Situation 3: repeat_times is a tensor
# CINN not support repeat_times is a tensor now
class TestTileOpRank1_tensor(OpTest):
def setUp(self):
self.op_type = "tile"
Expand Down Expand Up @@ -206,9 +229,13 @@ def setUp(self):
self.attrs = {'repeat_times': [2, 1, 4]}
output = np.tile(self.inputs['X'], (2, 1, 4))
self.outputs = {'Out': output}
self.if_enable_cinn()

def if_enable_cinn(self):
self.check_cinn = True

def test_check_output(self):
self.check_output()
self.check_output(check_cinn=self.check_cinn)


class TestTileFP16OP(OpTest):
Expand All @@ -217,22 +244,25 @@ def setUp(self):
self.dtype = np.float16
self.python_api = paddle.tile
self.prim_op_type = "prim"
self.enable_cinn = True
self.public_python_api = paddle.tile
self.init_data()
x = np.random.uniform(10, size=self.ori_shape).astype(self.dtype)
output = np.tile(x, self.repeat_times)
self.inputs = {'X': x}
self.attrs = {'repeat_times': self.repeat_times}
self.outputs = {'Out': output}
self.if_enable_cinn()

def if_enable_cinn(self):
self.check_cinn = True

def init_data(self):
self.dtype = np.float16
self.ori_shape = [100, 4, 5]
self.repeat_times = [2, 1, 4]

def test_check_output(self):
self.check_output()
self.check_output(check_cinn=self.check_cinn)

def test_check_grad(self):
self.check_grad(['X'], 'Out', check_prim=True)
Expand All @@ -256,10 +286,14 @@ def setUp(self):
self.inputs = {'X': convert_float_to_uint16(x)}
self.attrs = {'repeat_times': self.repeat_times}
self.outputs = {'Out': convert_float_to_uint16(output)}
self.if_enable_cinn()

def if_enable_cinn(self):
self.check_cinn = True

def test_check_output(self):
place = core.CUDAPlace(0)
self.check_output_with_place(place)
self.check_output_with_place(place, check_cinn=self.check_cinn)

def init_data(self):
self.dtype = np.uint16
Expand All @@ -280,9 +314,13 @@ def setUp(self):
self.attrs = {'repeat_times': [2, 1, 4]}
output = np.tile(self.inputs['X'], (2, 1, 4))
self.outputs = {'Out': output}
self.if_enable_cinn()

def if_enable_cinn(self):
self.check_cinn = True

def test_check_output(self):
self.check_output()
self.check_output(check_cinn=self.check_cinn)


# Situation 6: input x is Integer
Expand All @@ -296,9 +334,13 @@ def setUp(self):
self.attrs = {'repeat_times': [2, 1, 4]}
output = np.tile(self.inputs['X'], (2, 1, 4))
self.outputs = {'Out': output}
self.if_enable_cinn()

def if_enable_cinn(self):
self.check_cinn = True

def test_check_output(self):
self.check_output()
self.check_output(check_cinn=self.check_cinn)


class TestTileError(unittest.TestCase):
Expand Down
Loading

0 comments on commit cf7cd24

Please sign in to comment.