diff --git a/paddle/cinn/hlir/op/nn.cc b/paddle/cinn/hlir/op/nn.cc
index 757d9788561b7..d5c673434251e 100644
--- a/paddle/cinn/hlir/op/nn.cc
+++ b/paddle/cinn/hlir/op/nn.cc
@@ -78,8 +78,7 @@ std::shared_ptr<OpStrategy> StrategyForRelu(
 std::vector<shape_t> InferShapeForRelu(
     const std::vector<shape_t> &inputs_shape,
     const framework::AttrMapType &attrs) {
-  CHECK(!inputs_shape.empty())
-      << "The input's shape is empty! Please check again.";
+  CHECK(!inputs_shape.empty()) << "The inputs are empty! Please check again.";
   std::vector<shape_t> res{inputs_shape[0]};
   return res;
 }
@@ -2352,8 +2351,7 @@ std::shared_ptr<OpStrategy> StrategyForSoftmax(
 std::vector<std::vector<int>> InferShapeForSoftmax(
     const std::vector<std::vector<int>> &inputs_shape,
     const framework::AttrMapType &attrs) {
-  CHECK(!inputs_shape.empty() && !inputs_shape[0].empty())
-      << "The input's shape size is 0! Please check again.";
+  CHECK(!inputs_shape.empty()) << "The inputs are empty! Please check again.";
   std::vector<std::vector<int>> res{inputs_shape[0]};
   return res;
 }
@@ -2433,8 +2431,7 @@ std::shared_ptr<OpStrategy> StrategyForDropoutInfer(
 std::vector<std::vector<int>> InferShapeForDropoutInfer(
     const std::vector<std::vector<int>> &inputs_shape,
     const framework::AttrMapType &attrs) {
-  CHECK(!inputs_shape.empty())
-      << "The input's shape size is 0! Please check again.";
+  CHECK(!inputs_shape.empty()) << "The inputs are empty! Please check again.";
   float dropout_prob = 0;
   std::string dropout_implementation = "downgrade_in_infer";
   for (auto &iter : attrs) {
diff --git a/test/cinn/ops/test_zero_dim_tensor.py b/test/cinn/ops/test_zero_dim_tensor.py
index c446ef7d8c697..f4a6faa10cd4c 100644
--- a/test/cinn/ops/test_zero_dim_tensor.py
+++ b/test/cinn/ops/test_zero_dim_tensor.py
@@ -575,6 +575,9 @@ def test_check_results(self):
 create_unit_test(
     TestUnaryOp, "reciprocal", paddle.reciprocal, "builder.reciprocal"
 )
+create_unit_test(
+    TestUnaryOp, "softmax", paddle.nn.functional.softmax, "builder.softmax"
+)
 
 
 # acosh requires input value > 1.0, specific init_input instead of using create_unit_test
@@ -1118,5 +1121,43 @@ def test_check_results(self):
         self.check_outputs_and_grads()
 
 
+@OpTestTool.skip_if(
+    not is_compiled_with_cuda(), "x86 test will be skipped due to timeout."
+)
+class TestFlipOp(OpTest):
+    def setUp(self):
+        np.random.seed(2023)
+        self.dtype = "float32"
+        self.init_input()
+
+    def init_input(self):
+        self.inputs = {
+            "x": np.random.randint(-10, 10, []).astype(self.dtype),
+        }
+        self.target_shape = ()
+
+    def build_paddle_program(self, target):
+        x = paddle.to_tensor(self.inputs["x"], stop_gradient=False)
+        out = paddle.flip(x, axis=[])
+
+        self.paddle_outputs = [out]
+
+    def build_cinn_program(self, target):
+        builder = NetBuilder("flip_op")
+        x = builder.create_input(
+            cinn_dtype_convert(self.dtype), self.inputs["x"].shape, "x"
+        )
+        out = builder.flip(x, [])
+
+        prog = builder.build()
+        res = self.get_cinn_output(prog, target, [x], [self.inputs["x"]], [out])
+
+        self.cinn_outputs = res
+        self.assertEqual(res[0].shape, self.target_shape)
+
+    def test_check_results(self):
+        self.check_outputs_and_grads()
+
+
 if __name__ == "__main__":
     unittest.main()
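
For context on the nn.cc hunks: the new zero-dim tests (target_shape = ()) suggest that a rank-0 tensor is represented by an empty shape vector, so the old `!inputs_shape[0].empty()` guard in InferShapeForSoftmax would reject a valid 0-d input. The standalone C++ sketch below illustrates this; it is a minimal illustration under that assumed representation, not CINN code.

// Minimal sketch: why the relaxed check admits zero-dim tensors.
// Assumes a rank-0 tensor's shape is an empty vector, as the tests imply.
#include <cassert>
#include <vector>

int main() {
  // One input whose shape is {} -- i.e. a rank-0 (zero-dim) tensor.
  std::vector<std::vector<int>> inputs_shape = {{}};
  // Old guard: also required the first shape to be non-empty,
  // which spuriously fails for a 0-d input.
  bool old_guard = !inputs_shape.empty() && !inputs_shape[0].empty();
  // New guard: only requires that at least one input shape was provided.
  bool new_guard = !inputs_shape.empty();
  assert(!old_guard);  // 0-d input rejected by the old check
  assert(new_guard);   // accepted by the new check
  return 0;
}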