From 6f106f9084538025dbca889e24dcccb4777394a7 Mon Sep 17 00:00:00 2001 From: zxcd <228587199@qq.com> Date: Thu, 9 May 2024 15:07:36 +0800 Subject: [PATCH] add type promotion for complex and real number. (#63842) * add type promotion for complex and real number. * fix * reduce api support * add more api support * fix * fix * remove matmul * add T+S logic. * fix bug * fix unittest * fix * fix * fix unittest * fix gumbel * rm print * fix more unittests. * fix test_llama_group_log_softmax.py * fix bug, and add 0-d + 0-d logic. * rm print * fix behavior of bool and int * add unittest for all type promotion. * rm unintest which is unsupport dtype * fix * fix * add error unittest * fix increase unittest * bug fix * fixed by comment * remove useless code. * fix * fix * fix TypePromotionForZeroDimTensor * add inplace API support, add special case can skip type promotion (add x=float32,y=float16/bfloat16). * add broatcast support for MultiPrecisionAddKernelImpl. --- .../forwards/multiply_fwd_func.cc | 20 +- .../generator/eager_gen.py | 83 +- paddle/fluid/eager/type_promotion_utils.h | 11 + paddle/fluid/pybind/eager_math_op_patch.cc | 1091 +++--- paddle/fluid/pybind/pybind.cc | 14 +- paddle/phi/common/type_promotion.h | 75 +- paddle/phi/kernels/kps/elementwise_kernel.cu | 10 +- python/paddle/base/framework.py | 56 +- python/paddle/base/layers/math_op_patch.py | 68 +- python/paddle/distribution/gumbel.py | 13 +- python/paddle/metric/metrics.py | 2 +- python/paddle/nn/functional/loss.py | 15 +- python/paddle/tensor/linalg.py | 7 +- python/paddle/tensor/logic.py | 5 - python/paddle/tensor/math.py | 12 +- python/paddle/tensor/stat.py | 6 +- test/auto_parallel/test_to_static.py | 4 +- .../test_math_op_patch_var_base.py | 4 +- ...est_save_inference_model_conditional_op.py | 2 +- .../legacy_test/test_sparse_sum_op.py | 10 +- ...est_tensor_scalar_type_promotion_static.py | 434 ++- .../legacy_test/test_tensor_type_promotion.py | 3372 ++++++++++++++++- test/dygraph_to_static/test_break_continue.py | 10 +- test/dygraph_to_static/test_for_enumerate.py | 6 +- test/indexing/test_setitem.py | 2 +- .../symbolic/test_llama_group_log_softmax.py | 4 +- test/legacy_test/test_elementwise_add_op.py | 2 +- test/legacy_test/test_elementwise_sub_op.py | 4 +- test/legacy_test/test_modelaverage.py | 12 +- test/legacy_test/test_multiply.py | 2 +- test/legacy_test/test_sparse_addmm_op.py | 8 +- test/legacy_test/test_sparse_matmul_op.py | 10 +- test/legacy_test/test_sparse_mv_op.py | 8 +- ...st_tensor_scalar_type_promotion_dynamic.py | 423 ++- test/legacy_test/test_trapezoid.py | 2 +- test/sot/test_18_tensor_method.py | 7 +- 36 files changed, 5013 insertions(+), 801 deletions(-) diff --git a/paddle/fluid/eager/api/manual/eager_manual/forwards/multiply_fwd_func.cc b/paddle/fluid/eager/api/manual/eager_manual/forwards/multiply_fwd_func.cc index aa18f8cd4acb8..cfea756cf02d5 100644 --- a/paddle/fluid/eager/api/manual/eager_manual/forwards/multiply_fwd_func.cc +++ b/paddle/fluid/eager/api/manual/eager_manual/forwards/multiply_fwd_func.cc @@ -70,7 +70,7 @@ paddle::Tensor multiply_ad_func(const paddle::Tensor& x, } // Type promotion Logic - if (phi::NeedTypePromotion(x.dtype(), y.dtype())) { + if (phi::NeedTypePromotion("multiply", x.dtype(), y.dtype())) { VLOG(5) << "got different data type, run type promotion automatically."; LOG_FIRST_N(WARNING, 1) << "got different data type, run type promotion " @@ -247,6 +247,22 @@ paddle::Tensor& multiply__ad_func(paddle::Tensor& x, // NOLINT VLOG(5) << " No AMP for multiply__ad_func because it 
is a inplace or cast api. "; + + // Type promotion Logic + if (phi::NeedTypePromotion("multiply_", x.dtype(), y.dtype())) { + VLOG(5) << "got different data type, run type promotion automatically."; + LOG_FIRST_N(WARNING, 1) + << "got different data type, run type promotion " + "automatically, this may cause data type been changed."; + auto op_name = phi::TransToFluidOpName("multiply_"); + auto promotion_type = phi::GetPromoteDtype(op_name, x.dtype(), y.dtype()); + + x = egr::PromoteCastInplace("x", x, promotion_type); + auto new_y = egr::PromoteCast("y", y, promotion_type); + + return multiply__ad_func(x, new_y); + } + // Layout autotune if (egr::Controller::Instance().UseLayoutAutoTune()) { @@ -424,7 +440,7 @@ paddle::Tensor multiply_ad_func(const paddle::Tensor& x, } // Type promotion Logic - if (phi::NeedTypePromotion(x.dtype(), y.dtype())) { + if (phi::NeedTypePromotion("multiply", x.dtype(), y.dtype())) { VLOG(5) << "got different data type, run type promotion automatically."; LOG_FIRST_N(WARNING, 1) << "got different data type, run type promotion " diff --git a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py index c272e09a9579f..d7379ffb4e444 100644 --- a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py +++ b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py @@ -85,11 +85,50 @@ type_promote_white_list = { "add": ["x", "y"], "subtract": ["x", "y"], + "divide": ["x", "y"], + "floor_divide": ["x", "y"], + "elementwise_pow": ["x", "y"], "where": ["x", "y"], + "equal": ["x", "y"], + "not_equal": ["x", "y"], + "less_than": ["x", "y"], + "less_equal": ["x", "y"], + "greater_than": ["x", "y"], + "greater_equal": ["x", "y"], + "logical_and": ["x", "y"], + "logical_or": ["x", "y"], + "logical_xor": ["x", "y"], + "fmax": ["x", "y"], + "fmin": ["x", "y"], + "maximum": ["x", "y"], + "minimum": ["x", "y"], + "remainder": ["x", "y"], + "huber_loss": ["input", "label"], + "nextafter": ["x", "y"], + "atan2": ["x", "y"], +} + +type_promote_inplace_white_list = { + "add_": ["x", "y"], + "subtract_": ["x", "y"], + "divide_": ["x", "y"], + "floor_divide_": ["x", "y"], + "where_": ["x", "y"], + "equal_": ["x", "y"], + "not_equal_": ["x", "y"], + "less_than_": ["x", "y"], + "less_equal_": ["x", "y"], + "greater_than_": ["x", "y"], + "greater_equal_": ["x", "y"], + "logical_and_": ["x", "y"], + "logical_or_": ["x", "y"], + "logical_xor_": ["x", "y"], + "remainder_": ["x", "y"], } # dict of special api that forward api's output will affect backward api's output # backward api's output usually affected by backward api's input + special_prune_dict = { "matmul_grad": {"x": "grad_y", "y": "grad_x"}, } @@ -537,13 +576,13 @@ class {} : public egr::GradNodeBase {{ }} """ -TYPE_PROMOTION_LOGIC_TEMPLATE = """ if (phi::NeedTypePromotion({x}.dtype(), {y}.dtype())) {{ +TYPE_PROMOTION_LOGIC_TEMPLATE = """ if (phi::NeedTypePromotion({op_func_name}, {x}.dtype(), {y}.dtype())) {{ VLOG(5) << "got different data type, run type promotion automatically."; LOG_FIRST_N(WARNING, 1) << "got different data type, run type promotion automatically, this may cause data type been changed."; {op_name} auto promotion_type = phi::GetPromoteDtype(op_name, {x}.dtype(), {y}.dtype()); - auto new_{x} = egr::PromoteCast("{x}", {x}, promotion_type); + {x_cast} auto new_{y} = egr::PromoteCast("{y}", {y}, promotion_type); {return_value} @@ -1511,6 +1550,18 @@ def GenerateForwardDefinitionAndDeclaration(self, is_inplaced): 
type_promote_inputs_call_list[pos] = f"new_{name}" else: type_promote_inputs_call_list[pos] = f"{name}" + elif forward_api_name in type_promote_inplace_white_list: + if name in type_promote_inplace_white_list[forward_api_name]: + if ( + is_inplaced + and forward_inplace_map + and name in forward_inplace_map + ): + type_promote_inputs_call_list[pos] = f"{name}" + else: + type_promote_inputs_call_list[pos] = f"new_{name}" + else: + type_promote_inputs_call_list[pos] = f"{name}" if IsPlainTensorType(ttype): if is_optional: if ( @@ -1601,6 +1652,7 @@ def GenerateForwardDefinitionAndDeclaration(self, is_inplaced): for name, atype, default_val, pos in forward_attrs_list: inputs_call_list[pos] = name amp_inputs_call_list[pos] = name + type_promote_inputs_call_list[pos] = name if default_val is not None: inputs_args_declaration_list[ pos @@ -1846,6 +1898,7 @@ def GenerateForwardDefinitionAndDeclaration(self, is_inplaced): # Forward type promotion logic if forward_api_name in type_promote_white_list: # only support two inputs + op_func_name = f"\"{forward_api_name}\"" x = type_promote_white_list[forward_api_name][0] y = type_promote_white_list[forward_api_name][1] type_promote_inputs_call_args_str = ", ".join( @@ -1853,9 +1906,35 @@ def GenerateForwardDefinitionAndDeclaration(self, is_inplaced): ) type_promote_call_list = f"return {forward_ad_function_name}({type_promote_inputs_call_args_str});" + x_cast = f"auto new_{x} = egr::PromoteCast(\"{x}\", {x}, promotion_type);" + + type_promotion_logic_str = TYPE_PROMOTION_LOGIC_TEMPLATE.format( + op_func_name=op_func_name, + x=x, + y=y, + x_cast=x_cast, + op_name=kernel_trans2_op_name_str, + return_value=type_promote_call_list, + ) + elif forward_api_name in type_promote_inplace_white_list: + # only support two inputs + op_func_name = f"\"{forward_api_name}\"" + x = type_promote_inplace_white_list[forward_api_name][0] + y = type_promote_inplace_white_list[forward_api_name][1] + type_promote_inputs_call_args_str = ", ".join( + type_promote_inputs_call_list + ) + type_promote_call_list = f"return {forward_ad_function_name}({type_promote_inputs_call_args_str});" + + x_cast = ( + f"{x} = egr::PromoteCastInplace(\"{x}\", {x}, promotion_type);" + ) + type_promotion_logic_str = TYPE_PROMOTION_LOGIC_TEMPLATE.format( + op_func_name=op_func_name, x=x, y=y, + x_cast=x_cast, op_name=kernel_trans2_op_name_str, return_value=type_promote_call_list, ) diff --git a/paddle/fluid/eager/type_promotion_utils.h b/paddle/fluid/eager/type_promotion_utils.h index 3ef732bac78bf..7ab9965cd15c4 100644 --- a/paddle/fluid/eager/type_promotion_utils.h +++ b/paddle/fluid/eager/type_promotion_utils.h @@ -30,4 +30,15 @@ inline paddle::Tensor PromoteCast(const std::string& input_name, } } +inline paddle::Tensor PromoteCastInplace(const std::string& input_name, + paddle::Tensor& input, // NOLINT + const phi::DataType& dst_dtype, + bool trace_backward = true) { + if (input.dtype() != dst_dtype) { + return paddle::experimental::cast_(input, dst_dtype); + } else { + return input; + } +} + } // namespace egr diff --git a/paddle/fluid/pybind/eager_math_op_patch.cc b/paddle/fluid/pybind/eager_math_op_patch.cc index 17b36e9237e78..90cfe1db9f4bb 100644 --- a/paddle/fluid/pybind/eager_math_op_patch.cc +++ b/paddle/fluid/pybind/eager_math_op_patch.cc @@ -169,16 +169,13 @@ paddle::Tensor CallScalarFuction(const paddle::Tensor& self_tensor, double other, std::string op_type) { paddle::Tensor ret; + // scale_ad_func need sclar and bias with float type. 
if (op_type == "add" || op_type == "radd") { - ret = scale_ad_func( - self_tensor, phi::Scalar(1.0), static_cast(other), true); + ret = scale_ad_func(self_tensor, phi::Scalar(1.0), other, true); } else if (op_type == "sub") { - ret = scale_ad_func( - self_tensor, phi::Scalar(1.0), static_cast(-other), true); - + ret = scale_ad_func(self_tensor, phi::Scalar(1.0), -other, true); } else if (op_type == "rsub") { - ret = scale_ad_func( - self_tensor, phi::Scalar(-1.0), static_cast(other), true); + ret = scale_ad_func(self_tensor, phi::Scalar(-1.0), other, true); } else if (op_type == "mul") { ret = scale_ad_func(self_tensor, phi::Scalar(other), 0.0, true); } else if (op_type == "div") { @@ -190,6 +187,42 @@ paddle::Tensor CallScalarFuction(const paddle::Tensor& self_tensor, return ret; } +void TypePromotionForZeroDimTensor(std::string func, + paddle::Tensor& self_tensor, // NOLINT + paddle::Tensor& other_tensor // NOLINT +) { + if ((self_tensor.shape().size() == 0 || other_tensor.shape().size() == 0) && + self_tensor.dtype() != other_tensor.dtype()) { + VLOG(5) << "got 0-d tensor and need to do type promotion, x: " + << self_tensor.dtype() << " y: " << other_tensor.dtype(); + + phi::DataType promote_type; + // different major types or both 0-d tensor follow with T+T rule. + if (!is_common_dtype_for_scalar(self_tensor.dtype(), + other_tensor.dtype()) || + (self_tensor.shape().size() == 0 && other_tensor.shape().size() == 0)) { + promote_type = + GetPromoteDtype(func, self_tensor.dtype(), other_tensor.dtype()); + } else { + // common major types follow with tensor: int32(tensor) + int64(scalar) + // = int32 + if (self_tensor.shape().size() == 0) { + promote_type = other_tensor.dtype(); + } else { + promote_type = self_tensor.dtype(); + } + } + if (self_tensor.dtype() != promote_type) { + eager_gil_scoped_release guard; + self_tensor = cast_ad_func(self_tensor, promote_type); + } + if (other_tensor.dtype() != promote_type) { + eager_gil_scoped_release guard; + other_tensor = cast_ad_func(other_tensor, promote_type); + } + } +} + static PyObject* tensor__add__method(TensorObject* self, PyObject* args, PyObject* kwargs) { @@ -211,34 +244,51 @@ static PyObject* tensor__add__method(TensorObject* self, // 1. scalar exists cases if (PyFloat_Check(other_obj) || PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { - double other = 0.0; if (PyFloat_Check(other_obj)) { - other = CastPyArg2Double(other_obj, "__add__", 0); if (_supported_int_dtype_.find(self_tensor.dtype()) != _supported_int_dtype_.end()) { eager_gil_scoped_release guard; self_tensor = cast_ad_func(self_tensor, DataType::FLOAT32); } - } else if (PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { - other = CastPyArg2Double(other_obj, "__add__", 0); + } else if (PyCheckInteger(other_obj) && + self_tensor.dtype() == DataType::BOOL) { + eager_gil_scoped_release guard; + self_tensor = cast_ad_func(self_tensor, DataType::INT64); } + double other = CastPyArg2Double(other_obj, "__add__", 0); { eager_gil_scoped_release guard; ret = CallScalarFuction(self_tensor, other, "add"); } return ToPyObject(ret); + } else if (PyComplex_Check(other_obj)) { + if (is_support_complex(self_tensor.dtype()) == false) { + eager_gil_scoped_release guard; + self_tensor = cast_ad_func( + self_tensor, promoteTypes(self_tensor.dtype(), DataType::COMPLEX64)); + } } // 2. 
create or get tensor for other_obj paddle::Tensor other_tensor; + if (PyCheckTensor(other_obj)) { - auto& self_tensor_ref = self->tensor; - auto& other_tensor_ref = CastPyArg2Tensor(other_obj, 0); + auto& self_tensor_ref_addr = self->tensor; + auto& other_tensor_ref_addr = CastPyArg2Tensor(other_obj, 0); const phi::distributed::ProcessMesh* mesh = nullptr; - if (InputsContainDistTensor(&mesh, self_tensor_ref, other_tensor_ref)) { - ConvertAllInputsToDistTensor(mesh, self_tensor_ref, other_tensor_ref); + if (InputsContainDistTensor( + &mesh, self_tensor_ref_addr, other_tensor_ref_addr)) { + ConvertAllInputsToDistTensor( + mesh, self_tensor_ref_addr, other_tensor_ref_addr); } + + auto self_tensor_ref = self->tensor; + auto other_tensor_ref = CastPyArg2Tensor(other_obj, 0); + // got 0-d tensor, and need type promotion. The rules same with Tensor + + // Scalar. + TypePromotionForZeroDimTensor("add", self_tensor_ref, other_tensor_ref); + self_tensor = self_tensor_ref; other_tensor = other_tensor_ref; } else { @@ -263,39 +313,7 @@ static PyObject* tensor__add__method(TensorObject* self, } } - // 3. promote types or unify right var type to left var, float type promotion - // mv to add_ad_func - phi::DataType lhs_dtype = self_tensor.dtype(); - phi::DataType rhs_dtype = other_tensor.dtype(); - if (lhs_dtype != rhs_dtype && !phi::NeedTypePromotion(lhs_dtype, rhs_dtype)) { - // note: only op_type in _supported_promote_complex_types_ should promote - // dtype - if (_complex_dtypes.find(lhs_dtype) != _complex_dtypes.end() || - _complex_dtypes.find(rhs_dtype) != _complex_dtypes.end()) { - phi::DataType promote_dtype = - framework::TransToPhiDataType(framework::PromoteTypesIfComplexExists( - framework::TransToProtoVarType(lhs_dtype), - framework::TransToProtoVarType(rhs_dtype))); - if (lhs_dtype != promote_dtype) { - // cast - eager_gil_scoped_release guard; - self_tensor = cast_ad_func(self_tensor, promote_dtype); - } - if (rhs_dtype != promote_dtype) { - eager_gil_scoped_release guard; - other_tensor = cast_ad_func(other_tensor, promote_dtype); - } - } else { - VLOG(6) << "The dtype of left and right Tensor are not the same, left " - "dtype is " - << lhs_dtype << ", but right dtype is " << rhs_dtype - << ", the right dtype will convert to " << lhs_dtype; - eager_gil_scoped_release guard; - other_tensor = cast_ad_func(other_tensor, lhs_dtype); - } - } - - // 4. calculation + // 3. calculation VLOG(6) << "Calling add_ad_func in tensor__add__method"; { @@ -329,34 +347,52 @@ static PyObject* tensor__sub__method(TensorObject* self, // 1. 
scalar exists cases if (PyFloat_Check(other_obj) || PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { - double other = 0.0; if (PyFloat_Check(other_obj)) { - other = CastPyArg2Double(other_obj, "__sub__", 0); if (_supported_int_dtype_.find(self_tensor.dtype()) != _supported_int_dtype_.end()) { eager_gil_scoped_release guard; self_tensor = cast_ad_func(self_tensor, DataType::FLOAT32); } - } else if (PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { - other = CastPyArg2Double(other_obj, "__sub__", 0); + } else if (PyCheckInteger(other_obj) && + self_tensor.dtype() == DataType::BOOL) { + eager_gil_scoped_release guard; + self_tensor = cast_ad_func(self_tensor, DataType::INT64); } + + double other = CastPyArg2Double(other_obj, "__sub__", 0); { eager_gil_scoped_release guard; ret = CallScalarFuction(self_tensor, other, "sub"); } return ToPyObject(ret); + } else if (PyComplex_Check(other_obj)) { + if (is_support_complex(self_tensor.dtype()) == false) { + eager_gil_scoped_release guard; + self_tensor = cast_ad_func( + self_tensor, promoteTypes(self_tensor.dtype(), DataType::COMPLEX64)); + } } // 2. create or get tensor for other_obj paddle::Tensor other_tensor; if (PyCheckTensor(other_obj)) { - auto& self_tensor_ref = self->tensor; - auto& other_tensor_ref = CastPyArg2Tensor(other_obj, 0); + auto& self_tensor_ref_addr = self->tensor; + auto& other_tensor_ref_addr = CastPyArg2Tensor(other_obj, 0); const phi::distributed::ProcessMesh* mesh = nullptr; - if (InputsContainDistTensor(&mesh, self_tensor_ref, other_tensor_ref)) { - ConvertAllInputsToDistTensor(mesh, self_tensor_ref, other_tensor_ref); + if (InputsContainDistTensor( + &mesh, self_tensor_ref_addr, other_tensor_ref_addr)) { + ConvertAllInputsToDistTensor( + mesh, self_tensor_ref_addr, other_tensor_ref_addr); } + + auto self_tensor_ref = self->tensor; + auto other_tensor_ref = CastPyArg2Tensor(other_obj, 0); + // got 0-d tensor, and need type promotion. The rules same with Tensor + + // Scalar. + TypePromotionForZeroDimTensor( + "subtract", self_tensor_ref, other_tensor_ref); + self_tensor = self_tensor_ref; other_tensor = other_tensor_ref; } else { @@ -381,37 +417,7 @@ static PyObject* tensor__sub__method(TensorObject* self, } } - // 3. promote types or unify right var type to left var, float type promotion - // mv to subtract_ad_func - phi::DataType lhs_dtype = self_tensor.dtype(); - phi::DataType rhs_dtype = other_tensor.dtype(); - if (lhs_dtype != rhs_dtype && !phi::NeedTypePromotion(lhs_dtype, rhs_dtype)) { - if (_complex_dtypes.find(lhs_dtype) != _complex_dtypes.end() || - _complex_dtypes.find(rhs_dtype) != _complex_dtypes.end()) { - phi::DataType promote_dtype = - framework::TransToPhiDataType(framework::PromoteTypesIfComplexExists( - framework::TransToProtoVarType(lhs_dtype), - framework::TransToProtoVarType(rhs_dtype))); - if (lhs_dtype != promote_dtype) { - // cast - eager_gil_scoped_release guard; - self_tensor = cast_ad_func(self_tensor, promote_dtype); - } - if (rhs_dtype != promote_dtype) { - eager_gil_scoped_release guard; - other_tensor = cast_ad_func(other_tensor, promote_dtype); - } - } else { - VLOG(6) << "The dtype of left and right Tensor are not the same, left " - "dtype is " - << lhs_dtype << ", but right dtype is " << rhs_dtype - << ", the right dtype will convert to " << lhs_dtype; - eager_gil_scoped_release guard; - other_tensor = cast_ad_func(other_tensor, lhs_dtype); - } - } - - // 4. calculation + // 3. 
calculation VLOG(6) << "Calling subtract_ad_func in tensor__sub__method"; { eager_gil_scoped_release guard; @@ -444,33 +450,51 @@ static PyObject* tensor__rsub__method(TensorObject* self, // 1. scalar exists cases if (PyFloat_Check(other_obj) || PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { - double other = 0.0; if (PyFloat_Check(other_obj)) { - other = CastPyArg2Double(other_obj, "__rsub__", 0); if (_supported_int_dtype_.find(self_tensor.dtype()) != _supported_int_dtype_.end()) { eager_gil_scoped_release guard; self_tensor = cast_ad_func(self_tensor, DataType::FLOAT32); } - } else if (PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { - other = CastPyArg2Double(other_obj, "__rsub__", 0); + } else if (PyCheckInteger(other_obj) && + self_tensor.dtype() == DataType::BOOL) { + eager_gil_scoped_release guard; + self_tensor = cast_ad_func(self_tensor, DataType::INT64); } + + double other = CastPyArg2Double(other_obj, "__rsub__", 0); { eager_gil_scoped_release guard; ret = CallScalarFuction(self_tensor, other, "rsub"); } return ToPyObject(ret); + } else if (PyComplex_Check(other_obj)) { + if (is_support_complex(self_tensor.dtype()) == false) { + eager_gil_scoped_release guard; + self_tensor = cast_ad_func( + self_tensor, promoteTypes(self_tensor.dtype(), DataType::COMPLEX64)); + } } // 2. create or get tensor for other_obj paddle::Tensor other_tensor; if (PyCheckTensor(other_obj)) { - auto& other_tensor_ref = CastPyArg2Tensor(other_obj, 0); - auto& self_tensor_ref = self->tensor; + auto& self_tensor_ref_addr = self->tensor; + auto& other_tensor_ref_addr = CastPyArg2Tensor(other_obj, 0); const phi::distributed::ProcessMesh* mesh = nullptr; - if (InputsContainDistTensor(&mesh, self_tensor_ref, other_tensor_ref)) { - ConvertAllInputsToDistTensor(mesh, self_tensor_ref, other_tensor_ref); + if (InputsContainDistTensor( + &mesh, self_tensor_ref_addr, other_tensor_ref_addr)) { + ConvertAllInputsToDistTensor( + mesh, self_tensor_ref_addr, other_tensor_ref_addr); } + + auto other_tensor_ref = CastPyArg2Tensor(other_obj, 0); + auto self_tensor_ref = self->tensor; + // got 0-d tensor, and need type promotion. The rules same with Tensor + + // Scalar. + TypePromotionForZeroDimTensor( + "subtract", self_tensor_ref, other_tensor_ref); + self_tensor = self_tensor_ref; other_tensor = other_tensor_ref; } else { @@ -495,37 +519,7 @@ static PyObject* tensor__rsub__method(TensorObject* self, } } - // 3. 
promote types or unify right var type to left var, float type promotion - // mv to subtract_ad_func - phi::DataType lhs_dtype = self_tensor.dtype(); - phi::DataType rhs_dtype = other_tensor.dtype(); - if (lhs_dtype != rhs_dtype && !phi::NeedTypePromotion(lhs_dtype, rhs_dtype)) { - if (_complex_dtypes.find(lhs_dtype) != _complex_dtypes.end() || - _complex_dtypes.find(rhs_dtype) != _complex_dtypes.end()) { - phi::DataType promote_dtype = - framework::TransToPhiDataType(framework::PromoteTypesIfComplexExists( - framework::TransToProtoVarType(lhs_dtype), - framework::TransToProtoVarType(rhs_dtype))); - if (lhs_dtype != promote_dtype) { - // cast - eager_gil_scoped_release guard; - self_tensor = cast_ad_func(self_tensor, promote_dtype); - } - if (rhs_dtype != promote_dtype) { - eager_gil_scoped_release guard; - other_tensor = cast_ad_func(other_tensor, promote_dtype); - } - } else { - VLOG(6) << "The dtype of left and right Tensor are not the same, left " - "dtype is " - << lhs_dtype << ", but right dtype is " << rhs_dtype - << ", the right dtype will convert to " << lhs_dtype; - eager_gil_scoped_release guard; - other_tensor = cast_ad_func(other_tensor, lhs_dtype); - } - } - - // 4. calculation + // 3. calculation VLOG(6) << "Calling subtract_ad_func in tensor__rsub__method"; { eager_gil_scoped_release guard; @@ -559,22 +553,30 @@ static PyObject* tensor__mul__method(TensorObject* self, // 1. scalar exists cases if (PyFloat_Check(other_obj) || PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { - double other = 0.0; if (PyFloat_Check(other_obj)) { - other = CastPyArg2Double(other_obj, "__mul__", 0); if (_supported_int_dtype_.find(self_tensor.dtype()) != _supported_int_dtype_.end()) { eager_gil_scoped_release guard; self_tensor = cast_ad_func(self_tensor, DataType::FLOAT32); } - } else if (PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { - other = CastPyArg2Double(other_obj, "__mul__", 0); + } else if (PyCheckInteger(other_obj) && + self_tensor.dtype() == DataType::BOOL) { + eager_gil_scoped_release guard; + self_tensor = cast_ad_func(self_tensor, DataType::INT64); } + + double other = CastPyArg2Double(other_obj, "__mul__", 0); { eager_gil_scoped_release guard; ret = CallScalarFuction(self_tensor, other, "mul"); } return ToPyObject(ret); + } else if (PyComplex_Check(other_obj)) { + if (is_support_complex(self_tensor.dtype()) == false) { + eager_gil_scoped_release guard; + self_tensor = cast_ad_func( + self_tensor, promoteTypes(self_tensor.dtype(), DataType::COMPLEX64)); + } } // 2. create or get tensor for other_obj @@ -582,12 +584,23 @@ static PyObject* tensor__mul__method(TensorObject* self, // if one of the input is numpy or scalar, no need to do inplace cast. paddle::Tensor other_tensor; if (PyCheckTensor(other_obj)) { - auto& self_tensor_ref = self->tensor; - auto& other_tensor_ref = CastPyArg2Tensor(other_obj, 0); + auto& self_tensor_ref_addr = self->tensor; + auto& other_tensor_ref_addr = CastPyArg2Tensor(other_obj, 0); const phi::distributed::ProcessMesh* mesh = nullptr; - if (InputsContainDistTensor(&mesh, self_tensor_ref, other_tensor_ref)) { - ConvertAllInputsToDistTensor(mesh, self_tensor_ref, other_tensor_ref); + if (InputsContainDistTensor( + &mesh, self_tensor_ref_addr, other_tensor_ref_addr)) { + ConvertAllInputsToDistTensor( + mesh, self_tensor_ref_addr, other_tensor_ref_addr); } + + auto self_tensor_ref = self->tensor; + auto other_tensor_ref = CastPyArg2Tensor(other_obj, 0); + + // got 0-d tensor, and need type promotion. The rules same with Tensor + + // Scalar. 
+ TypePromotionForZeroDimTensor( + "multiply", self_tensor_ref, other_tensor_ref); + self_tensor = self_tensor_ref; other_tensor = other_tensor_ref; } else { @@ -618,39 +631,7 @@ static PyObject* tensor__mul__method(TensorObject* self, } } - // 3. promote types or unify right var type to left var, float type promotion - // mv to multiply_ad_func - phi::DataType lhs_dtype = self_tensor.dtype(); - phi::DataType rhs_dtype = other_tensor.dtype(); - if (lhs_dtype != rhs_dtype && !phi::NeedTypePromotion(lhs_dtype, rhs_dtype)) { - // note: only op_type in _supported_promote_complex_types_ should promote - // dtype - if (_complex_dtypes.find(lhs_dtype) != _complex_dtypes.end() || - _complex_dtypes.find(rhs_dtype) != _complex_dtypes.end()) { - phi::DataType promote_dtype = - framework::TransToPhiDataType(framework::PromoteTypesIfComplexExists( - framework::TransToProtoVarType(lhs_dtype), - framework::TransToProtoVarType(rhs_dtype))); - if (lhs_dtype != promote_dtype) { - // cast - eager_gil_scoped_release guard; - self_tensor = cast_ad_func(self_tensor, promote_dtype); - } - if (rhs_dtype != promote_dtype) { - eager_gil_scoped_release guard; - other_tensor = cast_ad_func(other_tensor, promote_dtype); - } - } else { - VLOG(6) << "The dtype of left and right Tensor are not the same, left " - "dtype is " - << lhs_dtype << ", but right dtype is " << rhs_dtype - << ", the right dtype will convert to " << lhs_dtype; - eager_gil_scoped_release guard; - other_tensor = cast_ad_func(other_tensor, lhs_dtype); - } - } - - // 4. calculation + // 3. calculation VLOG(6) << "Calling multiply_ad_func in tensor__mul__method"; { eager_gil_scoped_release guard; @@ -685,33 +666,44 @@ static PyObject* tensor__div__method(TensorObject* self, // 1. scalar exists cases if (PyFloat_Check(other_obj) || PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { - double other = 0.0; - if (PyFloat_Check(other_obj)) { - other = CastPyArg2Double(other_obj, "__div__", 0); - } else if (PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { - other = CastPyArg2Double(other_obj, "__div__", 0); - } if (_supported_int_dtype_.find(self_tensor.dtype()) != _supported_int_dtype_.end()) { eager_gil_scoped_release guard; self_tensor = cast_ad_func(self_tensor, DataType::FLOAT32); } + + double other = CastPyArg2Double(other_obj, "__div__", 0); { eager_gil_scoped_release guard; ret = CallScalarFuction(self_tensor, other, "div"); } return ToPyObject(ret); + } else if (PyComplex_Check(other_obj)) { + if (is_support_complex(self_tensor.dtype()) == false) { + eager_gil_scoped_release guard; + self_tensor = cast_ad_func( + self_tensor, promoteTypes(self_tensor.dtype(), DataType::COMPLEX64)); + } } // 2. create or get tensor for other_obj paddle::Tensor other_tensor; if (PyCheckTensor(other_obj)) { - auto& self_tensor_ref = self->tensor; - auto& other_tensor_ref = CastPyArg2Tensor(other_obj, 0); + auto& self_tensor_ref_addr = self->tensor; + auto& other_tensor_ref_addr = CastPyArg2Tensor(other_obj, 0); const phi::distributed::ProcessMesh* mesh = nullptr; - if (InputsContainDistTensor(&mesh, self_tensor_ref, other_tensor_ref)) { - ConvertAllInputsToDistTensor(mesh, self_tensor_ref, other_tensor_ref); + if (InputsContainDistTensor( + &mesh, self_tensor_ref_addr, other_tensor_ref_addr)) { + ConvertAllInputsToDistTensor( + mesh, self_tensor_ref_addr, other_tensor_ref_addr); } + + auto self_tensor_ref = self->tensor; + auto other_tensor_ref = CastPyArg2Tensor(other_obj, 0); + // got 0-d tensor, and need type promotion. 
The rules same with Tensor + + // Scalar. + TypePromotionForZeroDimTensor("divide", self_tensor_ref, other_tensor_ref); + self_tensor = self_tensor_ref; other_tensor = other_tensor_ref; } else { @@ -740,46 +732,16 @@ static PyObject* tensor__div__method(TensorObject* self, } } - // 3. promote types or unify right var type to left var - phi::DataType lhs_dtype = self_tensor.dtype(); - phi::DataType rhs_dtype = other_tensor.dtype(); - if (lhs_dtype != rhs_dtype) { - // note: only op_type in _supported_promote_complex_types_ should promote - // dtype - if (_complex_dtypes.find(lhs_dtype) != _complex_dtypes.end() || - _complex_dtypes.find(rhs_dtype) != _complex_dtypes.end()) { - phi::DataType promote_dtype = - framework::TransToPhiDataType(framework::PromoteTypesIfComplexExists( - framework::TransToProtoVarType(lhs_dtype), - framework::TransToProtoVarType(rhs_dtype))); - if (lhs_dtype != promote_dtype) { - // cast - eager_gil_scoped_release guard; - self_tensor = cast_ad_func(self_tensor, promote_dtype); - } - if (rhs_dtype != promote_dtype) { - eager_gil_scoped_release guard; - other_tensor = cast_ad_func(other_tensor, promote_dtype); - } - } else { - VLOG(6) << "The dtype of left and right Tensor are not the same, left " - "dtype is " - << lhs_dtype << ", but right dtype is " << rhs_dtype - << ", the right dtype will convert to " << lhs_dtype; + // 3. promote types or unify right var type to left var, float type promotion + // mv to divide_ad_func + if (self_tensor.dtype() == other_tensor.dtype()) { + if (_supported_int_dtype_.find(self_tensor.dtype()) != + _supported_int_dtype_.end()) { eager_gil_scoped_release guard; - other_tensor = cast_ad_func(other_tensor, lhs_dtype); + self_tensor = cast_ad_func(self_tensor, DataType::FLOAT32); + other_tensor = cast_ad_func(other_tensor, DataType::FLOAT32); } } - if (_supported_int_dtype_.find(self_tensor.dtype()) != - _supported_int_dtype_.end()) { - eager_gil_scoped_release guard; - self_tensor = cast_ad_func(self_tensor, DataType::FLOAT32); - } - if (_supported_int_dtype_.find(other_tensor.dtype()) != - _supported_int_dtype_.end()) { - eager_gil_scoped_release guard; - other_tensor = cast_ad_func(other_tensor, DataType::FLOAT32); - } // 4. calculation VLOG(6) << "Calling divide_ad_func in tensor__div__method"; @@ -814,43 +776,40 @@ static PyObject* tensor__rdiv__method(TensorObject* self, // 1. scalar exists cases // there is no scalar_div function for __rdiv__ and __rtruediv__ - double other_double = 0.0; - bool has_other_double = false; if (PyFloat_Check(other_obj) || PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { - if (PyFloat_Check(other_obj)) { // NOLINT - other_double = CastPyArg2Double(other_obj, "__rdiv__", 0); - has_other_double = true; - } else if (PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { // NOLINT - other_double = CastPyArg2Double(other_obj, "__rdiv__", 0); - has_other_double = true; - } if (_supported_int_dtype_.find(self_tensor.dtype()) != _supported_int_dtype_.end()) { eager_gil_scoped_release guard; self_tensor = cast_ad_func(self_tensor, DataType::FLOAT32); } + } else if (PyComplex_Check(other_obj)) { + if (is_support_complex(self_tensor.dtype()) == false) { + eager_gil_scoped_release guard; + self_tensor = cast_ad_func( + self_tensor, promoteTypes(self_tensor.dtype(), DataType::COMPLEX64)); + } } // 2. 
create or get tensor for other_obj + paddle::Tensor other_tensor; - if (has_other_double) { - eager_gil_scoped_release guard; - other_tensor = full_ad_func(self_tensor.shape(), - phi::Scalar(other_double), - self_tensor.dtype(), - place); - const phi::distributed::ProcessMesh* mesh = nullptr; - if (InputsContainDistTensor(&mesh, self_tensor, other_tensor)) { - ConvertAllInputsToDistTensor(mesh, self_tensor, other_tensor); - } - } else if (PyCheckTensor(other_obj)) { - auto& self_tensor_ref = self->tensor; - auto& other_tensor_ref = CastPyArg2Tensor(other_obj, 0); + if (PyCheckTensor(other_obj)) { + auto& self_tensor_ref_addr = self->tensor; + auto& other_tensor_ref_addr = CastPyArg2Tensor(other_obj, 0); const phi::distributed::ProcessMesh* mesh = nullptr; - if (InputsContainDistTensor(&mesh, self_tensor_ref, other_tensor_ref)) { - ConvertAllInputsToDistTensor(mesh, self_tensor_ref, other_tensor_ref); + if (InputsContainDistTensor( + &mesh, self_tensor_ref_addr, other_tensor_ref_addr)) { + ConvertAllInputsToDistTensor( + mesh, self_tensor_ref_addr, other_tensor_ref_addr); } + + auto self_tensor_ref = self->tensor; + auto other_tensor_ref = CastPyArg2Tensor(other_obj, 0); + // got 0-d tensor, and need type promotion. The rules same with Tensor + + // Scalar. + TypePromotionForZeroDimTensor("divide", self_tensor_ref, other_tensor_ref); + self_tensor = self_tensor_ref; other_tensor = other_tensor_ref; } else { @@ -879,46 +838,16 @@ static PyObject* tensor__rdiv__method(TensorObject* self, } } - // 3. promote types or unify right var type to left var - phi::DataType lhs_dtype = self_tensor.dtype(); - phi::DataType rhs_dtype = other_tensor.dtype(); - if (lhs_dtype != rhs_dtype) { - // note: only op_type in _supported_promote_complex_types_ should promote - // dtype - if (_complex_dtypes.find(lhs_dtype) != _complex_dtypes.end() || - _complex_dtypes.find(rhs_dtype) != _complex_dtypes.end()) { - phi::DataType promote_dtype = - framework::TransToPhiDataType(framework::PromoteTypesIfComplexExists( - framework::TransToProtoVarType(lhs_dtype), - framework::TransToProtoVarType(rhs_dtype))); - if (lhs_dtype != promote_dtype) { - // cast - eager_gil_scoped_release guard; - self_tensor = cast_ad_func(self_tensor, promote_dtype); - } - if (rhs_dtype != promote_dtype) { - eager_gil_scoped_release guard; - other_tensor = cast_ad_func(other_tensor, promote_dtype); - } - } else { - VLOG(6) << "The dtype of left and right Tensor are not the same, left " - "dtype is " - << lhs_dtype << ", but right dtype is " << rhs_dtype - << ", the right dtype will convert to " << lhs_dtype; + // 3. promote types or unify right var type to left var, float type promotion + // mv to divide_ad_func + if (self_tensor.dtype() == other_tensor.dtype()) { + if (_supported_int_dtype_.find(self_tensor.dtype()) != + _supported_int_dtype_.end()) { eager_gil_scoped_release guard; - other_tensor = cast_ad_func(other_tensor, lhs_dtype); + self_tensor = cast_ad_func(self_tensor, DataType::FLOAT32); + other_tensor = cast_ad_func(other_tensor, DataType::FLOAT32); } } - if (_supported_int_dtype_.find(self_tensor.dtype()) != - _supported_int_dtype_.end()) { - eager_gil_scoped_release guard; - self_tensor = cast_ad_func(self_tensor, DataType::FLOAT32); - } - if (_supported_int_dtype_.find(other_tensor.dtype()) != - _supported_int_dtype_.end()) { - eager_gil_scoped_release guard; - other_tensor = cast_ad_func(other_tensor, DataType::FLOAT32); - } // 4. 
calculation VLOG(6) << "Calling divide_ad_func in tensor__rdiv__method"; @@ -951,43 +880,46 @@ static PyObject* tensor__gt__method(TensorObject* self, // 1. scalar exists cases // there is no scalar function for __gt__ now - double other_double = 0.0; - bool has_other_double = false; if (PyFloat_Check(other_obj) || PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { if (PyFloat_Check(other_obj)) { - other_double = CastPyArg2Double(other_obj, "__gt__", 0); - has_other_double = true; if (_supported_int_dtype_.find(self_tensor.dtype()) != _supported_int_dtype_.end()) { eager_gil_scoped_release guard; self_tensor = cast_ad_func(self_tensor, DataType::FLOAT32); } - } else if (PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { - other_double = CastPyArg2Double(other_obj, "__gt__", 0); - has_other_double = true; + } else if (PyCheckInteger(other_obj) && + self_tensor.dtype() == DataType::BOOL) { + eager_gil_scoped_release guard; + self_tensor = cast_ad_func(self_tensor, DataType::INT64); + } + } else if (PyComplex_Check(other_obj)) { + if (is_support_complex(self_tensor.dtype()) == false) { + eager_gil_scoped_release guard; + self_tensor = cast_ad_func( + self_tensor, promoteTypes(self_tensor.dtype(), DataType::COMPLEX64)); } } // 2. create or get tensor for other_obj paddle::Tensor other_tensor; - if (has_other_double) { - eager_gil_scoped_release guard; - other_tensor = full_ad_func(self_tensor.shape(), - phi::Scalar(other_double), - self_tensor.dtype(), - place); - const phi::distributed::ProcessMesh* mesh = nullptr; - if (InputsContainDistTensor(&mesh, self_tensor, other_tensor)) { - ConvertAllInputsToDistTensor(mesh, self_tensor, other_tensor); - } - } else if (PyCheckTensor(other_obj)) { - auto& self_tensor_ref = self->tensor; - auto& other_tensor_ref = CastPyArg2Tensor(other_obj, 0); + if (PyCheckTensor(other_obj)) { + auto& self_tensor_ref_addr = self->tensor; + auto& other_tensor_ref_addr = CastPyArg2Tensor(other_obj, 0); const phi::distributed::ProcessMesh* mesh = nullptr; - if (InputsContainDistTensor(&mesh, self_tensor_ref, other_tensor_ref)) { - ConvertAllInputsToDistTensor(mesh, self_tensor_ref, other_tensor_ref); + if (InputsContainDistTensor( + &mesh, self_tensor_ref_addr, other_tensor_ref_addr)) { + ConvertAllInputsToDistTensor( + mesh, self_tensor_ref_addr, other_tensor_ref_addr); } + + auto self_tensor_ref = self->tensor; + auto other_tensor_ref = CastPyArg2Tensor(other_obj, 0); + // got 0-d tensor, and need type promotion. The rules same with Tensor + + // Scalar. + TypePromotionForZeroDimTensor( + "greater_than", self_tensor_ref, other_tensor_ref); + self_tensor = self_tensor_ref; other_tensor = other_tensor_ref; } else { @@ -1016,19 +948,7 @@ static PyObject* tensor__gt__method(TensorObject* self, } } - // 3. promote types or unify right var type to left var - phi::DataType lhs_dtype = self_tensor.dtype(); - phi::DataType rhs_dtype = other_tensor.dtype(); - if (lhs_dtype != rhs_dtype) { - VLOG(6) << "The dtype of left and right Tensor are not the same, left " - "dtype is " - << lhs_dtype << ", but right dtype is " << rhs_dtype - << ", the right dtype will convert to " << lhs_dtype; - eager_gil_scoped_release guard; - other_tensor = cast_ad_func(other_tensor, lhs_dtype); - } - - // 4. calculation + // 3. calculation VLOG(6) << "Calling greater_than_ad_func in tensor__gt__method"; { eager_gil_scoped_release guard; @@ -1060,43 +980,46 @@ static PyObject* tensor__ge__method(TensorObject* self, // 1. 
scalar exists cases // there is no scalar function for __ge__ now - double other_double = 0.0; - bool has_other_double = false; if (PyFloat_Check(other_obj) || PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { if (PyFloat_Check(other_obj)) { - other_double = CastPyArg2Double(other_obj, "__ge__", 0); - has_other_double = true; if (_supported_int_dtype_.find(self_tensor.dtype()) != _supported_int_dtype_.end()) { eager_gil_scoped_release guard; self_tensor = cast_ad_func(self_tensor, DataType::FLOAT32); } - } else if (PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { - other_double = CastPyArg2Double(other_obj, "__ge__", 0); - has_other_double = true; + } else if (PyCheckInteger(other_obj) && + self_tensor.dtype() == DataType::BOOL) { + eager_gil_scoped_release guard; + self_tensor = cast_ad_func(self_tensor, DataType::INT64); + } + } else if (PyComplex_Check(other_obj)) { + if (is_support_complex(self_tensor.dtype()) == false) { + eager_gil_scoped_release guard; + self_tensor = cast_ad_func( + self_tensor, promoteTypes(self_tensor.dtype(), DataType::COMPLEX64)); } } // 2. create or get tensor for other_obj paddle::Tensor other_tensor; - if (has_other_double) { - eager_gil_scoped_release guard; - other_tensor = full_ad_func(self_tensor.shape(), - phi::Scalar(other_double), - self_tensor.dtype(), - place); - const phi::distributed::ProcessMesh* mesh = nullptr; - if (InputsContainDistTensor(&mesh, self_tensor, other_tensor)) { - ConvertAllInputsToDistTensor(mesh, self_tensor, other_tensor); - } - } else if (PyCheckTensor(other_obj)) { - auto& self_tensor_ref = self->tensor; - auto& other_tensor_ref = CastPyArg2Tensor(other_obj, 0); + if (PyCheckTensor(other_obj)) { + auto& self_tensor_ref_addr = self->tensor; + auto& other_tensor_ref_addr = CastPyArg2Tensor(other_obj, 0); const phi::distributed::ProcessMesh* mesh = nullptr; - if (InputsContainDistTensor(&mesh, self_tensor_ref, other_tensor_ref)) { - ConvertAllInputsToDistTensor(mesh, self_tensor_ref, other_tensor_ref); + if (InputsContainDistTensor( + &mesh, self_tensor_ref_addr, other_tensor_ref_addr)) { + ConvertAllInputsToDistTensor( + mesh, self_tensor_ref_addr, other_tensor_ref_addr); } + + auto self_tensor_ref = self->tensor; + auto other_tensor_ref = CastPyArg2Tensor(other_obj, 0); + // got 0-d tensor, and need type promotion. The rules same with Tensor + + // Scalar. + TypePromotionForZeroDimTensor( + "greater_equal", self_tensor_ref, other_tensor_ref); + self_tensor = self_tensor_ref; other_tensor = other_tensor_ref; } else { @@ -1125,19 +1048,7 @@ static PyObject* tensor__ge__method(TensorObject* self, } } - // 3. promote types or unify right var type to left var - phi::DataType lhs_dtype = self_tensor.dtype(); - phi::DataType rhs_dtype = other_tensor.dtype(); - if (lhs_dtype != rhs_dtype) { - VLOG(6) << "The dtype of left and right Tensor are not the same, left " - "dtype is " - << lhs_dtype << ", but right dtype is " << rhs_dtype - << ", the right dtype will convert to " << lhs_dtype; - eager_gil_scoped_release guard; - other_tensor = cast_ad_func(other_tensor, lhs_dtype); - } - - // 4. calculation + // 3. calculation VLOG(6) << "Calling greater_equal_ad_func in tensor__ge__method"; { eager_gil_scoped_release guard; @@ -1170,43 +1081,46 @@ static PyObject* tensor__mod__method(TensorObject* self, // 1. 
scalar exists cases // there is no scalar_mod function for __mod__ now - float other_double = 0.0f; - bool has_other_double = false; if (PyFloat_Check(other_obj) || PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { if (PyFloat_Check(other_obj)) { - other_double = CastPyArg2Double(other_obj, "__mod__", 0); // NOLINT - has_other_double = true; if (_supported_int_dtype_.find(self_tensor.dtype()) != _supported_int_dtype_.end()) { eager_gil_scoped_release guard; self_tensor = cast_ad_func(self_tensor, DataType::FLOAT32); } - } else if (PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { - other_double = CastPyArg2Double(other_obj, "__mod__", 0); // NOLINT - has_other_double = true; + } else if (PyCheckInteger(other_obj) && + self_tensor.dtype() == DataType::BOOL) { + eager_gil_scoped_release guard; + self_tensor = cast_ad_func(self_tensor, DataType::INT64); + } + } else if (PyComplex_Check(other_obj)) { + if (is_support_complex(self_tensor.dtype()) == false) { + eager_gil_scoped_release guard; + self_tensor = cast_ad_func( + self_tensor, promoteTypes(self_tensor.dtype(), DataType::COMPLEX64)); } } // 2. create or get tensor for other_obj paddle::Tensor other_tensor; - if (has_other_double) { - eager_gil_scoped_release guard; - other_tensor = full_ad_func(self_tensor.shape(), - phi::Scalar(other_double), - self_tensor.dtype(), - self_tensor.place()); - const phi::distributed::ProcessMesh* mesh = nullptr; - if (InputsContainDistTensor(&mesh, self_tensor, other_tensor)) { - ConvertAllInputsToDistTensor(mesh, self_tensor, other_tensor); - } - } else if (PyCheckTensor(other_obj)) { - auto& self_tensor_ref = self->tensor; - auto& other_tensor_ref = CastPyArg2Tensor(other_obj, 0); + if (PyCheckTensor(other_obj)) { + auto& self_tensor_ref_addr = self->tensor; + auto& other_tensor_ref_addr = CastPyArg2Tensor(other_obj, 0); const phi::distributed::ProcessMesh* mesh = nullptr; - if (InputsContainDistTensor(&mesh, self_tensor_ref, other_tensor_ref)) { - ConvertAllInputsToDistTensor(mesh, self_tensor_ref, other_tensor_ref); + if (InputsContainDistTensor( + &mesh, self_tensor_ref_addr, other_tensor_ref_addr)) { + ConvertAllInputsToDistTensor( + mesh, self_tensor_ref_addr, other_tensor_ref_addr); } + + auto self_tensor_ref = self->tensor; + auto other_tensor_ref = CastPyArg2Tensor(other_obj, 0); + // got 0-d tensor, and need type promotion. The rules same with Tensor + + // Scalar. + TypePromotionForZeroDimTensor( + "remainder", self_tensor_ref, other_tensor_ref); + self_tensor = self_tensor_ref; other_tensor = other_tensor_ref; } else { @@ -1235,19 +1149,7 @@ static PyObject* tensor__mod__method(TensorObject* self, } } - // 3. promote types or unify right var type to left var - phi::DataType lhs_dtype = self_tensor.dtype(); - phi::DataType rhs_dtype = other_tensor.dtype(); - if (lhs_dtype != rhs_dtype) { - VLOG(6) << "The dtype of left and right Tensor are not the same, left " - "dtype is " - << lhs_dtype << ", but right dtype is " << rhs_dtype - << ", the right dtype will convert to " << lhs_dtype; - eager_gil_scoped_release guard; - other_tensor = cast_ad_func(other_tensor, lhs_dtype); - } - - // 4. calculation + // 3. calculation VLOG(6) << "Calling remainder_ad_func in tensor__mod__method"; { eager_gil_scoped_release guard; @@ -1404,43 +1306,46 @@ static PyObject* tensor__lt__method(TensorObject* self, // 1. 
scalar exists cases // there is no scalar function for __lt__ now - float other_double = 0.0f; - bool has_other_double = false; if (PyFloat_Check(other_obj) || PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { if (PyFloat_Check(other_obj)) { - other_double = CastPyArg2Double(other_obj, "__lt__", 0); // NOLINT - has_other_double = true; if (_supported_int_dtype_.find(self_tensor.dtype()) != _supported_int_dtype_.end()) { eager_gil_scoped_release guard; self_tensor = cast_ad_func(self_tensor, DataType::FLOAT32); } - } else if (PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { - other_double = CastPyArg2Double(other_obj, "__lt__", 0); // NOLINT - has_other_double = true; + } else if (PyCheckInteger(other_obj) && + self_tensor.dtype() == DataType::BOOL) { + eager_gil_scoped_release guard; + self_tensor = cast_ad_func(self_tensor, DataType::INT64); + } + } else if (PyComplex_Check(other_obj)) { + if (is_support_complex(self_tensor.dtype()) == false) { + eager_gil_scoped_release guard; + self_tensor = cast_ad_func( + self_tensor, promoteTypes(self_tensor.dtype(), DataType::COMPLEX64)); } } // 2. create or get tensor for other_obj paddle::Tensor other_tensor; - if (has_other_double) { - eager_gil_scoped_release guard; - other_tensor = full_ad_func(self_tensor.shape(), - phi::Scalar(other_double), - self_tensor.dtype(), - self_tensor.place()); - const phi::distributed::ProcessMesh* mesh = nullptr; - if (InputsContainDistTensor(&mesh, self_tensor, other_tensor)) { - ConvertAllInputsToDistTensor(mesh, self_tensor, other_tensor); - } - } else if (PyCheckTensor(other_obj)) { - auto& self_tensor_ref = self->tensor; - auto& other_tensor_ref = CastPyArg2Tensor(other_obj, 0); + if (PyCheckTensor(other_obj)) { + auto& self_tensor_ref_addr = self->tensor; + auto& other_tensor_ref_addr = CastPyArg2Tensor(other_obj, 0); const phi::distributed::ProcessMesh* mesh = nullptr; - if (InputsContainDistTensor(&mesh, self_tensor_ref, other_tensor_ref)) { - ConvertAllInputsToDistTensor(mesh, self_tensor_ref, other_tensor_ref); + if (InputsContainDistTensor( + &mesh, self_tensor_ref_addr, other_tensor_ref_addr)) { + ConvertAllInputsToDistTensor( + mesh, self_tensor_ref_addr, other_tensor_ref_addr); } + + auto self_tensor_ref = self->tensor; + auto other_tensor_ref = CastPyArg2Tensor(other_obj, 0); + // got 0-d tensor, and need type promotion. The rules same with Tensor + + // Scalar. + TypePromotionForZeroDimTensor( + "less_than", self_tensor_ref, other_tensor_ref); + self_tensor = self_tensor_ref; other_tensor = other_tensor_ref; } else { @@ -1469,19 +1374,7 @@ static PyObject* tensor__lt__method(TensorObject* self, } } - // 3. promote types or unify right var type to left var - phi::DataType lhs_dtype = self_tensor.dtype(); - phi::DataType rhs_dtype = other_tensor.dtype(); - if (lhs_dtype != rhs_dtype) { - VLOG(6) << "The dtype of left and right Tensor are not the same, left " - "dtype is " - << lhs_dtype << ", but right dtype is " << rhs_dtype - << ", the right dtype will convert to " << lhs_dtype; - eager_gil_scoped_release guard; - other_tensor = cast_ad_func(other_tensor, lhs_dtype); - } - - // 4. calculation + // // 3. calculation VLOG(6) << "Calling less_than_ad_func in tensor__lt__method"; { eager_gil_scoped_release guard; @@ -1513,43 +1406,46 @@ static PyObject* tensor__le__method(TensorObject* self, // 1. 
scalar exists cases // there is no scalar function for __le__ now - float other_double = 0.0f; - bool has_other_double = false; if (PyFloat_Check(other_obj) || PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { if (PyFloat_Check(other_obj)) { - other_double = CastPyArg2Double(other_obj, "__le__", 0); // NOLINT - has_other_double = true; if (_supported_int_dtype_.find(self_tensor.dtype()) != _supported_int_dtype_.end()) { eager_gil_scoped_release guard; self_tensor = cast_ad_func(self_tensor, DataType::FLOAT32); } - } else if (PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { - other_double = CastPyArg2Double(other_obj, "__le__", 0); // NOLINT - has_other_double = true; + } else if (PyCheckInteger(other_obj) && + self_tensor.dtype() == DataType::BOOL) { + eager_gil_scoped_release guard; + self_tensor = cast_ad_func(self_tensor, DataType::INT64); + } + } else if (PyComplex_Check(other_obj)) { + if (is_support_complex(self_tensor.dtype()) == false) { + eager_gil_scoped_release guard; + self_tensor = cast_ad_func( + self_tensor, promoteTypes(self_tensor.dtype(), DataType::COMPLEX64)); } } // 2. create or get tensor for other_obj paddle::Tensor other_tensor; - if (has_other_double) { - eager_gil_scoped_release guard; - other_tensor = full_ad_func(self_tensor.shape(), - phi::Scalar(other_double), - self_tensor.dtype(), - self_tensor.place()); - const phi::distributed::ProcessMesh* mesh = nullptr; - if (InputsContainDistTensor(&mesh, self_tensor, other_tensor)) { - ConvertAllInputsToDistTensor(mesh, self_tensor, other_tensor); - } - } else if (PyCheckTensor(other_obj)) { - auto& self_tensor_ref = self->tensor; - auto& other_tensor_ref = CastPyArg2Tensor(other_obj, 0); + if (PyCheckTensor(other_obj)) { + auto& self_tensor_ref_addr = self->tensor; + auto& other_tensor_ref_addr = CastPyArg2Tensor(other_obj, 0); const phi::distributed::ProcessMesh* mesh = nullptr; - if (InputsContainDistTensor(&mesh, self_tensor_ref, other_tensor_ref)) { - ConvertAllInputsToDistTensor(mesh, self_tensor_ref, other_tensor_ref); + if (InputsContainDistTensor( + &mesh, self_tensor_ref_addr, other_tensor_ref_addr)) { + ConvertAllInputsToDistTensor( + mesh, self_tensor_ref_addr, other_tensor_ref_addr); } + + auto self_tensor_ref = self->tensor; + auto other_tensor_ref = CastPyArg2Tensor(other_obj, 0); + // got 0-d tensor, and need type promotion. The rules same with Tensor + + // Scalar. + TypePromotionForZeroDimTensor( + "less_equal", self_tensor_ref, other_tensor_ref); + self_tensor = self_tensor_ref; other_tensor = other_tensor_ref; } else { @@ -1578,19 +1474,7 @@ static PyObject* tensor__le__method(TensorObject* self, } } - // 3. promote types or unify right var type to left var - phi::DataType lhs_dtype = self_tensor.dtype(); - phi::DataType rhs_dtype = other_tensor.dtype(); - if (lhs_dtype != rhs_dtype) { - VLOG(6) << "The dtype of left and right Tensor are not the same, left " - "dtype is " - << lhs_dtype << ", but right dtype is " << rhs_dtype - << ", the right dtype will convert to " << lhs_dtype; - eager_gil_scoped_release guard; - other_tensor = cast_ad_func(other_tensor, lhs_dtype); - } - - // 4. calculation + // 3. calculation VLOG(6) << "Calling less_equal_ad_func in tensor__le__method"; { eager_gil_scoped_release guard; @@ -1623,43 +1507,46 @@ static PyObject* tensor__floordiv__method(TensorObject* self, // 1. scalar exists cases or not // there is no scalar case for floordiv, but alse need to cast self_tensor // in need. 
- double other_double = 0.0; - bool has_other_double = false; if (PyFloat_Check(other_obj) || PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { if (PyFloat_Check(other_obj)) { - other_double = CastPyArg2Double(other_obj, "__floordiv__", 0); - has_other_double = true; if (_supported_int_dtype_.find(self_tensor.dtype()) != _supported_int_dtype_.end()) { eager_gil_scoped_release guard; self_tensor = cast_ad_func(self_tensor, DataType::FLOAT32); } - } else if (PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { - other_double = CastPyArg2Double(other_obj, "__floordiv__", 0); - has_other_double = true; + } else if (PyCheckInteger(other_obj) && + self_tensor.dtype() == DataType::BOOL) { + eager_gil_scoped_release guard; + self_tensor = cast_ad_func(self_tensor, DataType::INT64); + } + } else if (PyComplex_Check(other_obj)) { + if (is_support_complex(self_tensor.dtype()) == false) { + eager_gil_scoped_release guard; + self_tensor = cast_ad_func( + self_tensor, promoteTypes(self_tensor.dtype(), DataType::COMPLEX64)); } } // 2. create or get tensor for other_obj paddle::Tensor other_tensor; - if (has_other_double) { - eager_gil_scoped_release guard; - other_tensor = full_ad_func(self_tensor.shape(), - phi::Scalar(other_double), - self_tensor.dtype(), - self_tensor.place()); - const phi::distributed::ProcessMesh* mesh = nullptr; - if (InputsContainDistTensor(&mesh, self_tensor, other_tensor)) { - ConvertAllInputsToDistTensor(mesh, self_tensor, other_tensor); - } - } else if (PyCheckTensor(other_obj)) { - auto& self_tensor_ref = self->tensor; - auto& other_tensor_ref = CastPyArg2Tensor(other_obj, 0); + if (PyCheckTensor(other_obj)) { + auto& self_tensor_ref_addr = self->tensor; + auto& other_tensor_ref_addr = CastPyArg2Tensor(other_obj, 0); const phi::distributed::ProcessMesh* mesh = nullptr; - if (InputsContainDistTensor(&mesh, self_tensor_ref, other_tensor_ref)) { - ConvertAllInputsToDistTensor(mesh, self_tensor_ref, other_tensor_ref); + if (InputsContainDistTensor( + &mesh, self_tensor_ref_addr, other_tensor_ref_addr)) { + ConvertAllInputsToDistTensor( + mesh, self_tensor_ref_addr, other_tensor_ref_addr); } + + auto self_tensor_ref = self->tensor; + auto other_tensor_ref = CastPyArg2Tensor(other_obj, 0); + // got 0-d tensor, and need type promotion. The rules same with Tensor + + // Scalar. + TypePromotionForZeroDimTensor( + "floor_divide", self_tensor_ref, other_tensor_ref); + self_tensor = self_tensor_ref; other_tensor = other_tensor_ref; } else { @@ -1686,22 +1573,7 @@ static PyObject* tensor__floordiv__method(TensorObject* self, } } - // 3. promote types or unify right var type to left var - phi::DataType lhs_dtype = self_tensor.dtype(); - phi::DataType rhs_dtype = other_tensor.dtype(); - if (lhs_dtype != rhs_dtype) { - // note: only op_type in _supported_promote_complex_types_ should promote - // dtype, floordiv is not in _supported_promote_complex_types_, will not do - // promote dtype - VLOG(6) << "The dtype of left and right Tensor are not the same, left " - "dtype is " - << lhs_dtype << ", but right dtype is " << rhs_dtype - << ", the right dtype will convert to " << lhs_dtype; - eager_gil_scoped_release guard; - other_tensor = cast_ad_func(other_tensor, lhs_dtype); - } - - // 4. calculation + // 3. calculation VLOG(6) << "Calling floor_divide_ad_func in tensor__floordiv__method"; { eager_gil_scoped_release guard; @@ -1735,33 +1607,50 @@ static PyObject* tensor__pow__method(TensorObject* self, // 1. 
scalar exists cases if (PyFloat_Check(other_obj) || PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { - double other = 0.0; if (PyFloat_Check(other_obj)) { - other = CastPyArg2Double(other_obj, "__pow__", 0); if (_supported_int_dtype_.find(self_tensor.dtype()) != _supported_int_dtype_.end()) { eager_gil_scoped_release guard; self_tensor = cast_ad_func(self_tensor, DataType::FLOAT32); } - } else if (PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { - other = CastPyArg2Double(other_obj, "__pow__", 0); + } else if (PyCheckInteger(other_obj) && + self_tensor.dtype() == DataType::BOOL) { + eager_gil_scoped_release guard; + self_tensor = cast_ad_func(self_tensor, DataType::INT64); } + double other = CastPyArg2Double(other_obj, "__pow__", 0); { eager_gil_scoped_release guard; ret = CallScalarFuction(self_tensor, other, "pow"); } return ToPyObject(ret); + } else if (PyComplex_Check(other_obj)) { + if (is_support_complex(self_tensor.dtype()) == false) { + eager_gil_scoped_release guard; + self_tensor = cast_ad_func( + self_tensor, promoteTypes(self_tensor.dtype(), DataType::COMPLEX64)); + } } // 2. create or get tensor for other_obj paddle::Tensor other_tensor; if (PyCheckTensor(other_obj)) { - auto& self_tensor_ref = self->tensor; - auto& other_tensor_ref = CastPyArg2Tensor(other_obj, 0); + auto& self_tensor_ref_addr = self->tensor; + auto& other_tensor_ref_addr = CastPyArg2Tensor(other_obj, 0); const phi::distributed::ProcessMesh* mesh = nullptr; - if (InputsContainDistTensor(&mesh, self_tensor_ref, other_tensor_ref)) { - ConvertAllInputsToDistTensor(mesh, self_tensor_ref, other_tensor_ref); + if (InputsContainDistTensor( + &mesh, self_tensor_ref_addr, other_tensor_ref_addr)) { + ConvertAllInputsToDistTensor( + mesh, self_tensor_ref_addr, other_tensor_ref_addr); } + + auto self_tensor_ref = self->tensor; + auto other_tensor_ref = CastPyArg2Tensor(other_obj, 0); + // got 0-d tensor, and need type promotion. The rules same with Tensor + + // Scalar. + TypePromotionForZeroDimTensor( + "elementwise_pow", self_tensor_ref, other_tensor_ref); + self_tensor = self_tensor_ref; other_tensor = other_tensor_ref; } else { @@ -1788,19 +1677,7 @@ static PyObject* tensor__pow__method(TensorObject* self, } } - // 3. promote types or unify right var type to left var - phi::DataType lhs_dtype = self_tensor.dtype(); - phi::DataType rhs_dtype = other_tensor.dtype(); - if (lhs_dtype != rhs_dtype) { - VLOG(6) << "The dtype of left and right Tensor are not the same, left " - "dtype is " - << lhs_dtype << ", but right dtype is " << rhs_dtype - << ", the right dtype will convert to " << lhs_dtype; - eager_gil_scoped_release guard; - other_tensor = cast_ad_func(other_tensor, lhs_dtype); - } - - // 4. calculation + // 3. calculation VLOG(6) << "Calling elementwise_pow_ad_func in tensor__pow__method"; { eager_gil_scoped_release guard; @@ -1832,45 +1709,48 @@ static PyObject* tensor__rpow__method(TensorObject* self, PyObject* other_obj = PyTuple_GET_ITEM(args, 0); // 1. scalar exists cases or not - // there is no scalar case for rpow, but alse need to cast self_tensor in + // there is no scalar case for rpow, but also need to cast self_tensor in // need. 
- double other_double = 0.0; - bool has_other_double = false; if (PyFloat_Check(other_obj) || PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { if (PyFloat_Check(other_obj)) { - other_double = CastPyArg2Double(other_obj, "__rpow__", 0); - has_other_double = true; if (_supported_int_dtype_.find(self_tensor.dtype()) != _supported_int_dtype_.end()) { eager_gil_scoped_release guard; self_tensor = cast_ad_func(self_tensor, DataType::FLOAT32); } - } else if (PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { - other_double = CastPyArg2Double(other_obj, "__rpow__", 0); - has_other_double = true; + } else if (PyCheckInteger(other_obj) && + self_tensor.dtype() == DataType::BOOL) { + eager_gil_scoped_release guard; + self_tensor = cast_ad_func(self_tensor, DataType::INT64); + } + } else if (PyComplex_Check(other_obj)) { + if (is_support_complex(self_tensor.dtype()) == false) { + eager_gil_scoped_release guard; + self_tensor = cast_ad_func( + self_tensor, promoteTypes(self_tensor.dtype(), DataType::COMPLEX64)); } } // 2. create or get tensor for other_obj paddle::Tensor other_tensor; - if (has_other_double) { - eager_gil_scoped_release guard; - other_tensor = full_ad_func(self_tensor.shape(), - phi::Scalar(other_double), - self_tensor.dtype(), - self_tensor.place()); - const phi::distributed::ProcessMesh* mesh = nullptr; - if (InputsContainDistTensor(&mesh, self_tensor, other_tensor)) { - ConvertAllInputsToDistTensor(mesh, self_tensor, other_tensor); - } - } else if (PyCheckTensor(other_obj)) { - auto& self_tensor_ref = self->tensor; - auto& other_tensor_ref = CastPyArg2Tensor(other_obj, 0); + if (PyCheckTensor(other_obj)) { + auto& self_tensor_ref_addr = self->tensor; + auto& other_tensor_ref_addr = CastPyArg2Tensor(other_obj, 0); const phi::distributed::ProcessMesh* mesh = nullptr; - if (InputsContainDistTensor(&mesh, self_tensor_ref, other_tensor_ref)) { - ConvertAllInputsToDistTensor(mesh, self_tensor_ref, other_tensor_ref); + if (InputsContainDistTensor( + &mesh, self_tensor_ref_addr, other_tensor_ref_addr)) { + ConvertAllInputsToDistTensor( + mesh, self_tensor_ref_addr, other_tensor_ref_addr); } + + auto self_tensor_ref = self->tensor; + auto other_tensor_ref = CastPyArg2Tensor(other_obj, 0); + // got 0-d tensor, and need type promotion. The rules same with Tensor + + // Scalar. + TypePromotionForZeroDimTensor( + "elementwise_pow", self_tensor_ref, other_tensor_ref); + self_tensor = self_tensor_ref; other_tensor = other_tensor_ref; } else { @@ -1899,19 +1779,7 @@ static PyObject* tensor__rpow__method(TensorObject* self, } } - // 3. promote types or unify right var type to left var - phi::DataType lhs_dtype = self_tensor.dtype(); - phi::DataType rhs_dtype = other_tensor.dtype(); - if (lhs_dtype != rhs_dtype) { - VLOG(6) << "The dtype of left and right Tensor are not the same, left " - "dtype is " - << lhs_dtype << ", but right dtype is " << rhs_dtype - << ", the right dtype will convert to " << lhs_dtype; - eager_gil_scoped_release guard; - other_tensor = cast_ad_func(other_tensor, lhs_dtype); - } - - // 4. calculation + // 3. calculation VLOG(6) << "Calling elementwise_pow_ad_func in tensor__rpow__method"; { eager_gil_scoped_release guard; @@ -1943,43 +1811,46 @@ static PyObject* tensor__ne__method(TensorObject* self, // 1. 
scalar exists cases // there is no scalar function for __ne__ now - double other_double = 0.0; - bool has_other_double = false; if (PyFloat_Check(other_obj) || PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { if (PyFloat_Check(other_obj)) { - other_double = CastPyArg2Double(other_obj, "__ne__", 0); - has_other_double = true; if (_supported_int_dtype_.find(self_tensor.dtype()) != _supported_int_dtype_.end()) { eager_gil_scoped_release guard; self_tensor = cast_ad_func(self_tensor, DataType::FLOAT32); } - } else if (PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { - other_double = CastPyArg2Double(other_obj, "__ne__", 0); - has_other_double = true; + } else if (PyCheckInteger(other_obj) && + self_tensor.dtype() == DataType::BOOL) { + eager_gil_scoped_release guard; + self_tensor = cast_ad_func(self_tensor, DataType::INT64); + } + } else if (PyComplex_Check(other_obj)) { + if (is_support_complex(self_tensor.dtype()) == false) { + eager_gil_scoped_release guard; + self_tensor = cast_ad_func( + self_tensor, promoteTypes(self_tensor.dtype(), DataType::COMPLEX64)); } } // 2. create or get tensor for other_obj paddle::Tensor other_tensor; - if (has_other_double) { - eager_gil_scoped_release guard; - other_tensor = full_ad_func(self_tensor.shape(), - phi::Scalar(other_double), - self_tensor.dtype(), - self_tensor.place()); - const phi::distributed::ProcessMesh* mesh = nullptr; - if (InputsContainDistTensor(&mesh, self_tensor, other_tensor)) { - ConvertAllInputsToDistTensor(mesh, self_tensor, other_tensor); - } - } else if (PyCheckTensor(other_obj)) { - auto& self_tensor_ref = self->tensor; - auto& other_tensor_ref = CastPyArg2Tensor(other_obj, 0); + if (PyCheckTensor(other_obj)) { + auto& self_tensor_ref_addr = self->tensor; + auto& other_tensor_ref_addr = CastPyArg2Tensor(other_obj, 0); const phi::distributed::ProcessMesh* mesh = nullptr; - if (InputsContainDistTensor(&mesh, self_tensor_ref, other_tensor_ref)) { - ConvertAllInputsToDistTensor(mesh, self_tensor_ref, other_tensor_ref); + if (InputsContainDistTensor( + &mesh, self_tensor_ref_addr, other_tensor_ref_addr)) { + ConvertAllInputsToDistTensor( + mesh, self_tensor_ref_addr, other_tensor_ref_addr); } + + auto self_tensor_ref = self->tensor; + auto other_tensor_ref = CastPyArg2Tensor(other_obj, 0); + // got 0-d tensor, and need type promotion. The rules same with Tensor + + // Scalar. + TypePromotionForZeroDimTensor( + "not_equal", self_tensor_ref, other_tensor_ref); + self_tensor = self_tensor_ref; other_tensor = other_tensor_ref; } else { @@ -1996,8 +1867,10 @@ static PyObject* tensor__ne__method(TensorObject* self, full_ad_func({1}, value, DataType::COMPLEX64, self_tensor.place()); } else { eager_gil_scoped_release guard; - other_tensor = - full_ad_func({1}, value, self_tensor.dtype(), self_tensor.place()); + other_tensor = full_ad_func(self_tensor.shape(), + value, + self_tensor.dtype(), + self_tensor.place()); } } const phi::distributed::ProcessMesh* mesh = nullptr; @@ -2006,19 +1879,7 @@ static PyObject* tensor__ne__method(TensorObject* self, } } - // 3. 
promote types or unify right var type to left var - phi::DataType lhs_dtype = self_tensor.dtype(); - phi::DataType rhs_dtype = other_tensor.dtype(); - if (lhs_dtype != rhs_dtype) { - VLOG(6) << "The dtype of left and right Tensor are not the same, left " - "dtype is " - << lhs_dtype << ", but right dtype is " << rhs_dtype - << ", the right dtype will convert to " << lhs_dtype; - eager_gil_scoped_release guard; - other_tensor = cast_ad_func(other_tensor, lhs_dtype); - } - - // 4. calculation + // 3. calculation VLOG(6) << "Calling not_equal_ad_func in tensor__ne__method"; { eager_gil_scoped_release guard; @@ -2050,43 +1911,45 @@ static PyObject* tensor__eq__method(TensorObject* self, // 1. scalar exists cases // there is no scalar function for __eq__ now - double other_double = 0.0; - bool has_other_double = false; if (PyFloat_Check(other_obj) || PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { if (PyFloat_Check(other_obj)) { - other_double = CastPyArg2Double(other_obj, "__eq__", 0); - has_other_double = true; if (_supported_int_dtype_.find(self_tensor.dtype()) != _supported_int_dtype_.end()) { eager_gil_scoped_release guard; self_tensor = cast_ad_func(self_tensor, DataType::FLOAT32); } - } else if (PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { - other_double = CastPyArg2Double(other_obj, "__eq__", 0); - has_other_double = true; + } else if (PyCheckInteger(other_obj) && + self_tensor.dtype() == DataType::BOOL) { + eager_gil_scoped_release guard; + self_tensor = cast_ad_func(self_tensor, DataType::INT64); + } + } else if (PyComplex_Check(other_obj)) { + if (is_support_complex(self_tensor.dtype()) == false) { + eager_gil_scoped_release guard; + self_tensor = cast_ad_func( + self_tensor, promoteTypes(self_tensor.dtype(), DataType::COMPLEX64)); } } // 2. create or get tensor for other_obj paddle::Tensor other_tensor; - if (has_other_double) { - eager_gil_scoped_release guard; - other_tensor = full_ad_func(self_tensor.shape(), - phi::Scalar(other_double), - self_tensor.dtype(), - self_tensor.place()); - const phi::distributed::ProcessMesh* mesh = nullptr; - if (InputsContainDistTensor(&mesh, self_tensor, other_tensor)) { - ConvertAllInputsToDistTensor(mesh, self_tensor, other_tensor); - } - } else if (PyCheckTensor(other_obj)) { - auto& self_tensor_ref = self->tensor; - auto& other_tensor_ref = CastPyArg2Tensor(other_obj, 0); + if (PyCheckTensor(other_obj)) { + auto& self_tensor_ref_addr = self->tensor; + auto& other_tensor_ref_addr = CastPyArg2Tensor(other_obj, 0); const phi::distributed::ProcessMesh* mesh = nullptr; - if (InputsContainDistTensor(&mesh, self_tensor_ref, other_tensor_ref)) { - ConvertAllInputsToDistTensor(mesh, self_tensor_ref, other_tensor_ref); + if (InputsContainDistTensor( + &mesh, self_tensor_ref_addr, other_tensor_ref_addr)) { + ConvertAllInputsToDistTensor( + mesh, self_tensor_ref_addr, other_tensor_ref_addr); } + + auto self_tensor_ref = self->tensor; + auto other_tensor_ref = CastPyArg2Tensor(other_obj, 0); + // got 0-d tensor, and need type promotion. The rules same with Tensor + + // Scalar. 
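The `__ne__` hunk above (and the `__eq__` hunk that follows) applies the same pattern: a bool tensor compared with a Python int is first cast to int64, a real tensor compared with a Python complex is first promoted to a complex dtype, and a 0-d tensor operand goes through `TypePromotionForZeroDimTensor` instead of being blindly cast to the left-hand dtype. A small dygraph sketch of the observable effect, assuming comparisons keep returning bool tensors as before:

```python
import paddle

flags = paddle.to_tensor([True, False])
# bool tensor vs. Python int: self is promoted to int64 internally,
# but the comparison result is still bool
print((flags != 1).dtype)  # paddle.bool

vals = paddle.to_tensor([1.0, 2.0])  # float32
# a 0-d float64 operand follows the Tensor + Scalar rules added above
print((vals != paddle.to_tensor(1.0, dtype='float64')).dtype)  # paddle.bool
```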
+ TypePromotionForZeroDimTensor("equal", self_tensor_ref, other_tensor_ref); + self_tensor = self_tensor_ref; other_tensor = other_tensor_ref; } else { @@ -2103,8 +1966,10 @@ static PyObject* tensor__eq__method(TensorObject* self, full_ad_func({1}, value, DataType::COMPLEX64, self_tensor.place()); } else { eager_gil_scoped_release guard; - other_tensor = - full_ad_func({1}, value, self_tensor.dtype(), self_tensor.place()); + other_tensor = full_ad_func(self_tensor.shape(), + value, + self_tensor.dtype(), + self_tensor.place()); } } const phi::distributed::ProcessMesh* mesh = nullptr; @@ -2113,19 +1978,7 @@ static PyObject* tensor__eq__method(TensorObject* self, } } - // 3. promote types or unify right var type to left var - phi::DataType lhs_dtype = self_tensor.dtype(); - phi::DataType rhs_dtype = other_tensor.dtype(); - if (lhs_dtype != rhs_dtype) { - VLOG(6) << "The dtype of left and right Tensor are not the same, left " - "dtype is " - << lhs_dtype << ", but right dtype is " << rhs_dtype - << ", the right dtype will convert to " << lhs_dtype; - eager_gil_scoped_release guard; - other_tensor = cast_ad_func(other_tensor, lhs_dtype); - } - - // 4. calculation + // 3. calculation VLOG(6) << "Calling equal_ad_func in tensor__eq__method"; { eager_gil_scoped_release guard; diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index 7a441734926c4..25a1851c148f3 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -947,9 +947,11 @@ PYBIND11_MODULE(libpaddle, m) { m.def("set_num_threads", &platform::SetNumThreads); m.def("need_type_promotion", - [](framework::proto::VarType::Type type_x, + [](const std::string &op_name, + framework::proto::VarType::Type type_x, framework::proto::VarType::Type type_y) { - return phi::NeedTypePromotion(framework::TransToPhiDataType(type_x), + return phi::NeedTypePromotion(op_name, + framework::TransToPhiDataType(type_x), framework::TransToPhiDataType(type_y)); }); m.def("get_promote_dtype", @@ -961,7 +963,13 @@ PYBIND11_MODULE(libpaddle, m) { framework::TransToPhiDataType(type_x), framework::TransToPhiDataType(type_y))); }); - + m.def("is_common_dtype_for_scalar", + [](framework::proto::VarType::Type type_x, + framework::proto::VarType::Type type_y) { + return phi::is_common_dtype_for_scalar( + framework::TransToPhiDataType(type_x), + framework::TransToPhiDataType(type_y)); + }); m.def("disable_signal_handler", &DisableSignalHandler); m.def("clear_gradients", diff --git a/paddle/phi/common/type_promotion.h b/paddle/phi/common/type_promotion.h index fdb3f1e717faf..e8d8af1221c0b 100644 --- a/paddle/phi/common/type_promotion.h +++ b/paddle/phi/common/type_promotion.h @@ -43,7 +43,8 @@ inline int DataTypeToNum(const DataType& dtype) { case DataType::BFLOAT16: return 11; default: - PD_THROW("Invalid enum data type for type promote `", dtype, "`."); + PADDLE_THROW(phi::errors::InvalidType( + "Invalid enum data type for type promote %s.", dtype)); } } @@ -82,7 +83,7 @@ inline static DataType promoteTypes(DataType x, DataType y) { return _promoteTypesLookup[DataTypeToNum(x)][DataTypeToNum(y)]; } -static inline bool is_support_float(DataType dtype) { +inline bool is_support_float(DataType dtype) { if (dtype == DataType::FLOAT16 || dtype == DataType::FLOAT32 || dtype == DataType::FLOAT64 || dtype == DataType::BFLOAT16) { return true; @@ -91,22 +92,72 @@ static inline bool is_support_float(DataType dtype) { } } +inline bool is_support_complex(DataType dtype) { + if (dtype == DataType::COMPLEX64 || dtype == 
DataType::COMPLEX128) { + return true; + } else { + return false; + } +} + +// only T+S support int type promotion +inline bool is_support_int(DataType dtype) { + if (dtype == DataType::UINT8 || dtype == DataType::INT8 || + dtype == DataType::INT16 || dtype == DataType::INT32 || + dtype == DataType::INT64) { + return true; + } else { + return false; + } +} + +inline bool is_common_dtype_for_scalar(DataType x, DataType y) { + if ((is_support_int(x) && is_support_int(y)) || + (is_support_float(x) && is_support_float(y)) || + (is_support_complex(x) && is_support_complex(y))) { + return true; + } else { + return false; + } +} + inline phi::DataType GetPromoteDtype(const std::string& op_name, const DataType x, const DataType y) { - // future will deal this by different rule - if (op_name == "greater_than") { - // bool logic - return DataType::BOOL; - } else { - return phi::promoteTypes(x, y); + if (op_name == "divide") { + // only T+S can run into this branch + if (is_support_int(x) && is_support_int(y)) { + return DataType::FLOAT32; + } } + return phi::promoteTypes(x, y); } -inline bool NeedTypePromotion(const DataType x, const DataType y) { - // Tensor + Tensor only support type promotion for float type - if ((x != y) && is_support_float(x) && is_support_float(y)) { - return true; +inline bool NeedTypePromotion(const std::string& op_name, + const DataType x, + const DataType y) { + // Tensor + Tensor type promotion only support calculations between + // floating-point numbers and between complex and real numbers. + if (x != y) { +// TODO(Xi Zhao): we got special case for add now, should remove it in furture. +#ifdef PADDLE_WITH_CUDA + if (op_name == "add" && x == DataType::FLOAT32 && + (y == phi::DataType::BFLOAT16 || y == phi::DataType::FLOAT16)) { + return false; + } +#endif + + if ((is_support_float(x) && is_support_float(y)) || + (is_support_complex(x) || is_support_complex(y))) { + return true; + } else { + PADDLE_THROW(phi::errors::InvalidType( + "Type promotion only support calculations between floating-point " + "numbers and between complex and real numbers. 
But got different " + "data type x: %s, y: %s.", + x, + y)); + } } else { return false; } diff --git a/paddle/phi/kernels/kps/elementwise_kernel.cu b/paddle/phi/kernels/kps/elementwise_kernel.cu index a6caa95b766e1..ca0ad275bcd1c 100644 --- a/paddle/phi/kernels/kps/elementwise_kernel.cu +++ b/paddle/phi/kernels/kps/elementwise_kernel.cu @@ -59,17 +59,19 @@ void MultiPrecisionAddKernelImpl(const Context& dev_ctx, std::vector inputs = {&x, &y}; std::vector outputs = {out}; if (y.dtype() == phi::DataType::BFLOAT16) { - funcs::ElementwiseKernel( + funcs::BroadcastKernel( dev_ctx, inputs, &outputs, - funcs::MultiPrecisionAddFunctor()); + funcs::MultiPrecisionAddFunctor(), + -1); } else if (y.dtype() == phi::DataType::FLOAT16) { - funcs::ElementwiseKernel( + funcs::BroadcastKernel( dev_ctx, inputs, &outputs, - funcs::MultiPrecisionAddFunctor()); + funcs::MultiPrecisionAddFunctor(), + -1); } else { PADDLE_THROW(phi::errors::InvalidArgument( "Unsupport x dtype:%s, y dtype:%s for add(x, y) operation", diff --git a/python/paddle/base/framework.py b/python/paddle/base/framework.py index b575fb3d04698..1b635daef7d64 100644 --- a/python/paddle/base/framework.py +++ b/python/paddle/base/framework.py @@ -58,14 +58,44 @@ _global_flags_ = core.globals() SUPPORT_PROMOTION_OPS_AND_INPUTNAME = { - "elementwise_add": ["X", "Y"], - "elementwise_add_grad": ["X", "Y"], - "elementwise_sub": ["X", "Y"], - "elementwise_sub_grad": ["X", "Y"], - "elementwise_mul": ["X", "Y"], - "elementwise_mul_grad": ["X", "Y"], - "where": ["X", "Y"], - "where_grad": ["X", "Y"], + "elementwise_add": ['X', 'Y'], + "elementwise_add_grad": ['X', 'Y'], + "elementwise_sub": ['X', 'Y'], + "elementwise_sub_grad": ['X', 'Y'], + "elementwise_mul": ['X', 'Y'], + "elementwise_mul_grad": ['X', 'Y'], + "elementwise_div": ['X', 'Y'], + "elementwise_div_grad": ['X', 'Y'], + "elementwise_floordiv": ['X', 'Y'], + "elementwise_floordiv_grad": ['X', 'Y'], + "elementwise_pow": ['X', 'Y'], + "elementwise_pow_grad": ['X', 'Y'], + "where": ['X', 'Y'], + "where_grad": ['X', 'Y'], + "equal": ['X', 'Y'], + "not_equal": ['X', 'Y'], + "less_than": ['X', 'Y'], + "less_equal": ['X', 'Y'], + "greater_than": ['X', 'Y'], + "greater_equal": ['X', 'Y'], + "logical_and": ['X', 'Y'], + "logical_or": ['X', 'Y'], + "logical_xor": ['X', 'Y'], + "elementwise_fmax": ['X', 'Y'], + "elementwise_fmax_grad": ['X', 'Y'], + "elementwise_fmin": ['X', 'Y'], + "elementwise_fmin_grad": ['X', 'Y'], + "elementwise_max": ['X', 'Y'], + "elementwise_max_grad": ['X', 'Y'], + "elementwise_min": ['X', 'Y'], + "elementwise_min_grad": ['X', 'Y'], + "elementwise_mod": ['X', 'Y'], + "elementwise_mod_grad": ['X', 'Y'], + "huber_loss": ['X', 'Y'], + "huber_loss_grad": ['X', 'Y'], + "nextafter": ['x', 'y'], + "atan2": ['X1', 'X2'], + "atan2_grad": ['X1', 'X2'], } @@ -8178,8 +8208,12 @@ def dtype_to_str(in_dtype): return "fp32" elif in_dtype == paddle.float64: return "fp64" + elif in_dtype == core.VarDesc.VarType.COMPLEX64: + return "complex64" + elif in_dtype == core.VarDesc.VarType.COMPLEX128: + return "complex128" else: - return None + raise TypeError(f"got unspport data type for promotion: {in_dtype}.") def add_cast_for_type_promotion(op, block, idx, var_name, out_dtype): @@ -8269,7 +8303,9 @@ def process_type_promotion(program): all_input_name_need_cast.append(input_arg_name) # only support promote between float - if core.need_type_promotion(*all_dtypes): + if len(all_dtypes) == 2 and core.need_type_promotion( + op.type, *all_dtypes + ): common_dtype = core.get_promote_dtype(op.type, 
*all_dtypes) for input_name_need_cast in all_input_name_need_cast: var_name = op.block._var_recursive(input_name_need_cast) diff --git a/python/paddle/base/layers/math_op_patch.py b/python/paddle/base/layers/math_op_patch.py index 59654b03ecc8c..92b6d9a29d9f7 100644 --- a/python/paddle/base/layers/math_op_patch.py +++ b/python/paddle/base/layers/math_op_patch.py @@ -32,6 +32,11 @@ core.VarDesc.VarType.INT32, core.VarDesc.VarType.INT64, ] +_supported_complex_dtype_ = [ + core.VarDesc.VarType.COMPLEX64, + core.VarDesc.VarType.COMPLEX128, +] + compare_ops = ['__eq__', '__ne__', '__lt__', '__le__', '__gt__', '__ge__'] @@ -42,6 +47,20 @@ "__rsub__", "__mul__", "__rmul__", + "__mod__", + "__div__", + "__rdiv__", + "__truediv__", + "__rtruediv__", + "__floordiv__", + "__pow__", + "__rpow__", + "__eq__", + "__ne__", + "__lt__", + "__le__", + "__gt__", + "__ge__", ] EXPRESSION_MAP = { @@ -575,10 +594,19 @@ def __impl__(self, other_var): and self.dtype in _supported_int_dtype_ ): self = astype(self, 'float32') + # bool(tensor) + int(scalar) will do type promotion to int64 + if self.dtype == core.VarDesc.VarType.BOOL: + self = astype(self, 'int64') # here use `scale` replace `elementwise` to get better performance # but only +, -, *, / can use this method if scalar_method is not None: return scalar_method(self, other_var) + elif isinstance(other_var, complex): + if self.dtype not in _supported_complex_dtype_: + self = astype(self, 'complex64') + other_var = create_new_tmp_var( + current_block(self), dtype='complex64' + ) else: # do nothing pass @@ -612,7 +640,29 @@ def __impl__(self, other_var): if lhs_dtype != rhs_dtype: if method_name in SUPPORT_PROMOTION_OPS: - if core.need_type_promotion(lhs_dtype, rhs_dtype): + # different major types or both 0-d tensor follow with T+T rule. + if len(other_var.shape) == 0 or len(self.shape) == 0: + if not core.is_common_dtype_for_scalar( + lhs_dtype, rhs_dtype + ) or ( + len(other_var.shape) == 0 and len(self.shape) == 0 + ): + promote_type = core.get_promote_dtype( + op_type, lhs_dtype, rhs_dtype + ) + if lhs_dtype != promote_type: + self = astype(self, promote_type) + if rhs_dtype != promote_type: + other_var = astype(other_var, promote_type) + # common major types follow with tensor: int32(tensor) + int64(scalar) = int32 + else: + if len(self.shape) == 0: + self = astype(self, rhs_dtype) + else: + other_var = astype(other_var, lhs_dtype) + elif core.need_type_promotion( + op_type, lhs_dtype, rhs_dtype + ): # only report warning here, real promotion deal in Executor warnings.warn( f"The input dtypes of OP {op_type} are {lhs_dtype} and {rhs_dtype}, the output will be auto-promoted" @@ -620,13 +670,10 @@ def __impl__(self, other_var): warnings.filterwarnings( "ignore", message="The input dtypes of OP" ) - else: - # NOTE(zoooo0820): Currently, we still keep the old illogical \ - # logic for compatibility reasons - other_var = astype(other_var, lhs_dtype) - else: - other_var = astype(other_var, lhs_dtype) + raise TypeError( + f"got different data type in {op_type} between {lhs_dtype} and {rhs_dtype}." 
+ ) if reverse: tmp = self @@ -634,11 +681,12 @@ def __impl__(self, other_var): other_var = tmp if ( - op_type == "divide" or op_type == "elementwise_div" - ) and self.dtype in _supported_int_dtype_: + (op_type == "divide" or op_type == "elementwise_div") + and self.dtype in _supported_int_dtype_ + and self.dtype == other_var.dtype + ): self = astype(self, 'float32') other_var = astype(other_var, 'float32') - # NOTE(zhiqiu): the output of compare operator should be bool. if method_name in compare_ops: out = create_new_tmp_var(current_block(self), dtype="bool") diff --git a/python/paddle/distribution/gumbel.py b/python/paddle/distribution/gumbel.py index 005801ae6b7cc..a388f62c512b6 100644 --- a/python/paddle/distribution/gumbel.py +++ b/python/paddle/distribution/gumbel.py @@ -189,9 +189,11 @@ def prob(self, value): Tensor: probability.The data type is same with value. """ - y = (self.loc - value) / self.scale + y = (self.loc - value.astype(self.loc.dtype)) / self.scale.astype( + self.loc.dtype + ) - return paddle.exp(y - paddle.exp(y)) / self.scale + return paddle.exp(y - paddle.exp(y)) / self.scale.astype(y.dtype) def log_prob(self, value): """Log probability density/mass function. @@ -214,7 +216,12 @@ def cdf(self, value): Tensor: cumulative probability of value. """ - return paddle.exp(-paddle.exp(-(value - self.loc) / self.scale)) + return paddle.exp( + -paddle.exp( + -(value - self.loc.astype(value.dtype)) + / self.scale.astype(value.dtype) + ) + ) def entropy(self): """Entropy of Gumbel distribution. diff --git a/python/paddle/metric/metrics.py b/python/paddle/metric/metrics.py index 5ef30f909939a..8b46c9d693146 100644 --- a/python/paddle/metric/metrics.py +++ b/python/paddle/metric/metrics.py @@ -271,7 +271,7 @@ def compute(self, pred, label, *args): elif label.shape[-1] != 1: # one-hot label label = paddle.argmax(label, axis=-1, keepdim=True) - correct = pred == label + correct = pred == label.astype(pred.dtype) return paddle.cast(correct, dtype='float32') def update(self, correct, *args): diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py index 2fe02ef0a2259..d15495993ce0e 100644 --- a/python/paddle/nn/functional/loss.py +++ b/python/paddle/nn/functional/loss.py @@ -1594,7 +1594,6 @@ def poisson_nll_loss( if not (input.shape == label.shape): raise ValueError("input's shape must equal to label's shape") - label = paddle.cast(label, input.dtype) loss_out = 0 if log_input: loss_out = paddle.exp(input) - label * input @@ -1975,7 +1974,7 @@ def warpctc( loss_out = paddle.squeeze(loss_out, [-1]) assert reduction in ['mean', 'sum', 'none'] if reduction == 'mean': - loss_out = paddle.mean(loss_out / label_lengths) + loss_out = paddle.mean(loss_out / label_lengths.astype(loss_out.dtype)) elif reduction == 'sum': loss_out = paddle.sum(loss_out) return loss_out @@ -2932,21 +2931,27 @@ def cross_entropy( if weight is None: mask = paddle.cast(mask, dtype=out_sum.dtype) count = _C_ops.sum(mask, [], None, False) - ret = out_sum / (count + (count == 0.0)) + ret = out_sum / (count + (count == 0.0).astype(count.dtype)) else: mask = paddle.cast(mask, weight_gather_reshape.dtype) weight_ignored = _C_ops.multiply( mask, weight_gather_reshape ) weight_sum = _C_ops.sum(weight_ignored, [], None, False) - ret = out_sum / (weight_sum + (weight_sum == 0.0)) + ret = out_sum / ( + weight_sum + + (weight_sum == 0.0).astype(weight_sum.dtype) + ) return ret elif weight is not None: out_sum = _C_ops.sum(out, [], None, False) total_weight = _C_ops.sum( weight_gather_reshape, 
[], None, False ) - return out_sum / (total_weight + (total_weight == 0.0)) + return out_sum / ( + total_weight + + (total_weight == 0.0).astype(total_weight.dtype) + ) else: return _C_ops.mean_all(out) diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py index 6e0edd1394c99..dd490cff36995 100644 --- a/python/paddle/tensor/linalg.py +++ b/python/paddle/tensor/linalg.py @@ -1731,7 +1731,10 @@ def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None): "The value of Input(fweights) cannot be negative, but received " f"min of Input(fweights) is {fweights.min()}." ) - if not paddle.all(fweights == paddle.round(fweights.astype('float64'))): + if not paddle.all( + fweights + == paddle.round(fweights.astype('float64')).astype(fweights.dtype) + ): raise ValueError("Input(fweights) must be integer ") if aweights is not None: @@ -1773,7 +1776,7 @@ def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None): nx_w = nx if w is not None and aweights is not None and ddof: - norm_factor = w_sum - (w * aweights).sum() / w_sum + norm_factor = w_sum - (w * aweights.astype(w.dtype)).sum() / w_sum else: norm_factor = w_sum - ddof norm_factor = paddle.clip(norm_factor, min=0) diff --git a/python/paddle/tensor/logic.py b/python/paddle/tensor/logic.py index 38855bd422147..3953320983467 100755 --- a/python/paddle/tensor/logic.py +++ b/python/paddle/tensor/logic.py @@ -86,11 +86,6 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True): helper = LayerHelper(op_name, **locals()) - if binary_op and x.dtype != y.dtype: - raise ValueError( - f"(InvalidArgument) The DataType of {op_name} Op's Variable must be consistent, but received {x.dtype} and {y.dtype}." - ) - if out is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py index b6f84fdf74130..2ded7103279ae 100644 --- a/python/paddle/tensor/math.py +++ b/python/paddle/tensor/math.py @@ -779,8 +779,11 @@ def logaddexp(x, y, name=None): Tensor(shape=[3], dtype=float64, place=Place(cpu), stop_gradient=True, [-0.30685282, -0.68673831, -0.87307199]) """ - - return paddle.log1p(paddle.exp(-paddle.abs(x - y))) + paddle.maximum(x, y) + log_1p = paddle.log1p(paddle.exp(-paddle.abs(x - y))) + maximum = paddle.maximum(x, y) + if maximum.dtype == paddle.int32 or maximum.dtype == paddle.int64: + maximum = maximum.astype(log_1p.dtype) + return log_1p + maximum def subtract(x, y, name=None): @@ -1103,11 +1106,6 @@ def multiply(x, y, name=None): if in_dynamic_or_pir_mode(): return _C_ops.multiply(x, y) else: - if x.dtype != y.dtype: - raise TypeError( - f'Input tensors must be same type, but received type of x: {x.dtype}, type of y: {y.dtype} ' - ) - return _elementwise_op(LayerHelper('elementwise_mul', **locals())) diff --git a/python/paddle/tensor/stat.py b/python/paddle/tensor/stat.py index d7e3a7a7d6e87..545b6bf21ca9e 100644 --- a/python/paddle/tensor/stat.py +++ b/python/paddle/tensor/stat.py @@ -540,7 +540,9 @@ def median(x, axis=None, keepdim=False, mode='avg', name=None): ) out_tensor = out_tensor + paddle.sum( - paddle.cast(paddle.isnan(x), dtype=dtype) * x, axis=axis, keepdim=True + paddle.cast(paddle.isnan(x), dtype=dtype) * x.astype(dtype), + axis=axis, + keepdim=True, ) if is_flatten: if keepdim: @@ -708,7 +710,7 @@ def _compute_index(index): if interpolation == "midpoint": return (tensor_upper + tensor_below) / 2 - weights = (index - indices_below).astype(x.dtype) + weights = (index - 
indices_below.astype(index.dtype)).astype(x.dtype) # "linear" return paddle.lerp( tensor_below.astype(x.dtype), diff --git a/test/auto_parallel/test_to_static.py b/test/auto_parallel/test_to_static.py index 7a3f9f204f61b..a173745a50845 100644 --- a/test/auto_parallel/test_to_static.py +++ b/test/auto_parallel/test_to_static.py @@ -112,7 +112,7 @@ def test_apply_optimizer(self): program_helper.to('train') forward_ops = program_helper.main_program.block(0).ops - self.assertEqual(len(forward_ops), 17) + self.assertEqual(len(forward_ops), 18) # step 2: apply optimizer to generate whole program optimize_ops, _ = program_helper.apply_optimizer(optimizer) @@ -122,7 +122,7 @@ def test_apply_optimizer(self): for op in program_helper.main_program.block(0).ops if op.type == 'sgd' ] - self.assertEqual(len(all_ops), 37) + self.assertEqual(len(all_ops), 38) self.assertEqual(len(optimize_ops), len(sgd_ops)) program_helper.reset() diff --git a/test/deprecated/legacy_test/test_math_op_patch_var_base.py b/test/deprecated/legacy_test/test_math_op_patch_var_base.py index 93581476d1092..56df04664bc35 100644 --- a/test/deprecated/legacy_test/test_math_op_patch_var_base.py +++ b/test/deprecated/legacy_test/test_math_op_patch_var_base.py @@ -542,8 +542,8 @@ def test_add_different_dtype(self): np.testing.assert_array_equal(res.numpy(), a_np + b_np) def test_floordiv_different_dtype(self): - a_np = np.full(self.shape, 10, np.int64) - b_np = np.full(self.shape, 2, np.int32) + a_np = np.full(self.shape, 10, np.float32) + b_np = np.full(self.shape, 2, np.float16) with base.dygraph.guard(): a = paddle.to_tensor(a_np) b = paddle.to_tensor(b_np) diff --git a/test/deprecated/legacy_test/test_save_inference_model_conditional_op.py b/test/deprecated/legacy_test/test_save_inference_model_conditional_op.py index bec0bc539c9a5..e1ae033231a1c 100644 --- a/test/deprecated/legacy_test/test_save_inference_model_conditional_op.py +++ b/test/deprecated/legacy_test/test_save_inference_model_conditional_op.py @@ -69,7 +69,7 @@ def __init__(self): super().__init__() def forward(self, x): - y = paddle.to_tensor([5]) + y = paddle.to_tensor([5], dtype='int32') if x > y: x = x + 1 else: diff --git a/test/deprecated/legacy_test/test_sparse_sum_op.py b/test/deprecated/legacy_test/test_sparse_sum_op.py index 8d245508b3d3e..955986508d1a3 100644 --- a/test/deprecated/legacy_test/test_sparse_sum_op.py +++ b/test/deprecated/legacy_test/test_sparse_sum_op.py @@ -62,7 +62,9 @@ def check_result( mask = paddle.randint(0, 2, x_shape) # "+ 1" to make sure that all zero elements in "origin_x" is caused by multiplying by "mask", # or the backward checks may fail. 
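The explicit `astype` calls added to the sparse tests below reflect the stricter tensor-tensor rule introduced earlier in this patch: multiplying a floating tensor by an integer mask is no longer silently unified to the left-hand dtype, so the caller casts the mask up front. A hedged sketch of the pattern the tests now use:

```python
import paddle

x = paddle.rand([2, 3], dtype='float64')
mask = paddle.randint(0, 2, [2, 3])  # int64 mask

# Cast the mask explicitly so both operands share a floating dtype;
# mixing int64 and float64 directly is expected to be rejected by the
# new NeedTypePromotion check.
masked = x * mask.astype('float64')
print(masked.dtype)  # paddle.float64
```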
- origin_x = (paddle.rand(x_shape, dtype='float64') + 1) * mask + origin_x = ( + paddle.rand(x_shape, dtype='float64') + 1 + ) * mask.astype('float64') dense_x = origin_x.detach() dense_x.stop_gradient = False dense_out = paddle.sum(dense_x, dims, keepdim=keepdim, dtype=dtype) @@ -76,7 +78,7 @@ def check_result( sp_out.backward() np.testing.assert_allclose( sp_x.grad.to_dense().numpy(), - (dense_x.grad * mask).numpy(), + (dense_x.grad * mask.astype(dense_x.grad.dtype)).numpy(), rtol=1e-05, ) @@ -123,7 +125,9 @@ def check_result_coo(self, x_shape, dims, keepdim, dtype=None): mask = paddle.randint(0, 2, x_shape) while paddle.sum(mask) == 0: mask = paddle.randint(0, 2, x_shape) - origin_data = (paddle.rand(x_shape, dtype='float32') + 1) * mask + origin_data = ( + paddle.rand(x_shape, dtype='float32') + 1 + ) * mask.astype('float32') sparse_data = origin_data.detach().to_sparse_coo( sparse_dim=len(x_shape) ) diff --git a/test/deprecated/legacy_test/test_tensor_scalar_type_promotion_static.py b/test/deprecated/legacy_test/test_tensor_scalar_type_promotion_static.py index 8ec524e579e7d..ed17cc32bea97 100644 --- a/test/deprecated/legacy_test/test_tensor_scalar_type_promotion_static.py +++ b/test/deprecated/legacy_test/test_tensor_scalar_type_promotion_static.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -21,11 +21,13 @@ # Support types are ref from `paddle.tensor.math` # - Related paddle dtypes: -# - int type: int64, (no test here: uint8, int8, int16, int32) -# - float type: float32, (no test here: float64) +# - int type: int64, uint8, int8, int16, int32 +# - float type: float16, bfloat16, float32, float64 +# - complex type: complex64, complex128 # - Python scalar dtypes: # - int(64) -# - float(64) +# - float(32) +# - complex(64) class TestTensorScalarTypePromotionStatic(unittest.TestCase): @@ -363,5 +365,429 @@ def test_tensor_mod_scalar(self): self.check_operation(a, b, c, '%') +def create_test_case( + baseclass, + dtype, + expected_out_dtype_with_int=None, + expected_out_dtype_with_float=None, + expected_out_dtype_with_complex=None, +): + class TestPromotion(baseclass): + def set_dtype(self): + self.dtype = dtype + self.expected_out_dtype_with_int = expected_out_dtype_with_int + self.expected_out_dtype_with_float = expected_out_dtype_with_float + # static not support compute with scalar complex + self.expected_out_dtype_with_complex = ( + expected_out_dtype_with_complex + ) + + cls_name = f"{baseclass.__name__}{dtype}" + TestPromotion.__name__ = cls_name + globals()[cls_name] = TestPromotion + + +class TestTensorAddScalar(unittest.TestCase): + def setUp(self): + paddle.enable_static() + self.set_dtype() + self.exe = paddle.static.Executor() + + def set_dtype(self): + self.dtype = 'float32' + self.expected_out_dtype_with_int = 'float32' + self.expected_out_dtype_with_float = 'float32' + self.expected_out_dtype_with_complex = 'complex64' + + def generate_test_value(self): + self.value = paddle.rand([2, 3, 4]).astype(self.dtype) + + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + out_int = self.value + 1 + out_float = self.value + 1.0 + # out_complex = self.value + 2j + res = self.exe.run(prog, fetch_list=[out_int, out_float]) + return res + + def test_dtype_is_expected(self): + 
res_int, res_float = self.run_api() + self.assertEqual( + res_int.dtype.__str__(), self.expected_out_dtype_with_int + ) + self.assertEqual( + res_float.dtype.__str__(), self.expected_out_dtype_with_float + ) + # self.assertEqual(res_complex.dtype.__str__(), self.expected_out_dtype_with_complex) + + +create_test_case(TestTensorAddScalar, 'bool', 'int64', 'float32', 'complex64') +create_test_case(TestTensorAddScalar, 'uint8', 'uint8', 'float32', 'complex64') +create_test_case(TestTensorAddScalar, 'int8', 'int8', 'float32', 'complex64') +create_test_case(TestTensorAddScalar, 'int32', 'int32', 'float32', 'complex64') +create_test_case(TestTensorAddScalar, 'int64', 'int64', 'float32', 'complex64') +create_test_case( + TestTensorAddScalar, 'float16', 'float16', 'float16', 'complex64' +) +create_test_case( + TestTensorAddScalar, 'bfloat16', 'uint16', 'uint16', 'complex64' +) +create_test_case( + TestTensorAddScalar, 'float64', 'float64', 'float64', 'complex128' +) +create_test_case( + TestTensorAddScalar, 'complex64', 'complex64', 'complex64', 'complex64' +) +create_test_case( + TestTensorAddScalar, 'complex128', 'complex128', 'complex128', 'complex128' +) + + +class TestTensorSubScalar(unittest.TestCase): + def setUp(self): + paddle.enable_static() + self.set_dtype() + self.exe = paddle.static.Executor() + + def set_dtype(self): + self.dtype = 'float32' + self.expected_out_dtype_with_int = 'float32' + self.expected_out_dtype_with_float = 'float32' + self.expected_out_dtype_with_complex = 'complex64' + + def generate_test_value(self): + self.value = paddle.rand([2, 3, 4]).astype(self.dtype) + + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + out_int = self.value - 1 + out_float = self.value - 1.0 + # out_complex = self.value - 2j + res = self.exe.run(prog, fetch_list=[out_int, out_float]) + return res + + def test_dtype_is_expected(self): + res_int, res_float = self.run_api() + self.assertEqual( + res_int.dtype.__str__(), self.expected_out_dtype_with_int + ) + self.assertEqual( + res_float.dtype.__str__(), self.expected_out_dtype_with_float + ) + # self.assertEqual(res_complex.dtype.__str__(), self.expected_out_dtype_with_complex) + + +create_test_case(TestTensorSubScalar, 'bool', 'int64', 'float32', 'complex64') +create_test_case(TestTensorSubScalar, 'uint8', 'uint8', 'float32', 'complex64') +create_test_case(TestTensorSubScalar, 'int8', 'int8', 'float32', 'complex64') +create_test_case(TestTensorSubScalar, 'int32', 'int32', 'float32', 'complex64') +create_test_case(TestTensorSubScalar, 'int64', 'int64', 'float32', 'complex64') +create_test_case( + TestTensorSubScalar, 'float16', 'float16', 'float16', 'complex64' +) +create_test_case( + TestTensorSubScalar, 'bfloat16', 'uint16', 'uint16', 'complex64' +) +create_test_case( + TestTensorSubScalar, 'float64', 'float64', 'float64', 'complex128' +) +create_test_case( + TestTensorSubScalar, 'complex64', 'complex64', 'complex64', 'complex64' +) +create_test_case( + TestTensorSubScalar, 'complex128', 'complex128', 'complex128', 'complex128' +) + + +class TestTensorDivScalar(unittest.TestCase): + def setUp(self): + paddle.enable_static() + self.set_dtype() + self.exe = paddle.static.Executor() + + def set_dtype(self): + self.dtype = 'float32' + self.expected_out_dtype_with_int = 'float32' + self.expected_out_dtype_with_float = 'float32' + self.expected_out_dtype_with_complex = 'complex64' + + def generate_test_value(self): + self.value = paddle.rand([2, 3, 4]).astype(self.dtype) 
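For context on the divide expectations registered a few lines below for `TestTensorDivScalar` (bool and integer inputs map to float32 for both int and float scalars): dividing an integer tensor by a Python scalar is computed in float32 under the rules this patch adds. A small static-graph sketch mirroring how these tests drive the API; the program setup is illustrative only.

```python
import paddle

paddle.enable_static()
exe = paddle.static.Executor()

prog = paddle.static.Program()
with paddle.static.program_guard(prog):
    x = paddle.ones([2, 3], dtype='int64')
    out = x / 2  # int tensor / int scalar
(res,) = exe.run(prog, fetch_list=[out])
print(res.dtype)  # float32
```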
+ + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + out_int = self.value / 1 + out_float = self.value / 1.0 + # out_complex = self.value / 2j + res = self.exe.run(prog, fetch_list=[out_int, out_float]) + return res + + def test_dtype_is_expected(self): + res_int, res_float = self.run_api() + self.assertEqual( + res_int.dtype.__str__(), self.expected_out_dtype_with_int + ) + self.assertEqual( + res_float.dtype.__str__(), self.expected_out_dtype_with_float + ) + # self.assertEqual(res_complex.dtype.__str__(), self.expected_out_dtype_with_complex) + + +create_test_case(TestTensorDivScalar, 'bool', 'float32', 'float32', 'complex64') +create_test_case( + TestTensorDivScalar, 'uint8', 'float32', 'float32', 'complex64' +) +create_test_case(TestTensorDivScalar, 'int8', 'float32', 'float32', 'complex64') +create_test_case( + TestTensorDivScalar, 'int32', 'float32', 'float32', 'complex64' +) +create_test_case( + TestTensorDivScalar, 'int64', 'float32', 'float32', 'complex64' +) +create_test_case( + TestTensorDivScalar, 'float16', 'float16', 'float16', 'complex64' +) +create_test_case( + TestTensorDivScalar, 'bfloat16', 'uint16', 'uint16', 'complex64' +) +create_test_case( + TestTensorDivScalar, 'float64', 'float64', 'float64', 'complex128' +) +create_test_case( + TestTensorDivScalar, 'complex64', 'complex64', 'complex64', 'complex64' +) +create_test_case( + TestTensorDivScalar, 'complex128', 'complex128', 'complex128', 'complex128' +) + + +class TestTensorMulScalar(unittest.TestCase): + def setUp(self): + paddle.enable_static() + self.set_dtype() + self.exe = paddle.static.Executor() + + def set_dtype(self): + self.dtype = 'float32' + self.expected_out_dtype_with_int = 'float32' + self.expected_out_dtype_with_float = 'float32' + self.expected_out_dtype_with_complex = 'complex64' + + def generate_test_value(self): + self.value = paddle.rand([2, 3, 4]).astype(self.dtype) + + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + out_int = self.value * 1 + out_float = self.value * 1.0 + # out_complex = self.value * 2j + res = self.exe.run(prog, fetch_list=[out_int, out_float]) + return res + + def test_dtype_is_expected(self): + res_int, res_float = self.run_api() + self.assertEqual( + res_int.dtype.__str__(), self.expected_out_dtype_with_int + ) + self.assertEqual( + res_float.dtype.__str__(), self.expected_out_dtype_with_float + ) + # self.assertEqual(res_complex.dtype.__str__(), self.expected_out_dtype_with_complex) + + +create_test_case(TestTensorMulScalar, 'bool', 'int64', 'float32', 'complex64') +create_test_case(TestTensorMulScalar, 'uint8', 'uint8', 'float32', 'complex64') +create_test_case(TestTensorMulScalar, 'int8', 'int8', 'float32', 'complex64') +create_test_case(TestTensorMulScalar, 'int32', 'int32', 'float32', 'complex64') +create_test_case(TestTensorMulScalar, 'int64', 'int64', 'float32', 'complex64') +create_test_case( + TestTensorMulScalar, 'float16', 'float16', 'float16', 'complex64' +) +create_test_case( + TestTensorMulScalar, 'bfloat16', 'uint16', 'uint16', 'complex64' +) +create_test_case( + TestTensorMulScalar, 'float64', 'float64', 'float64', 'complex128' +) +create_test_case( + TestTensorMulScalar, 'complex64', 'complex64', 'complex64', 'complex64' +) +create_test_case( + TestTensorMulScalar, 'complex128', 'complex128', 'complex128', 'complex128' +) + + +class TestTensorPowScalar(unittest.TestCase): + def setUp(self): + 
paddle.enable_static() + self.set_dtype() + self.exe = paddle.static.Executor() + + def set_dtype(self): + self.dtype = 'float32' + self.expected_out_dtype_with_int = 'float32' + self.expected_out_dtype_with_float = 'float32' + self.expected_out_dtype_with_complex = 'complex64' + + def generate_test_value(self): + self.value = paddle.rand([2, 3, 4]).astype(self.dtype) + + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + out_int = self.value**1 + out_float = self.value**1.0 + # pow API not support complex + res = self.exe.run(prog, fetch_list=[out_int, out_float]) + return res + + def test_dtype_is_expected(self): + res_int, res_float = self.run_api() + self.assertEqual( + res_int.dtype.__str__(), self.expected_out_dtype_with_int + ) + self.assertEqual( + res_float.dtype.__str__(), self.expected_out_dtype_with_float + ) + # self.assertEqual(res_complex.dtype.__str__(), self.expected_out_dtype_with_complex) + + +# pow API support int32, int64, float64, float32. +create_test_case(TestTensorPowScalar, 'int32', 'int32', 'float32', 'complex64') +create_test_case(TestTensorPowScalar, 'int64', 'int64', 'float32', 'complex64') +create_test_case( + TestTensorPowScalar, 'float64', 'float64', 'float64', 'complex128' +) + + +class TestTensorFloorDivScalar(unittest.TestCase): + def setUp(self): + paddle.enable_static() + self.set_dtype() + self.exe = paddle.static.Executor() + + def set_dtype(self): + self.dtype = 'float32' + self.expected_out_dtype_with_int = 'float32' + self.expected_out_dtype_with_float = 'float32' + self.expected_out_dtype_with_complex = 'complex64' + + def generate_test_value(self): + self.value = paddle.rand([2, 3, 4]).astype(self.dtype) + + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + out_int = self.value // 1 + out_float = self.value // 1.0 + # floor_div API not support complex + res = self.exe.run(prog, fetch_list=[out_int, out_float]) + return res + + def test_dtype_is_expected(self): + res_int, res_float = self.run_api() + self.assertEqual( + res_int.dtype.__str__(), self.expected_out_dtype_with_int + ) + self.assertEqual( + res_float.dtype.__str__(), self.expected_out_dtype_with_float + ) + # self.assertEqual(res_complex.dtype.__str__(), self.expected_out_dtype_with_complex) + + +# floor_div API not support complex64, complex128 +create_test_case( + TestTensorFloorDivScalar, 'bool', 'int64', 'float32', 'complex64' +) +create_test_case( + TestTensorFloorDivScalar, 'uint8', 'uint8', 'float32', 'complex64' +) +create_test_case( + TestTensorFloorDivScalar, 'int8', 'int8', 'float32', 'complex64' +) +create_test_case( + TestTensorFloorDivScalar, 'int32', 'int32', 'float32', 'complex64' +) +create_test_case( + TestTensorFloorDivScalar, 'int64', 'int64', 'float32', 'complex64' +) +create_test_case( + TestTensorFloorDivScalar, 'float16', 'float16', 'float16', 'complex64' +) +create_test_case( + TestTensorFloorDivScalar, 'bfloat16', 'uint16', 'uint16', 'complex64' +) +create_test_case( + TestTensorFloorDivScalar, 'float64', 'float64', 'float64', 'complex128' +) + + +class TestTensorModScalar(unittest.TestCase): + def setUp(self): + paddle.enable_static() + self.set_dtype() + self.exe = paddle.static.Executor() + + def set_dtype(self): + self.dtype = 'float32' + self.expected_out_dtype_with_int = 'float32' + self.expected_out_dtype_with_float = 'float32' + self.expected_out_dtype_with_complex = 'complex64' + + def 
generate_test_value(self): + self.value = paddle.rand([2, 3, 4]).astype(self.dtype) + + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + out_int = self.value % 1 + out_float = self.value % 1.0 + # mod API not support complex + res = self.exe.run(prog, fetch_list=[out_int, out_float]) + return res + + def test_dtype_is_expected(self): + res_int, res_float = self.run_api() + self.assertEqual( + res_int.dtype.__str__(), self.expected_out_dtype_with_int + ) + self.assertEqual( + res_float.dtype.__str__(), self.expected_out_dtype_with_float + ) + # self.assertEqual(res_complex.dtype.__str__(), self.expected_out_dtype_with_complex) + + +# # mod API support float32, float64, int32, int64 +create_test_case(TestTensorModScalar, 'int32', 'int32', 'float32', 'complex64') +create_test_case(TestTensorModScalar, 'int64', 'int64', 'float32', 'complex64') +create_test_case( + TestTensorModScalar, 'float64', 'float64', 'float64', 'complex128' +) + + +class Test0DTensor(unittest.TestCase): + def test_0d_add_0d(self): + paddle.enable_static() + prog = paddle.static.Program() + exe = paddle.static.Executor() + with paddle.static.program_guard(prog): + a = paddle.ones([], dtype='complex128') + b = paddle.ones([], dtype='complex64') + out = a + b + res = exe.run(prog, fetch_list=[out]) + return res + + if __name__ == '__main__': unittest.main() diff --git a/test/deprecated/legacy_test/test_tensor_type_promotion.py b/test/deprecated/legacy_test/test_tensor_type_promotion.py index 19d26048f6997..8e4e425babb1e 100644 --- a/test/deprecated/legacy_test/test_tensor_type_promotion.py +++ b/test/deprecated/legacy_test/test_tensor_type_promotion.py @@ -20,7 +20,7 @@ class TestTensorTypePromotion(unittest.TestCase): def setUp(self): - self.x = paddle.to_tensor([2, 3]) + self.x = paddle.to_tensor([2, 3], dtype='float16') self.y = paddle.to_tensor([1.0, 2.0]) def add_operator(self): @@ -63,6 +63,1991 @@ def set_dtype(self): globals()[cls_name] = TestPromotion +class TestOperatorOverloadAddInDygraph(unittest.TestCase): + def setUp(self): + paddle.disable_static() + self.set_dtype() + + def set_dtype(self): + self.ldtype = 'float32' + self.rdtype = 'float64' + self.expected_out_dtype = 'float64' + + def generate_test_value(self): + self.l_value = (paddle.randn((4, 3, 2)) * 10).astype(self.ldtype) + self.r_value = (paddle.randn((4, 3, 2)) * 10).astype(self.rdtype) + + def run_api(self): + self.generate_test_value() + + out = self.l_value + self.r_value + out_reverse = self.r_value + self.l_value + + return out, out_reverse + + def test_dtype_is_expected(self): + out, out_reverse = self.run_api() + self.assertEqual( + out.dtype.__str__(), "paddle." + self.expected_out_dtype + ) + self.assertEqual( + out_reverse.dtype.__str__(), "paddle." 
+ self.expected_out_dtype + ) + + +create_test_case( + TestOperatorOverloadAddInDygraph, 'float16', 'float32', 'float32' +) +create_test_case( + TestOperatorOverloadAddInDygraph, 'float16', 'float64', 'float64' +) + +create_test_case( + TestOperatorOverloadAddInDygraph, 'float32', 'float64', 'float64' +) + +if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): + create_test_case( + TestOperatorOverloadAddInDygraph, 'bfloat16', 'float16', 'float32' + ) + create_test_case( + TestOperatorOverloadAddInDygraph, 'bfloat16', 'float32', 'float32' + ) + create_test_case( + TestOperatorOverloadAddInDygraph, 'bfloat16', 'float64', 'float64' + ) + create_test_case( + TestOperatorOverloadAddInDygraph, 'bfloat16', 'complex64', 'complex64' + ) + create_test_case( + TestOperatorOverloadAddInDygraph, 'bfloat16', 'complex128', 'complex128' + ) + +create_test_case( + TestOperatorOverloadAddInDygraph, 'complex64', 'bool', 'complex64' +) +create_test_case( + TestOperatorOverloadAddInDygraph, 'complex64', 'int8', 'complex64' +) +create_test_case( + TestOperatorOverloadAddInDygraph, 'complex64', 'uint8', 'complex64' +) +create_test_case( + TestOperatorOverloadAddInDygraph, 'complex64', 'int16', 'complex64' +) +create_test_case( + TestOperatorOverloadAddInDygraph, 'complex64', 'int32', 'complex64' +) +create_test_case( + TestOperatorOverloadAddInDygraph, 'complex64', 'int64', 'complex64' +) +create_test_case( + TestOperatorOverloadAddInDygraph, 'complex64', 'float16', 'complex64' +) +create_test_case( + TestOperatorOverloadAddInDygraph, 'complex64', 'float32', 'complex64' +) +create_test_case( + TestOperatorOverloadAddInDygraph, 'complex64', 'float64', 'complex128' +) +create_test_case( + TestOperatorOverloadAddInDygraph, 'complex64', 'complex128', 'complex128' +) + +create_test_case( + TestOperatorOverloadAddInDygraph, 'complex128', 'bool', 'complex128' +) +create_test_case( + TestOperatorOverloadAddInDygraph, 'complex128', 'int8', 'complex128' +) +create_test_case( + TestOperatorOverloadAddInDygraph, 'complex128', 'uint8', 'complex128' +) +create_test_case( + TestOperatorOverloadAddInDygraph, 'complex128', 'int16', 'complex128' +) +create_test_case( + TestOperatorOverloadAddInDygraph, 'complex128', 'int32', 'complex128' +) +create_test_case( + TestOperatorOverloadAddInDygraph, 'complex128', 'int64', 'complex128' +) +create_test_case( + TestOperatorOverloadAddInDygraph, 'complex128', 'float16', 'complex128' +) +create_test_case( + TestOperatorOverloadAddInDygraph, 'complex128', 'float32', 'complex128' +) +create_test_case( + TestOperatorOverloadAddInDygraph, 'complex128', 'float64', 'complex128' +) + + +class TestAPIAddInDygraph(TestOperatorOverloadAddInDygraph): + def run_api(self): + self.generate_test_value() + + out = paddle.add(self.l_value, self.r_value) + out_reverse = paddle.add(self.r_value, self.l_value) + + return out, out_reverse + + +create_test_case(TestAPIAddInDygraph, 'float16', 'float32', 'float32') +create_test_case(TestAPIAddInDygraph, 'float16', 'float64', 'float64') + +create_test_case(TestAPIAddInDygraph, 'float32', 'float64', 'float64') + +if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): + create_test_case(TestAPIAddInDygraph, 'bfloat16', 'float16', 'float32') + create_test_case(TestAPIAddInDygraph, 'bfloat16', 'float32', 'float32') + create_test_case(TestAPIAddInDygraph, 'bfloat16', 'float64', 'float64') + create_test_case(TestAPIAddInDygraph, 'bfloat16', 'complex64', 'complex64') + create_test_case( + TestAPIAddInDygraph, 'bfloat16', 
'complex128', 'complex128' + ) + +create_test_case(TestAPIAddInDygraph, 'complex64', 'bool', 'complex64') +create_test_case(TestAPIAddInDygraph, 'complex64', 'int8', 'complex64') +create_test_case(TestAPIAddInDygraph, 'complex64', 'uint8', 'complex64') +create_test_case(TestAPIAddInDygraph, 'complex64', 'int16', 'complex64') +create_test_case(TestAPIAddInDygraph, 'complex64', 'int32', 'complex64') +create_test_case(TestAPIAddInDygraph, 'complex64', 'int64', 'complex64') +create_test_case(TestAPIAddInDygraph, 'complex64', 'float16', 'complex64') +create_test_case(TestAPIAddInDygraph, 'complex64', 'float32', 'complex64') +create_test_case(TestAPIAddInDygraph, 'complex64', 'float64', 'complex128') +create_test_case(TestAPIAddInDygraph, 'complex64', 'complex128', 'complex128') + +create_test_case(TestAPIAddInDygraph, 'complex128', 'bool', 'complex128') +create_test_case(TestAPIAddInDygraph, 'complex128', 'int8', 'complex128') +create_test_case(TestAPIAddInDygraph, 'complex128', 'uint8', 'complex128') +create_test_case(TestAPIAddInDygraph, 'complex128', 'int16', 'complex128') +create_test_case(TestAPIAddInDygraph, 'complex128', 'int32', 'complex128') +create_test_case(TestAPIAddInDygraph, 'complex128', 'int64', 'complex128') +create_test_case(TestAPIAddInDygraph, 'complex128', 'float16', 'complex128') +create_test_case(TestAPIAddInDygraph, 'complex128', 'float32', 'complex128') +create_test_case(TestAPIAddInDygraph, 'complex128', 'float64', 'complex128') + + +class TestAPIAddInplaceInDygraph(TestOperatorOverloadAddInDygraph): + def run_api(self): + self.generate_test_value() + out = self.l_value.add_(self.r_value) + + self.generate_test_value() + out_reverse = self.r_value.add_(self.l_value) + + return out, out_reverse + + +create_test_case(TestAPIAddInplaceInDygraph, 'float16', 'float32', 'float32') +create_test_case(TestAPIAddInplaceInDygraph, 'float16', 'float64', 'float64') + +create_test_case(TestAPIAddInplaceInDygraph, 'float32', 'float64', 'float64') + +if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): + create_test_case( + TestAPIAddInplaceInDygraph, 'bfloat16', 'float16', 'float32' + ) + create_test_case( + TestAPIAddInplaceInDygraph, 'bfloat16', 'float32', 'float32' + ) + create_test_case( + TestAPIAddInplaceInDygraph, 'bfloat16', 'float64', 'float64' + ) + create_test_case( + TestAPIAddInplaceInDygraph, 'bfloat16', 'complex64', 'complex64' + ) + create_test_case( + TestAPIAddInplaceInDygraph, 'bfloat16', 'complex128', 'complex128' + ) + +create_test_case(TestAPIAddInplaceInDygraph, 'complex64', 'bool', 'complex64') +create_test_case(TestAPIAddInplaceInDygraph, 'complex64', 'int8', 'complex64') +create_test_case(TestAPIAddInplaceInDygraph, 'complex64', 'uint8', 'complex64') +create_test_case(TestAPIAddInplaceInDygraph, 'complex64', 'int16', 'complex64') +create_test_case(TestAPIAddInplaceInDygraph, 'complex64', 'int32', 'complex64') +create_test_case(TestAPIAddInplaceInDygraph, 'complex64', 'int64', 'complex64') +create_test_case( + TestAPIAddInplaceInDygraph, 'complex64', 'float16', 'complex64' +) +create_test_case( + TestAPIAddInplaceInDygraph, 'complex64', 'float32', 'complex64' +) +create_test_case( + TestAPIAddInplaceInDygraph, 'complex64', 'float64', 'complex128' +) +create_test_case( + TestAPIAddInplaceInDygraph, 'complex64', 'complex128', 'complex128' +) + +create_test_case(TestAPIAddInplaceInDygraph, 'complex128', 'bool', 'complex128') +create_test_case(TestAPIAddInplaceInDygraph, 'complex128', 'int8', 'complex128') +create_test_case( + 
TestAPIAddInplaceInDygraph, 'complex128', 'uint8', 'complex128' +) +create_test_case( + TestAPIAddInplaceInDygraph, 'complex128', 'int16', 'complex128' +) +create_test_case( + TestAPIAddInplaceInDygraph, 'complex128', 'int32', 'complex128' +) +create_test_case( + TestAPIAddInplaceInDygraph, 'complex128', 'int64', 'complex128' +) +create_test_case( + TestAPIAddInplaceInDygraph, 'complex128', 'float16', 'complex128' +) +create_test_case( + TestAPIAddInplaceInDygraph, 'complex128', 'float32', 'complex128' +) +create_test_case( + TestAPIAddInplaceInDygraph, 'complex128', 'float64', 'complex128' +) + + +class TestOperatorOverloadSubInDygraph(TestOperatorOverloadAddInDygraph): + def run_api(self): + self.generate_test_value() + + out = self.l_value - self.r_value + out_reverse = self.r_value - self.l_value + + return out, out_reverse + + +create_test_case( + TestOperatorOverloadSubInDygraph, 'float16', 'float32', 'float32' +) +create_test_case( + TestOperatorOverloadSubInDygraph, 'float16', 'float64', 'float64' +) + +create_test_case( + TestOperatorOverloadSubInDygraph, 'float32', 'float64', 'float64' +) + +if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): + create_test_case( + TestOperatorOverloadSubInDygraph, 'bfloat16', 'float16', 'float32' + ) + create_test_case( + TestOperatorOverloadSubInDygraph, 'bfloat16', 'float32', 'float32' + ) + create_test_case( + TestOperatorOverloadSubInDygraph, 'bfloat16', 'float64', 'float64' + ) + create_test_case( + TestOperatorOverloadSubInDygraph, 'bfloat16', 'complex64', 'complex64' + ) + create_test_case( + TestOperatorOverloadSubInDygraph, 'bfloat16', 'complex128', 'complex128' + ) + +create_test_case( + TestOperatorOverloadSubInDygraph, 'complex64', 'bool', 'complex64' +) +create_test_case( + TestOperatorOverloadSubInDygraph, 'complex64', 'int8', 'complex64' +) +create_test_case( + TestOperatorOverloadSubInDygraph, 'complex64', 'uint8', 'complex64' +) +create_test_case( + TestOperatorOverloadSubInDygraph, 'complex64', 'int16', 'complex64' +) +create_test_case( + TestOperatorOverloadSubInDygraph, 'complex64', 'int32', 'complex64' +) +create_test_case( + TestOperatorOverloadSubInDygraph, 'complex64', 'int64', 'complex64' +) +create_test_case( + TestOperatorOverloadSubInDygraph, 'complex64', 'float16', 'complex64' +) +create_test_case( + TestOperatorOverloadSubInDygraph, 'complex64', 'float32', 'complex64' +) +create_test_case( + TestOperatorOverloadSubInDygraph, 'complex64', 'float64', 'complex128' +) +create_test_case( + TestOperatorOverloadSubInDygraph, 'complex64', 'complex128', 'complex128' +) + +create_test_case( + TestOperatorOverloadSubInDygraph, 'complex128', 'bool', 'complex128' +) +create_test_case( + TestOperatorOverloadSubInDygraph, 'complex128', 'int8', 'complex128' +) +create_test_case( + TestOperatorOverloadSubInDygraph, 'complex128', 'uint8', 'complex128' +) +create_test_case( + TestOperatorOverloadSubInDygraph, 'complex128', 'int16', 'complex128' +) +create_test_case( + TestOperatorOverloadSubInDygraph, 'complex128', 'int32', 'complex128' +) +create_test_case( + TestOperatorOverloadSubInDygraph, 'complex128', 'int64', 'complex128' +) +create_test_case( + TestOperatorOverloadSubInDygraph, 'complex128', 'float16', 'complex128' +) +create_test_case( + TestOperatorOverloadSubInDygraph, 'complex128', 'float32', 'complex128' +) +create_test_case( + TestOperatorOverloadSubInDygraph, 'complex128', 'float64', 'complex128' +) + + +class TestAPISubInDygraph(TestOperatorOverloadAddInDygraph): + def run_api(self): + 
self.generate_test_value() + + out = paddle.subtract(self.l_value, self.r_value) + out_reverse = paddle.subtract(self.r_value, self.l_value) + + return out, out_reverse + + +create_test_case(TestAPISubInDygraph, 'float16', 'float32', 'float32') +create_test_case(TestAPISubInDygraph, 'float16', 'float64', 'float64') + +create_test_case(TestAPISubInDygraph, 'float32', 'float64', 'float64') + +if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): + create_test_case(TestAPISubInDygraph, 'bfloat16', 'float16', 'float32') + create_test_case(TestAPISubInDygraph, 'bfloat16', 'float32', 'float32') + create_test_case(TestAPISubInDygraph, 'bfloat16', 'float64', 'float64') + create_test_case(TestAPISubInDygraph, 'bfloat16', 'complex64', 'complex64') + create_test_case( + TestAPISubInDygraph, 'bfloat16', 'complex128', 'complex128' + ) + +create_test_case(TestAPISubInDygraph, 'complex64', 'bool', 'complex64') +create_test_case(TestAPISubInDygraph, 'complex64', 'int8', 'complex64') +create_test_case(TestAPISubInDygraph, 'complex64', 'uint8', 'complex64') +create_test_case(TestAPISubInDygraph, 'complex64', 'int16', 'complex64') +create_test_case(TestAPISubInDygraph, 'complex64', 'int32', 'complex64') +create_test_case(TestAPISubInDygraph, 'complex64', 'int64', 'complex64') +create_test_case(TestAPISubInDygraph, 'complex64', 'float16', 'complex64') +create_test_case(TestAPISubInDygraph, 'complex64', 'float32', 'complex64') +create_test_case(TestAPISubInDygraph, 'complex64', 'float64', 'complex128') +create_test_case(TestAPISubInDygraph, 'complex64', 'complex128', 'complex128') + +create_test_case(TestAPISubInDygraph, 'complex128', 'bool', 'complex128') +create_test_case(TestAPISubInDygraph, 'complex128', 'int8', 'complex128') +create_test_case(TestAPISubInDygraph, 'complex128', 'uint8', 'complex128') +create_test_case(TestAPISubInDygraph, 'complex128', 'int16', 'complex128') +create_test_case(TestAPISubInDygraph, 'complex128', 'int32', 'complex128') +create_test_case(TestAPISubInDygraph, 'complex128', 'int64', 'complex128') +create_test_case(TestAPISubInDygraph, 'complex128', 'float16', 'complex128') +create_test_case(TestAPISubInDygraph, 'complex128', 'float32', 'complex128') +create_test_case(TestAPISubInDygraph, 'complex128', 'float64', 'complex128') + + +class TestAPISubInplaceInDygraph(TestOperatorOverloadAddInDygraph): + def run_api(self): + self.generate_test_value() + out = self.l_value.subtract_(self.r_value) + + self.generate_test_value() + out_reverse = self.r_value.subtract_(self.l_value) + + return out, out_reverse + + +create_test_case(TestAPISubInplaceInDygraph, 'float16', 'float32', 'float32') +create_test_case(TestAPISubInplaceInDygraph, 'float16', 'float64', 'float64') + +create_test_case(TestAPISubInplaceInDygraph, 'float32', 'float64', 'float64') + +if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): + create_test_case( + TestAPISubInplaceInDygraph, 'bfloat16', 'float16', 'float32' + ) + create_test_case( + TestAPISubInplaceInDygraph, 'bfloat16', 'float32', 'float32' + ) + create_test_case( + TestAPISubInplaceInDygraph, 'bfloat16', 'float64', 'float64' + ) + create_test_case( + TestAPISubInplaceInDygraph, 'bfloat16', 'complex64', 'complex64' + ) + create_test_case( + TestAPISubInplaceInDygraph, 'bfloat16', 'complex128', 'complex128' + ) + +create_test_case(TestAPISubInplaceInDygraph, 'complex64', 'bool', 'complex64') +create_test_case(TestAPISubInplaceInDygraph, 'complex64', 'int8', 'complex64') +create_test_case(TestAPISubInplaceInDygraph, 
'complex64', 'uint8', 'complex64') +create_test_case(TestAPISubInplaceInDygraph, 'complex64', 'int16', 'complex64') +create_test_case(TestAPISubInplaceInDygraph, 'complex64', 'int32', 'complex64') +create_test_case(TestAPISubInplaceInDygraph, 'complex64', 'int64', 'complex64') +create_test_case( + TestAPISubInplaceInDygraph, 'complex64', 'float16', 'complex64' +) +create_test_case( + TestAPISubInplaceInDygraph, 'complex64', 'float32', 'complex64' +) +create_test_case( + TestAPISubInplaceInDygraph, 'complex64', 'float64', 'complex128' +) +create_test_case( + TestAPISubInplaceInDygraph, 'complex64', 'complex128', 'complex128' +) + +create_test_case(TestAPISubInplaceInDygraph, 'complex128', 'bool', 'complex128') +create_test_case(TestAPISubInplaceInDygraph, 'complex128', 'int8', 'complex128') +create_test_case( + TestAPISubInplaceInDygraph, 'complex128', 'uint8', 'complex128' +) +create_test_case( + TestAPISubInplaceInDygraph, 'complex128', 'int16', 'complex128' +) +create_test_case( + TestAPISubInplaceInDygraph, 'complex128', 'int32', 'complex128' +) +create_test_case( + TestAPISubInplaceInDygraph, 'complex128', 'int64', 'complex128' +) +create_test_case( + TestAPISubInplaceInDygraph, 'complex128', 'float16', 'complex128' +) +create_test_case( + TestAPISubInplaceInDygraph, 'complex128', 'float32', 'complex128' +) +create_test_case( + TestAPISubInplaceInDygraph, 'complex128', 'float64', 'complex128' +) + + +class TestOperatorOverloadMulInDygraph(TestOperatorOverloadAddInDygraph): + def run_api(self): + self.generate_test_value() + + out = self.l_value * self.r_value + out_reverse = self.r_value * self.l_value + + return out, out_reverse + + +create_test_case( + TestOperatorOverloadMulInDygraph, 'float16', 'float32', 'float32' +) +create_test_case( + TestOperatorOverloadMulInDygraph, 'float16', 'float64', 'float64' +) + +create_test_case( + TestOperatorOverloadMulInDygraph, 'float32', 'float64', 'float64' +) + +if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): + create_test_case( + TestOperatorOverloadMulInDygraph, 'bfloat16', 'float16', 'float32' + ) + create_test_case( + TestOperatorOverloadMulInDygraph, 'bfloat16', 'float32', 'float32' + ) + create_test_case( + TestOperatorOverloadMulInDygraph, 'bfloat16', 'float64', 'float64' + ) + create_test_case( + TestOperatorOverloadMulInDygraph, 'bfloat16', 'complex64', 'complex64' + ) + create_test_case( + TestOperatorOverloadMulInDygraph, 'bfloat16', 'complex128', 'complex128' + ) + +create_test_case( + TestOperatorOverloadMulInDygraph, 'complex64', 'bool', 'complex64' +) +create_test_case( + TestOperatorOverloadMulInDygraph, 'complex64', 'int8', 'complex64' +) +create_test_case( + TestOperatorOverloadMulInDygraph, 'complex64', 'uint8', 'complex64' +) +create_test_case( + TestOperatorOverloadMulInDygraph, 'complex64', 'int16', 'complex64' +) +create_test_case( + TestOperatorOverloadMulInDygraph, 'complex64', 'int32', 'complex64' +) +create_test_case( + TestOperatorOverloadMulInDygraph, 'complex64', 'int64', 'complex64' +) +create_test_case( + TestOperatorOverloadMulInDygraph, 'complex64', 'float16', 'complex64' +) +create_test_case( + TestOperatorOverloadMulInDygraph, 'complex64', 'float32', 'complex64' +) +create_test_case( + TestOperatorOverloadMulInDygraph, 'complex64', 'float64', 'complex128' +) +create_test_case( + TestOperatorOverloadMulInDygraph, 'complex64', 'complex128', 'complex128' +) + +create_test_case( + TestOperatorOverloadMulInDygraph, 'complex128', 'bool', 'complex128' +) +create_test_case( + 
TestOperatorOverloadMulInDygraph, 'complex128', 'int8', 'complex128' +) +create_test_case( + TestOperatorOverloadMulInDygraph, 'complex128', 'uint8', 'complex128' +) +create_test_case( + TestOperatorOverloadMulInDygraph, 'complex128', 'int16', 'complex128' +) +create_test_case( + TestOperatorOverloadMulInDygraph, 'complex128', 'int32', 'complex128' +) +create_test_case( + TestOperatorOverloadMulInDygraph, 'complex128', 'int64', 'complex128' +) +create_test_case( + TestOperatorOverloadMulInDygraph, 'complex128', 'float16', 'complex128' +) +create_test_case( + TestOperatorOverloadMulInDygraph, 'complex128', 'float32', 'complex128' +) +create_test_case( + TestOperatorOverloadMulInDygraph, 'complex128', 'float64', 'complex128' +) + + +class TestAPIMulInDygraph(TestOperatorOverloadAddInDygraph): + def run_api(self): + self.generate_test_value() + + out = paddle.multiply(self.l_value, self.r_value) + out_reverse = paddle.multiply(self.r_value, self.l_value) + + return out, out_reverse + + +create_test_case(TestAPIMulInDygraph, 'float16', 'float32', 'float32') +create_test_case(TestAPIMulInDygraph, 'float16', 'float64', 'float64') + +create_test_case(TestAPIMulInDygraph, 'float32', 'float64', 'float64') + +if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): + create_test_case(TestAPIMulInDygraph, 'bfloat16', 'float16', 'float32') + create_test_case(TestAPIMulInDygraph, 'bfloat16', 'float32', 'float32') + create_test_case(TestAPIMulInDygraph, 'bfloat16', 'float64', 'float64') + create_test_case(TestAPIMulInDygraph, 'bfloat16', 'complex64', 'complex64') + create_test_case( + TestAPIMulInDygraph, 'bfloat16', 'complex128', 'complex128' + ) + +create_test_case(TestAPIMulInDygraph, 'complex64', 'bool', 'complex64') +create_test_case(TestAPIMulInDygraph, 'complex64', 'int8', 'complex64') +create_test_case(TestAPIMulInDygraph, 'complex64', 'uint8', 'complex64') +create_test_case(TestAPIMulInDygraph, 'complex64', 'int16', 'complex64') +create_test_case(TestAPIMulInDygraph, 'complex64', 'int32', 'complex64') +create_test_case(TestAPIMulInDygraph, 'complex64', 'int64', 'complex64') +create_test_case(TestAPIMulInDygraph, 'complex64', 'float16', 'complex64') +create_test_case(TestAPIMulInDygraph, 'complex64', 'float32', 'complex64') +create_test_case(TestAPIMulInDygraph, 'complex64', 'float64', 'complex128') +create_test_case(TestAPIMulInDygraph, 'complex64', 'complex128', 'complex128') + +create_test_case(TestAPIMulInDygraph, 'complex128', 'bool', 'complex128') +create_test_case(TestAPIMulInDygraph, 'complex128', 'int8', 'complex128') +create_test_case(TestAPIMulInDygraph, 'complex128', 'uint8', 'complex128') +create_test_case(TestAPIMulInDygraph, 'complex128', 'int16', 'complex128') +create_test_case(TestAPIMulInDygraph, 'complex128', 'int32', 'complex128') +create_test_case(TestAPIMulInDygraph, 'complex128', 'int64', 'complex128') +create_test_case(TestAPIMulInDygraph, 'complex128', 'float16', 'complex128') +create_test_case(TestAPIMulInDygraph, 'complex128', 'float32', 'complex128') +create_test_case(TestAPIMulInDygraph, 'complex128', 'float64', 'complex128') + + +class TestAPIMulInplaceInDygraph(TestOperatorOverloadAddInDygraph): + def run_api(self): + self.generate_test_value() + out = self.l_value.multiply_(self.r_value) + + self.generate_test_value() + out_reverse = self.r_value.multiply_(self.l_value) + + return out, out_reverse + + +create_test_case(TestAPIMulInplaceInDygraph, 'float16', 'float32', 'float32') +create_test_case(TestAPIMulInplaceInDygraph, 'float16', 'float64', 
'float64') + +create_test_case(TestAPIMulInplaceInDygraph, 'float32', 'float64', 'float64') + +if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): + create_test_case( + TestAPIMulInplaceInDygraph, 'bfloat16', 'float16', 'float32' + ) + create_test_case( + TestAPIMulInplaceInDygraph, 'bfloat16', 'float32', 'float32' + ) + create_test_case( + TestAPIMulInplaceInDygraph, 'bfloat16', 'float64', 'float64' + ) + create_test_case( + TestAPIMulInplaceInDygraph, 'bfloat16', 'complex64', 'complex64' + ) + create_test_case( + TestAPIMulInplaceInDygraph, 'bfloat16', 'complex128', 'complex128' + ) + +create_test_case(TestAPIMulInplaceInDygraph, 'complex64', 'bool', 'complex64') +create_test_case(TestAPIMulInplaceInDygraph, 'complex64', 'int8', 'complex64') +create_test_case(TestAPIMulInplaceInDygraph, 'complex64', 'uint8', 'complex64') +create_test_case(TestAPIMulInplaceInDygraph, 'complex64', 'int16', 'complex64') +create_test_case(TestAPIMulInplaceInDygraph, 'complex64', 'int32', 'complex64') +create_test_case(TestAPIMulInplaceInDygraph, 'complex64', 'int64', 'complex64') +create_test_case( + TestAPIMulInplaceInDygraph, 'complex64', 'float16', 'complex64' +) +create_test_case( + TestAPIMulInplaceInDygraph, 'complex64', 'float32', 'complex64' +) +create_test_case( + TestAPIMulInplaceInDygraph, 'complex64', 'float64', 'complex128' +) +create_test_case( + TestAPIMulInplaceInDygraph, 'complex64', 'complex128', 'complex128' +) + +create_test_case(TestAPIMulInplaceInDygraph, 'complex128', 'bool', 'complex128') +create_test_case(TestAPIMulInplaceInDygraph, 'complex128', 'int8', 'complex128') +create_test_case( + TestAPIMulInplaceInDygraph, 'complex128', 'uint8', 'complex128' +) +create_test_case( + TestAPIMulInplaceInDygraph, 'complex128', 'int16', 'complex128' +) +create_test_case( + TestAPIMulInplaceInDygraph, 'complex128', 'int32', 'complex128' +) +create_test_case( + TestAPIMulInplaceInDygraph, 'complex128', 'int64', 'complex128' +) +create_test_case( + TestAPIMulInplaceInDygraph, 'complex128', 'float16', 'complex128' +) +create_test_case( + TestAPIMulInplaceInDygraph, 'complex128', 'float32', 'complex128' +) +create_test_case( + TestAPIMulInplaceInDygraph, 'complex128', 'float64', 'complex128' +) + + +class TestOperatorOverloadDivInDygraph(TestOperatorOverloadAddInDygraph): + def run_api(self): + self.generate_test_value() + + out = self.l_value / self.r_value + out_reverse = self.r_value / self.l_value + + return out, out_reverse + + +create_test_case( + TestOperatorOverloadDivInDygraph, 'float16', 'float32', 'float32' +) +create_test_case( + TestOperatorOverloadDivInDygraph, 'float16', 'float64', 'float64' +) + +create_test_case( + TestOperatorOverloadDivInDygraph, 'float32', 'float64', 'float64' +) + +if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): + create_test_case( + TestOperatorOverloadDivInDygraph, 'bfloat16', 'float16', 'float32' + ) + create_test_case( + TestOperatorOverloadDivInDygraph, 'bfloat16', 'float32', 'float32' + ) + create_test_case( + TestOperatorOverloadDivInDygraph, 'bfloat16', 'float64', 'float64' + ) + create_test_case( + TestOperatorOverloadDivInDygraph, 'bfloat16', 'complex64', 'complex64' + ) + create_test_case( + TestOperatorOverloadDivInDygraph, 'bfloat16', 'complex128', 'complex128' + ) + +create_test_case( + TestOperatorOverloadDivInDygraph, 'complex64', 'bool', 'complex64' +) +create_test_case( + TestOperatorOverloadDivInDygraph, 'complex64', 'int8', 'complex64' +) +create_test_case( + TestOperatorOverloadDivInDygraph, 
'complex64', 'uint8', 'complex64' +) +create_test_case( + TestOperatorOverloadDivInDygraph, 'complex64', 'int16', 'complex64' +) +create_test_case( + TestOperatorOverloadDivInDygraph, 'complex64', 'int32', 'complex64' +) +create_test_case( + TestOperatorOverloadDivInDygraph, 'complex64', 'int64', 'complex64' +) +create_test_case( + TestOperatorOverloadDivInDygraph, 'complex64', 'float16', 'complex64' +) +create_test_case( + TestOperatorOverloadDivInDygraph, 'complex64', 'float32', 'complex64' +) +create_test_case( + TestOperatorOverloadDivInDygraph, 'complex64', 'float64', 'complex128' +) +create_test_case( + TestOperatorOverloadDivInDygraph, 'complex64', 'complex128', 'complex128' +) + +create_test_case( + TestOperatorOverloadDivInDygraph, 'complex128', 'bool', 'complex128' +) +create_test_case( + TestOperatorOverloadDivInDygraph, 'complex128', 'int8', 'complex128' +) +create_test_case( + TestOperatorOverloadDivInDygraph, 'complex128', 'uint8', 'complex128' +) +create_test_case( + TestOperatorOverloadDivInDygraph, 'complex128', 'int16', 'complex128' +) +create_test_case( + TestOperatorOverloadDivInDygraph, 'complex128', 'int32', 'complex128' +) +create_test_case( + TestOperatorOverloadDivInDygraph, 'complex128', 'int64', 'complex128' +) +create_test_case( + TestOperatorOverloadDivInDygraph, 'complex128', 'float16', 'complex128' +) +create_test_case( + TestOperatorOverloadDivInDygraph, 'complex128', 'float32', 'complex128' +) +create_test_case( + TestOperatorOverloadDivInDygraph, 'complex128', 'float64', 'complex128' +) + + +class TestAPIDivInDygraph(TestOperatorOverloadAddInDygraph): + def run_api(self): + self.generate_test_value() + + out = paddle.divide(self.l_value, self.r_value) + out_reverse = paddle.divide(self.r_value, self.l_value) + + return out, out_reverse + + +create_test_case(TestAPIDivInDygraph, 'float16', 'float32', 'float32') +create_test_case(TestAPIDivInDygraph, 'float16', 'float64', 'float64') + +create_test_case(TestAPIDivInDygraph, 'float32', 'float64', 'float64') + +if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): + create_test_case(TestAPIDivInDygraph, 'bfloat16', 'float16', 'float32') + create_test_case(TestAPIDivInDygraph, 'bfloat16', 'float32', 'float32') + create_test_case(TestAPIDivInDygraph, 'bfloat16', 'float64', 'float64') + create_test_case(TestAPIDivInDygraph, 'bfloat16', 'complex64', 'complex64') + create_test_case( + TestAPIDivInDygraph, 'bfloat16', 'complex128', 'complex128' + ) + +create_test_case(TestAPIDivInDygraph, 'complex64', 'bool', 'complex64') +create_test_case(TestAPIDivInDygraph, 'complex64', 'int8', 'complex64') +create_test_case(TestAPIDivInDygraph, 'complex64', 'uint8', 'complex64') +create_test_case(TestAPIDivInDygraph, 'complex64', 'int16', 'complex64') +create_test_case(TestAPIDivInDygraph, 'complex64', 'int32', 'complex64') +create_test_case(TestAPIDivInDygraph, 'complex64', 'int64', 'complex64') +create_test_case(TestAPIDivInDygraph, 'complex64', 'float16', 'complex64') +create_test_case(TestAPIDivInDygraph, 'complex64', 'float32', 'complex64') +create_test_case(TestAPIDivInDygraph, 'complex64', 'float64', 'complex128') +create_test_case(TestAPIDivInDygraph, 'complex64', 'complex128', 'complex128') + +create_test_case(TestAPIDivInDygraph, 'complex128', 'bool', 'complex128') +create_test_case(TestAPIDivInDygraph, 'complex128', 'int8', 'complex128') +create_test_case(TestAPIDivInDygraph, 'complex128', 'uint8', 'complex128') +create_test_case(TestAPIDivInDygraph, 'complex128', 'int16', 'complex128') 
+create_test_case(TestAPIDivInDygraph, 'complex128', 'int32', 'complex128') +create_test_case(TestAPIDivInDygraph, 'complex128', 'int64', 'complex128') +create_test_case(TestAPIDivInDygraph, 'complex128', 'float16', 'complex128') +create_test_case(TestAPIDivInDygraph, 'complex128', 'float32', 'complex128') +create_test_case(TestAPIDivInDygraph, 'complex128', 'float64', 'complex128') + + +class TestAPIDivInplaceInDygraph(TestOperatorOverloadAddInDygraph): + def run_api(self): + self.generate_test_value() + out = self.l_value.divide_(self.r_value) + + self.generate_test_value() + out_reverse = self.r_value.divide_(self.l_value) + + return out, out_reverse + + +create_test_case(TestAPIDivInplaceInDygraph, 'float16', 'float32', 'float32') +create_test_case(TestAPIDivInplaceInDygraph, 'float16', 'float64', 'float64') + +create_test_case(TestAPIDivInplaceInDygraph, 'float32', 'float64', 'float64') + +if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): + create_test_case( + TestAPIDivInplaceInDygraph, 'bfloat16', 'float16', 'float32' + ) + create_test_case( + TestAPIDivInplaceInDygraph, 'bfloat16', 'float32', 'float32' + ) + create_test_case( + TestAPIDivInplaceInDygraph, 'bfloat16', 'float64', 'float64' + ) + create_test_case( + TestAPIDivInplaceInDygraph, 'bfloat16', 'complex64', 'complex64' + ) + create_test_case( + TestAPIDivInplaceInDygraph, 'bfloat16', 'complex128', 'complex128' + ) + +create_test_case(TestAPIDivInplaceInDygraph, 'complex64', 'bool', 'complex64') +create_test_case(TestAPIDivInplaceInDygraph, 'complex64', 'int8', 'complex64') +create_test_case(TestAPIDivInplaceInDygraph, 'complex64', 'uint8', 'complex64') +create_test_case(TestAPIDivInplaceInDygraph, 'complex64', 'int16', 'complex64') +create_test_case(TestAPIDivInplaceInDygraph, 'complex64', 'int32', 'complex64') +create_test_case(TestAPIDivInplaceInDygraph, 'complex64', 'int64', 'complex64') +create_test_case( + TestAPIDivInplaceInDygraph, 'complex64', 'float16', 'complex64' +) +create_test_case( + TestAPIDivInplaceInDygraph, 'complex64', 'float32', 'complex64' +) +create_test_case( + TestAPIDivInplaceInDygraph, 'complex64', 'float64', 'complex128' +) +create_test_case( + TestAPIDivInplaceInDygraph, 'complex64', 'complex128', 'complex128' +) + +create_test_case(TestAPIDivInplaceInDygraph, 'complex128', 'bool', 'complex128') +create_test_case(TestAPIDivInplaceInDygraph, 'complex128', 'int8', 'complex128') +create_test_case( + TestAPIDivInplaceInDygraph, 'complex128', 'uint8', 'complex128' +) +create_test_case( + TestAPIDivInplaceInDygraph, 'complex128', 'int16', 'complex128' +) +create_test_case( + TestAPIDivInplaceInDygraph, 'complex128', 'int32', 'complex128' +) +create_test_case( + TestAPIDivInplaceInDygraph, 'complex128', 'int64', 'complex128' +) +create_test_case( + TestAPIDivInplaceInDygraph, 'complex128', 'float16', 'complex128' +) +create_test_case( + TestAPIDivInplaceInDygraph, 'complex128', 'float32', 'complex128' +) +create_test_case( + TestAPIDivInplaceInDygraph, 'complex128', 'float64', 'complex128' +) + + +class TestOperatorOverloadPowInDygraph(TestOperatorOverloadAddInDygraph): + def run_api(self): + self.generate_test_value() + + out = self.l_value**self.r_value + out_reverse = self.r_value**self.l_value + + return out, out_reverse + + +create_test_case( + TestOperatorOverloadPowInDygraph, 'float16', 'float32', 'float32' +) +create_test_case( + TestOperatorOverloadPowInDygraph, 'float16', 'float64', 'float64' +) + +create_test_case( + TestOperatorOverloadPowInDygraph, 'float32', 
'float64', 'float64' +) + + +class TestAPIPowInDygraph(TestOperatorOverloadAddInDygraph): + def run_api(self): + self.generate_test_value() + + out = paddle.pow(self.l_value, self.r_value) + out_reverse = paddle.pow(self.r_value, self.l_value) + + return out, out_reverse + + +create_test_case(TestAPIPowInDygraph, 'float16', 'float32', 'float32') +create_test_case(TestAPIPowInDygraph, 'float16', 'float64', 'float64') + +create_test_case(TestAPIPowInDygraph, 'float32', 'float64', 'float64') + + +class TestOperatorOverloadFloorDivInDygraph(TestOperatorOverloadAddInDygraph): + def run_api(self): + self.generate_test_value() + + out = self.l_value // self.r_value + out_reverse = self.r_value // self.l_value + + return out, out_reverse + + +create_test_case( + TestOperatorOverloadFloorDivInDygraph, 'float16', 'float32', 'float32' +) +create_test_case( + TestOperatorOverloadFloorDivInDygraph, 'float16', 'float64', 'float64' +) + +create_test_case( + TestOperatorOverloadFloorDivInDygraph, 'float32', 'float64', 'float64' +) + +if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): + create_test_case( + TestOperatorOverloadFloorDivInDygraph, 'bfloat16', 'float16', 'float32' + ) + create_test_case( + TestOperatorOverloadFloorDivInDygraph, 'bfloat16', 'float32', 'float32' + ) + create_test_case( + TestOperatorOverloadFloorDivInDygraph, 'bfloat16', 'float64', 'float64' + ) + + +class TestAPIFloorDivInDygraph(TestOperatorOverloadAddInDygraph): + def run_api(self): + self.generate_test_value() + + out = paddle.floor_divide(self.l_value, self.r_value) + out_reverse = paddle.floor_divide(self.r_value, self.l_value) + + return out, out_reverse + + +create_test_case(TestAPIFloorDivInDygraph, 'float16', 'float32', 'float32') +create_test_case(TestAPIFloorDivInDygraph, 'float16', 'float64', 'float64') + +create_test_case(TestAPIFloorDivInDygraph, 'float32', 'float64', 'float64') + +if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): + create_test_case(TestAPIFloorDivInDygraph, 'bfloat16', 'float16', 'float32') + create_test_case(TestAPIFloorDivInDygraph, 'bfloat16', 'float32', 'float32') + create_test_case(TestAPIFloorDivInDygraph, 'bfloat16', 'float64', 'float64') + + +class TestAPIFloorDivInplaceInDygraph(TestOperatorOverloadAddInDygraph): + def run_api(self): + self.generate_test_value() + out = self.l_value.floor_divide_(self.r_value) + + self.generate_test_value() + out_reverse = self.r_value.floor_divide_(self.l_value) + + return out, out_reverse + + +create_test_case( + TestAPIFloorDivInplaceInDygraph, 'float16', 'float32', 'float32' +) +create_test_case( + TestAPIFloorDivInplaceInDygraph, 'float16', 'float64', 'float64' +) + +create_test_case( + TestAPIFloorDivInplaceInDygraph, 'float32', 'float64', 'float64' +) + +if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): + create_test_case( + TestAPIFloorDivInplaceInDygraph, 'bfloat16', 'float16', 'float32' + ) + create_test_case( + TestAPIFloorDivInplaceInDygraph, 'bfloat16', 'float32', 'float32' + ) + create_test_case( + TestAPIFloorDivInplaceInDygraph, 'bfloat16', 'float64', 'float64' + ) + + +class TestOperatorOverloadModInDygraph(TestOperatorOverloadAddInDygraph): + def run_api(self): + self.generate_test_value() + + out = self.l_value % self.r_value + out_reverse = self.r_value % self.l_value + + return out, out_reverse + + +create_test_case( + TestOperatorOverloadModInDygraph, 'float16', 'float32', 'float32' +) +create_test_case( + TestOperatorOverloadModInDygraph, 'float16', 
'float64', 'float64' +) + +create_test_case( + TestOperatorOverloadModInDygraph, 'float32', 'float64', 'float64' +) + + +class TestAPIModInDygraph(TestOperatorOverloadAddInDygraph): + def run_api(self): + self.generate_test_value() + + out = paddle.mod(self.l_value, self.r_value) + out_reverse = paddle.mod(self.r_value, self.l_value) + + return out, out_reverse + + +create_test_case(TestAPIModInDygraph, 'float16', 'float32', 'float32') +create_test_case(TestAPIModInDygraph, 'float16', 'float64', 'float64') + +create_test_case(TestAPIModInDygraph, 'float32', 'float64', 'float64') + + +class TestAPIModInplaceInDygraph(TestOperatorOverloadAddInDygraph): + def run_api(self): + self.generate_test_value() + out = self.l_value.mod_(self.r_value) + + self.generate_test_value() + out_reverse = self.r_value.mod_(self.l_value) + + return out, out_reverse + + +create_test_case(TestAPIModInplaceInDygraph, 'float16', 'float32', 'float32') +create_test_case(TestAPIModInplaceInDygraph, 'float16', 'float64', 'float64') + +create_test_case(TestAPIModInplaceInDygraph, 'float32', 'float64', 'float64') + + +class TestOperatorOverloadEqualInDygraph(unittest.TestCase): + def setUp(self): + paddle.disable_static() + self.set_dtype() + + def set_dtype(self): + self.ldtype = 'float32' + self.rdtype = 'float64' + self.expected_out_dtype = 'bool' + + def generate_test_value(self): + self.l_value = (paddle.randn((4, 3, 2)) * 10).astype(self.ldtype) + self.r_value = (paddle.randn((4, 3, 2)) * 10).astype(self.rdtype) + + def run_api(self): + self.generate_test_value() + + out = self.l_value == self.r_value + out_reverse = self.r_value == self.l_value + + return out, out_reverse + + def test_dtype_is_expected(self): + out, out_reverse = self.run_api() + self.assertEqual( + out.dtype.__str__(), "paddle." + self.expected_out_dtype + ) + self.assertEqual( + out_reverse.dtype.__str__(), "paddle." 
+ self.expected_out_dtype + ) + + +create_test_case( + TestOperatorOverloadEqualInDygraph, 'float16', 'float32', 'bool' +) +create_test_case( + TestOperatorOverloadEqualInDygraph, 'float16', 'float64', 'bool' +) + +create_test_case( + TestOperatorOverloadEqualInDygraph, 'float32', 'float64', 'bool' +) + + +class TestAPIEqualInDygraph(TestOperatorOverloadEqualInDygraph): + def run_api(self): + self.generate_test_value() + + out = paddle.equal(self.l_value, self.r_value) + out_reverse = paddle.equal(self.r_value, self.l_value) + + return out, out_reverse + + +create_test_case(TestAPIEqualInDygraph, 'float16', 'float32', 'bool') +create_test_case(TestAPIEqualInDygraph, 'float16', 'float64', 'bool') + +create_test_case(TestAPIEqualInDygraph, 'float32', 'float64', 'bool') + + +class TestAPIEqualInplaceInDygraph(TestOperatorOverloadEqualInDygraph): + def run_api(self): + self.generate_test_value() + out = self.l_value.equal_(self.r_value) + + self.generate_test_value() + out_reverse = self.r_value.equal_(self.l_value) + + return out, out_reverse + + +create_test_case(TestAPIEqualInplaceInDygraph, 'float16', 'float32', 'bool') +create_test_case(TestAPIEqualInplaceInDygraph, 'float16', 'float64', 'bool') + +create_test_case(TestAPIEqualInplaceInDygraph, 'float32', 'float64', 'bool') + + +class TestOperatorOverloadNotEqualInDygraph(TestOperatorOverloadEqualInDygraph): + def run_api(self): + self.generate_test_value() + + out = self.l_value != self.r_value + out_reverse = self.r_value != self.l_value + + return out, out_reverse + + +create_test_case( + TestOperatorOverloadNotEqualInDygraph, 'float16', 'float32', 'bool' +) +create_test_case( + TestOperatorOverloadNotEqualInDygraph, 'float16', 'float64', 'bool' +) + +create_test_case( + TestOperatorOverloadNotEqualInDygraph, 'float32', 'float64', 'bool' +) + + +class TestAPINotEqualInDygraph(TestOperatorOverloadEqualInDygraph): + def run_api(self): + self.generate_test_value() + + out = paddle.not_equal(self.l_value, self.r_value) + out_reverse = paddle.not_equal(self.r_value, self.l_value) + + return out, out_reverse + + +create_test_case(TestAPINotEqualInDygraph, 'float16', 'float32', 'bool') +create_test_case(TestAPINotEqualInDygraph, 'float16', 'float64', 'bool') + +create_test_case(TestAPINotEqualInDygraph, 'float32', 'float64', 'bool') + + +class TestAPINotEqualInplaceInDygraph(TestOperatorOverloadEqualInDygraph): + def run_api(self): + self.generate_test_value() + out = self.l_value.not_equal_(self.r_value) + + self.generate_test_value() + out_reverse = self.r_value.not_equal_(self.l_value) + + return out, out_reverse + + +create_test_case(TestAPINotEqualInplaceInDygraph, 'float16', 'float32', 'bool') +create_test_case(TestAPINotEqualInplaceInDygraph, 'float16', 'float64', 'bool') + +create_test_case(TestAPINotEqualInplaceInDygraph, 'float32', 'float64', 'bool') + + +class TestOperatorOverloadLessThanInDygraph(TestOperatorOverloadEqualInDygraph): + def run_api(self): + self.generate_test_value() + + out = self.l_value < self.r_value + out_reverse = self.r_value < self.l_value + + return out, out_reverse + + +create_test_case( + TestOperatorOverloadLessThanInDygraph, 'float16', 'float32', 'bool' +) +create_test_case( + TestOperatorOverloadLessThanInDygraph, 'float16', 'float64', 'bool' +) + +create_test_case( + TestOperatorOverloadLessThanInDygraph, 'float32', 'float64', 'bool' +) + + +class TestAPILessThanInDygraph(TestOperatorOverloadEqualInDygraph): + def run_api(self): + self.generate_test_value() + + out = paddle.less_than(self.l_value, 
self.r_value) + out_reverse = paddle.less_than(self.r_value, self.l_value) + + return out, out_reverse + + +create_test_case(TestAPILessThanInDygraph, 'float16', 'float32', 'bool') +create_test_case(TestAPILessThanInDygraph, 'float16', 'float64', 'bool') + +create_test_case(TestAPILessThanInDygraph, 'float32', 'float64', 'bool') + + +class TestAPILessThanInplaceInDygraph(TestOperatorOverloadEqualInDygraph): + def run_api(self): + self.generate_test_value() + out = self.l_value.less_than_(self.r_value) + + self.generate_test_value() + out_reverse = self.r_value.less_than_(self.l_value) + + return out, out_reverse + + +create_test_case(TestAPILessThanInplaceInDygraph, 'float16', 'float32', 'bool') +create_test_case(TestAPILessThanInplaceInDygraph, 'float16', 'float64', 'bool') + +create_test_case(TestAPILessThanInplaceInDygraph, 'float32', 'float64', 'bool') + + +class TestOperatorOverloadLessEqualInDygraph( + TestOperatorOverloadEqualInDygraph +): + def run_api(self): + self.generate_test_value() + + out = self.l_value <= self.r_value + out_reverse = self.r_value <= self.l_value + + return out, out_reverse + + +create_test_case( + TestOperatorOverloadLessEqualInDygraph, 'float16', 'float32', 'bool' +) +create_test_case( + TestOperatorOverloadLessEqualInDygraph, 'float16', 'float64', 'bool' +) + +create_test_case( + TestOperatorOverloadLessEqualInDygraph, 'float32', 'float64', 'bool' +) + + +class TestAPILessEqualInDygraph(TestOperatorOverloadEqualInDygraph): + def run_api(self): + self.generate_test_value() + + out = paddle.less_equal(self.l_value, self.r_value) + out_reverse = paddle.less_equal(self.r_value, self.l_value) + + return out, out_reverse + + +create_test_case(TestAPILessEqualInDygraph, 'float16', 'float32', 'bool') +create_test_case(TestAPILessEqualInDygraph, 'float16', 'float64', 'bool') + +create_test_case(TestAPILessEqualInDygraph, 'float32', 'float64', 'bool') + + +class TestAPILessEqualInplaceInDygraph(TestOperatorOverloadEqualInDygraph): + def run_api(self): + self.generate_test_value() + out = self.l_value.less_equal_(self.r_value) + + self.generate_test_value() + out_reverse = self.r_value.less_equal_(self.l_value) + + return out, out_reverse + + +create_test_case(TestAPILessEqualInplaceInDygraph, 'float16', 'float32', 'bool') +create_test_case(TestAPILessEqualInplaceInDygraph, 'float16', 'float64', 'bool') + +create_test_case(TestAPILessEqualInplaceInDygraph, 'float32', 'float64', 'bool') + + +class TestOperatorOverloadGreaterThanInDygraph( + TestOperatorOverloadEqualInDygraph +): + def run_api(self): + self.generate_test_value() + + out = self.l_value > self.r_value + out_reverse = self.r_value > self.l_value + + return out, out_reverse + + +create_test_case( + TestOperatorOverloadGreaterThanInDygraph, 'float16', 'float32', 'bool' +) +create_test_case( + TestOperatorOverloadGreaterThanInDygraph, 'float16', 'float64', 'bool' +) + +create_test_case( + TestOperatorOverloadGreaterThanInDygraph, 'float32', 'float64', 'bool' +) + + +class TestAPIGreaterThanInDygraph(TestOperatorOverloadEqualInDygraph): + def run_api(self): + self.generate_test_value() + + out = paddle.greater_than(self.l_value, self.r_value) + out_reverse = paddle.greater_than(self.r_value, self.l_value) + + return out, out_reverse + + +create_test_case(TestAPIGreaterThanInDygraph, 'float16', 'float32', 'bool') +create_test_case(TestAPIGreaterThanInDygraph, 'float16', 'float64', 'bool') + +create_test_case(TestAPIGreaterThanInDygraph, 'float32', 'float64', 'bool') + + +class 
TestAPIGreaterThanInplaceInDygraph(TestOperatorOverloadEqualInDygraph): + def run_api(self): + self.generate_test_value() + out = self.l_value.greater_than_(self.r_value) + + self.generate_test_value() + out_reverse = self.r_value.greater_than_(self.l_value) + + return out, out_reverse + + +create_test_case( + TestAPIGreaterThanInplaceInDygraph, 'float16', 'float32', 'bool' +) +create_test_case( + TestAPIGreaterThanInplaceInDygraph, 'float16', 'float64', 'bool' +) + +create_test_case( + TestAPIGreaterThanInplaceInDygraph, 'float32', 'float64', 'bool' +) + + +class TestOperatorOverloadGreaterEqualInDygraph( + TestOperatorOverloadEqualInDygraph +): + def run_api(self): + self.generate_test_value() + + out = self.l_value >= self.r_value + out_reverse = self.r_value >= self.l_value + + return out, out_reverse + + +create_test_case( + TestOperatorOverloadGreaterEqualInDygraph, 'float16', 'float32', 'bool' +) +create_test_case( + TestOperatorOverloadGreaterEqualInDygraph, 'float16', 'float64', 'bool' +) + +create_test_case( + TestOperatorOverloadGreaterEqualInDygraph, 'float32', 'float64', 'bool' +) + + +class TestAPIGreaterEqualInDygraph(TestOperatorOverloadEqualInDygraph): + def run_api(self): + self.generate_test_value() + + out = paddle.greater_equal(self.l_value, self.r_value) + out_reverse = paddle.greater_equal(self.r_value, self.l_value) + + return out, out_reverse + + +create_test_case(TestAPIGreaterEqualInDygraph, 'float16', 'float32', 'bool') +create_test_case(TestAPIGreaterEqualInDygraph, 'float16', 'float64', 'bool') + +create_test_case(TestAPIGreaterEqualInDygraph, 'float32', 'float64', 'bool') + + +class TestAPIGreaterEqualInplaceInDygraph(TestOperatorOverloadEqualInDygraph): + def run_api(self): + self.generate_test_value() + out = self.l_value.greater_equal_(self.r_value) + + self.generate_test_value() + out_reverse = self.r_value.greater_equal_(self.l_value) + + return out, out_reverse + + +create_test_case( + TestAPIGreaterEqualInplaceInDygraph, 'float16', 'float32', 'bool' +) +create_test_case( + TestAPIGreaterEqualInplaceInDygraph, 'float16', 'float64', 'bool' +) + +create_test_case( + TestAPIGreaterEqualInplaceInDygraph, 'float32', 'float64', 'bool' +) + + +class TestAPILogicalAndInDygraph(TestOperatorOverloadEqualInDygraph): + def run_api(self): + self.generate_test_value() + + out = paddle.logical_and(self.l_value, self.r_value) + out_reverse = paddle.logical_and(self.r_value, self.l_value) + + return out, out_reverse + + +create_test_case(TestAPILogicalAndInDygraph, 'float16', 'float32', 'bool') +create_test_case(TestAPILogicalAndInDygraph, 'float16', 'float64', 'bool') + +create_test_case(TestAPILogicalAndInDygraph, 'float32', 'float64', 'bool') + +create_test_case(TestAPILogicalAndInDygraph, 'complex64', 'bool', 'bool') +create_test_case(TestAPILogicalAndInDygraph, 'complex64', 'int8', 'bool') +create_test_case(TestAPILogicalAndInDygraph, 'complex64', 'int16', 'bool') +create_test_case(TestAPILogicalAndInDygraph, 'complex64', 'int32', 'bool') +create_test_case(TestAPILogicalAndInDygraph, 'complex64', 'int64', 'bool') +create_test_case(TestAPILogicalAndInDygraph, 'complex64', 'float16', 'bool') +create_test_case(TestAPILogicalAndInDygraph, 'complex64', 'float32', 'bool') +create_test_case(TestAPILogicalAndInDygraph, 'complex64', 'float64', 'bool') +create_test_case(TestAPILogicalAndInDygraph, 'complex64', 'complex128', 'bool') + +create_test_case(TestAPILogicalAndInDygraph, 'complex128', 'bool', 'bool') +create_test_case(TestAPILogicalAndInDygraph, 'complex128', 'int8', 
'bool') +create_test_case(TestAPILogicalAndInDygraph, 'complex128', 'int16', 'bool') +create_test_case(TestAPILogicalAndInDygraph, 'complex128', 'int32', 'bool') +create_test_case(TestAPILogicalAndInDygraph, 'complex128', 'int64', 'bool') +create_test_case(TestAPILogicalAndInDygraph, 'complex128', 'float16', 'bool') +create_test_case(TestAPILogicalAndInDygraph, 'complex128', 'float32', 'bool') +create_test_case(TestAPILogicalAndInDygraph, 'complex128', 'float64', 'bool') + + +class TestAPILogicalAndInplaceInDygraph(TestOperatorOverloadEqualInDygraph): + def run_api(self): + self.generate_test_value() + out = self.l_value.logical_and_(self.r_value) + + self.generate_test_value() + out_reverse = self.r_value.logical_and_(self.l_value) + + return out, out_reverse + + +create_test_case( + TestAPILogicalAndInplaceInDygraph, 'float16', 'float32', 'bool' +) +create_test_case( + TestAPILogicalAndInplaceInDygraph, 'float16', 'float64', 'bool' +) + +create_test_case( + TestAPILogicalAndInplaceInDygraph, 'float32', 'float64', 'bool' +) + +create_test_case(TestAPILogicalAndInplaceInDygraph, 'complex64', 'bool', 'bool') +create_test_case(TestAPILogicalAndInplaceInDygraph, 'complex64', 'int8', 'bool') +create_test_case( + TestAPILogicalAndInplaceInDygraph, 'complex64', 'int16', 'bool' +) +create_test_case( + TestAPILogicalAndInplaceInDygraph, 'complex64', 'int32', 'bool' +) +create_test_case( + TestAPILogicalAndInplaceInDygraph, 'complex64', 'int64', 'bool' +) +create_test_case( + TestAPILogicalAndInplaceInDygraph, 'complex64', 'float16', 'bool' +) +create_test_case( + TestAPILogicalAndInplaceInDygraph, 'complex64', 'float32', 'bool' +) +create_test_case( + TestAPILogicalAndInplaceInDygraph, 'complex64', 'float64', 'bool' +) +create_test_case( + TestAPILogicalAndInplaceInDygraph, 'complex64', 'complex128', 'bool' +) + +create_test_case( + TestAPILogicalAndInplaceInDygraph, 'complex128', 'bool', 'bool' +) +create_test_case( + TestAPILogicalAndInplaceInDygraph, 'complex128', 'int8', 'bool' +) +create_test_case( + TestAPILogicalAndInplaceInDygraph, 'complex128', 'int16', 'bool' +) +create_test_case( + TestAPILogicalAndInplaceInDygraph, 'complex128', 'int32', 'bool' +) +create_test_case( + TestAPILogicalAndInplaceInDygraph, 'complex128', 'int64', 'bool' +) +create_test_case( + TestAPILogicalAndInplaceInDygraph, 'complex128', 'float16', 'bool' +) +create_test_case( + TestAPILogicalAndInplaceInDygraph, 'complex128', 'float32', 'bool' +) +create_test_case( + TestAPILogicalAndInplaceInDygraph, 'complex128', 'float64', 'bool' +) + + +class TestAPILogicalOrInDygraph(TestOperatorOverloadEqualInDygraph): + def run_api(self): + self.generate_test_value() + + out = paddle.logical_or(self.l_value, self.r_value) + out_reverse = paddle.logical_or(self.r_value, self.l_value) + + return out, out_reverse + + +create_test_case(TestAPILogicalOrInDygraph, 'float16', 'float32', 'bool') +create_test_case(TestAPILogicalOrInDygraph, 'float16', 'float64', 'bool') + +create_test_case(TestAPILogicalOrInDygraph, 'float32', 'float64', 'bool') + +create_test_case(TestAPILogicalOrInDygraph, 'complex64', 'bool', 'bool') +create_test_case(TestAPILogicalOrInDygraph, 'complex64', 'int8', 'bool') +create_test_case(TestAPILogicalOrInDygraph, 'complex64', 'int16', 'bool') +create_test_case(TestAPILogicalOrInDygraph, 'complex64', 'int32', 'bool') +create_test_case(TestAPILogicalOrInDygraph, 'complex64', 'int64', 'bool') +create_test_case(TestAPILogicalOrInDygraph, 'complex64', 'float16', 'bool') +create_test_case(TestAPILogicalOrInDygraph, 
'complex64', 'float32', 'bool') +create_test_case(TestAPILogicalOrInDygraph, 'complex64', 'float64', 'bool') +create_test_case(TestAPILogicalOrInDygraph, 'complex64', 'complex128', 'bool') + +create_test_case(TestAPILogicalOrInDygraph, 'complex128', 'bool', 'bool') +create_test_case(TestAPILogicalOrInDygraph, 'complex128', 'int8', 'bool') +create_test_case(TestAPILogicalOrInDygraph, 'complex128', 'int16', 'bool') +create_test_case(TestAPILogicalOrInDygraph, 'complex128', 'int32', 'bool') +create_test_case(TestAPILogicalOrInDygraph, 'complex128', 'int64', 'bool') +create_test_case(TestAPILogicalOrInDygraph, 'complex128', 'float16', 'bool') +create_test_case(TestAPILogicalOrInDygraph, 'complex128', 'float32', 'bool') +create_test_case(TestAPILogicalOrInDygraph, 'complex128', 'float64', 'bool') + + +class TestAPILogicalOrInplaceInDygraph(TestOperatorOverloadEqualInDygraph): + def run_api(self): + self.generate_test_value() + out = self.l_value.logical_or_(self.r_value) + + self.generate_test_value() + out_reverse = self.r_value.logical_or_(self.l_value) + + return out, out_reverse + + +create_test_case(TestAPILogicalOrInplaceInDygraph, 'float16', 'float32', 'bool') +create_test_case(TestAPILogicalOrInplaceInDygraph, 'float16', 'float64', 'bool') + +create_test_case(TestAPILogicalOrInplaceInDygraph, 'float32', 'float64', 'bool') + +create_test_case(TestAPILogicalOrInplaceInDygraph, 'complex64', 'bool', 'bool') +create_test_case(TestAPILogicalOrInplaceInDygraph, 'complex64', 'int8', 'bool') +create_test_case(TestAPILogicalOrInplaceInDygraph, 'complex64', 'int16', 'bool') +create_test_case(TestAPILogicalOrInplaceInDygraph, 'complex64', 'int32', 'bool') +create_test_case(TestAPILogicalOrInplaceInDygraph, 'complex64', 'int64', 'bool') +create_test_case( + TestAPILogicalOrInplaceInDygraph, 'complex64', 'float16', 'bool' +) +create_test_case( + TestAPILogicalOrInplaceInDygraph, 'complex64', 'float32', 'bool' +) +create_test_case( + TestAPILogicalOrInplaceInDygraph, 'complex64', 'float64', 'bool' +) +create_test_case( + TestAPILogicalOrInplaceInDygraph, 'complex64', 'complex128', 'bool' +) + +create_test_case(TestAPILogicalOrInplaceInDygraph, 'complex128', 'bool', 'bool') +create_test_case(TestAPILogicalOrInplaceInDygraph, 'complex128', 'int8', 'bool') +create_test_case( + TestAPILogicalOrInplaceInDygraph, 'complex128', 'int16', 'bool' +) +create_test_case( + TestAPILogicalOrInplaceInDygraph, 'complex128', 'int32', 'bool' +) +create_test_case( + TestAPILogicalOrInplaceInDygraph, 'complex128', 'int64', 'bool' +) +create_test_case( + TestAPILogicalOrInplaceInDygraph, 'complex128', 'float16', 'bool' +) +create_test_case( + TestAPILogicalOrInplaceInDygraph, 'complex128', 'float32', 'bool' +) +create_test_case( + TestAPILogicalOrInplaceInDygraph, 'complex128', 'float64', 'bool' +) + + +class TestAPILogicalXorInDygraph(TestOperatorOverloadEqualInDygraph): + def run_api(self): + self.generate_test_value() + + out = paddle.logical_xor(self.l_value, self.r_value) + out_reverse = paddle.logical_xor(self.r_value, self.l_value) + + return out, out_reverse + + +create_test_case(TestAPILogicalXorInDygraph, 'float16', 'float32', 'bool') +create_test_case(TestAPILogicalXorInDygraph, 'float16', 'float64', 'bool') + +create_test_case(TestAPILogicalXorInDygraph, 'float32', 'float64', 'bool') + +create_test_case(TestAPILogicalXorInDygraph, 'complex64', 'bool', 'bool') +create_test_case(TestAPILogicalXorInDygraph, 'complex64', 'int8', 'bool') +create_test_case(TestAPILogicalXorInDygraph, 'complex64', 'int16', 'bool') 
+create_test_case(TestAPILogicalXorInDygraph, 'complex64', 'int32', 'bool') +create_test_case(TestAPILogicalXorInDygraph, 'complex64', 'int64', 'bool') +create_test_case(TestAPILogicalXorInDygraph, 'complex64', 'float16', 'bool') +create_test_case(TestAPILogicalXorInDygraph, 'complex64', 'float32', 'bool') +create_test_case(TestAPILogicalXorInDygraph, 'complex64', 'float64', 'bool') +create_test_case(TestAPILogicalXorInDygraph, 'complex64', 'complex128', 'bool') + +create_test_case(TestAPILogicalXorInDygraph, 'complex128', 'bool', 'bool') +create_test_case(TestAPILogicalXorInDygraph, 'complex128', 'int8', 'bool') +create_test_case(TestAPILogicalXorInDygraph, 'complex128', 'int16', 'bool') +create_test_case(TestAPILogicalXorInDygraph, 'complex128', 'int32', 'bool') +create_test_case(TestAPILogicalXorInDygraph, 'complex128', 'int64', 'bool') +create_test_case(TestAPILogicalXorInDygraph, 'complex128', 'float16', 'bool') +create_test_case(TestAPILogicalXorInDygraph, 'complex128', 'float32', 'bool') +create_test_case(TestAPILogicalXorInDygraph, 'complex128', 'float64', 'bool') + + +class TestAPILogicalXorInplaceInDygraph(TestOperatorOverloadEqualInDygraph): + def run_api(self): + self.generate_test_value() + out = self.l_value.logical_xor_(self.r_value) + + self.generate_test_value() + out_reverse = self.r_value.logical_xor_(self.l_value) + + return out, out_reverse + + +create_test_case( + TestAPILogicalXorInplaceInDygraph, 'float16', 'float32', 'bool' +) +create_test_case( + TestAPILogicalXorInplaceInDygraph, 'float16', 'float64', 'bool' +) + +create_test_case( + TestAPILogicalXorInplaceInDygraph, 'float32', 'float64', 'bool' +) + +create_test_case(TestAPILogicalXorInplaceInDygraph, 'complex64', 'bool', 'bool') +create_test_case(TestAPILogicalXorInplaceInDygraph, 'complex64', 'int8', 'bool') +create_test_case( + TestAPILogicalXorInplaceInDygraph, 'complex64', 'int16', 'bool' +) +create_test_case( + TestAPILogicalXorInplaceInDygraph, 'complex64', 'int32', 'bool' +) +create_test_case( + TestAPILogicalXorInplaceInDygraph, 'complex64', 'int64', 'bool' +) +create_test_case( + TestAPILogicalXorInplaceInDygraph, 'complex64', 'float16', 'bool' +) +create_test_case( + TestAPILogicalXorInplaceInDygraph, 'complex64', 'float32', 'bool' +) +create_test_case( + TestAPILogicalXorInplaceInDygraph, 'complex64', 'float64', 'bool' +) +create_test_case( + TestAPILogicalXorInplaceInDygraph, 'complex64', 'complex128', 'bool' +) + +create_test_case( + TestAPILogicalXorInplaceInDygraph, 'complex128', 'bool', 'bool' +) +create_test_case( + TestAPILogicalXorInplaceInDygraph, 'complex128', 'int8', 'bool' +) +create_test_case( + TestAPILogicalXorInplaceInDygraph, 'complex128', 'int16', 'bool' +) +create_test_case( + TestAPILogicalXorInplaceInDygraph, 'complex128', 'int32', 'bool' +) +create_test_case( + TestAPILogicalXorInplaceInDygraph, 'complex128', 'int64', 'bool' +) +create_test_case( + TestAPILogicalXorInplaceInDygraph, 'complex128', 'float16', 'bool' +) +create_test_case( + TestAPILogicalXorInplaceInDygraph, 'complex128', 'float32', 'bool' +) +create_test_case( + TestAPILogicalXorInplaceInDygraph, 'complex128', 'float64', 'bool' +) + + +class TestAPIFmaxInDygraph(TestOperatorOverloadAddInDygraph): + def run_api(self): + self.generate_test_value() + + out = paddle.fmax(self.l_value, self.r_value) + out_reverse = paddle.fmax(self.r_value, self.l_value) + + return out, out_reverse + + +create_test_case(TestAPIFmaxInDygraph, 'float16', 'float32', 'float32') +create_test_case(TestAPIFmaxInDygraph, 'float16', 'float64', 
'float64') + +create_test_case(TestAPIFmaxInDygraph, 'float32', 'float64', 'float64') + + +class TestAPIFminInDygraph(TestOperatorOverloadAddInDygraph): + def run_api(self): + self.generate_test_value() + + out = paddle.fmin(self.l_value, self.r_value) + out_reverse = paddle.fmin(self.r_value, self.l_value) + + return out, out_reverse + + +create_test_case(TestAPIFminInDygraph, 'float16', 'float32', 'float32') +create_test_case(TestAPIFminInDygraph, 'float16', 'float64', 'float64') + +create_test_case(TestAPIFminInDygraph, 'float32', 'float64', 'float64') + + +class TestAPILogAddExpInDygraph(TestOperatorOverloadAddInDygraph): + def run_api(self): + self.generate_test_value() + + out = paddle.logaddexp(self.l_value, self.r_value) + out_reverse = paddle.logaddexp(self.r_value, self.l_value) + + return out, out_reverse + + +create_test_case(TestAPILogAddExpInDygraph, 'float16', 'float32', 'float32') +create_test_case(TestAPILogAddExpInDygraph, 'float16', 'float64', 'float64') + +create_test_case(TestAPILogAddExpInDygraph, 'float32', 'float64', 'float64') + + +class TestAPIMaximumInDygraph(TestOperatorOverloadAddInDygraph): + def run_api(self): + self.generate_test_value() + + out = paddle.maximum(self.l_value, self.r_value) + out_reverse = paddle.maximum(self.r_value, self.l_value) + + return out, out_reverse + + +create_test_case(TestAPIMaximumInDygraph, 'float32', 'float64', 'float64') + + +class TestAPIMinimumInDygraph(TestOperatorOverloadAddInDygraph): + def run_api(self): + self.generate_test_value() + + out = paddle.minimum(self.l_value, self.r_value) + out_reverse = paddle.minimum(self.r_value, self.l_value) + + return out, out_reverse + + +create_test_case(TestAPIMinimumInDygraph, 'float32', 'float64', 'float64') + + +class TestAPINextAfterInDygraph(TestOperatorOverloadAddInDygraph): + def run_api(self): + self.generate_test_value() + + out = paddle.nextafter(self.l_value, self.r_value) + out_reverse = paddle.nextafter(self.r_value, self.l_value) + + return out, out_reverse + + +create_test_case(TestAPINextAfterInDygraph, 'float32', 'float64', 'float64') + + +class TestAPIAtan2InDygraph(TestOperatorOverloadAddInDygraph): + def run_api(self): + self.generate_test_value() + + out = paddle.atan2(self.l_value, self.r_value) + out_reverse = paddle.atan2(self.r_value, self.l_value) + + return out, out_reverse + + +create_test_case(TestAPIAtan2InDygraph, 'float16', 'float32', 'float32') +create_test_case(TestAPIAtan2InDygraph, 'float16', 'float64', 'float64') + +create_test_case(TestAPIAtan2InDygraph, 'float32', 'float64', 'float64') + + +class TestAPIPoissonNllLossInDygraph(TestOperatorOverloadAddInDygraph): + def run_api(self): + self.generate_test_value() + + out = paddle.nn.functional.poisson_nll_loss(self.l_value, self.r_value) + out_reverse = paddle.nn.functional.poisson_nll_loss( + self.r_value, self.l_value + ) + + return out, out_reverse + + +create_test_case( + TestAPIPoissonNllLossInDygraph, 'float16', 'float32', 'float32' +) +create_test_case( + TestAPIPoissonNllLossInDygraph, 'float16', 'float64', 'float64' +) + +create_test_case( + TestAPIPoissonNllLossInDygraph, 'float32', 'float64', 'float64' +) + +if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): + create_test_case( + TestAPIPoissonNllLossInDygraph, 'bfloat16', 'float16', 'float32' + ) + create_test_case( + TestAPIPoissonNllLossInDygraph, 'bfloat16', 'float32', 'float32' + ) + create_test_case( + TestAPIPoissonNllLossInDygraph, 'bfloat16', 'float64', 'float64' + ) + + +class 
TestAPIL1LossInDygraph(TestOperatorOverloadAddInDygraph): + def run_api(self): + self.generate_test_value() + + out = paddle.nn.functional.l1_loss(self.l_value, self.r_value) + out_reverse = paddle.nn.functional.l1_loss(self.r_value, self.l_value) + + return out, out_reverse + + +create_test_case(TestAPIL1LossInDygraph, 'float16', 'float32', 'float32') +create_test_case(TestAPIL1LossInDygraph, 'float16', 'float64', 'float64') + +create_test_case(TestAPIL1LossInDygraph, 'float32', 'float64', 'float64') + + +class TestAPISmoothL1LossInDygraph(TestOperatorOverloadAddInDygraph): + def run_api(self): + self.generate_test_value() + + out = paddle.nn.functional.smooth_l1_loss(self.l_value, self.r_value) + out_reverse = paddle.nn.functional.smooth_l1_loss( + self.r_value, self.l_value + ) + + return out, out_reverse + + +create_test_case(TestAPISmoothL1LossInDygraph, 'float16', 'float32', 'float32') +create_test_case(TestAPISmoothL1LossInDygraph, 'float16', 'float64', 'float64') + +create_test_case(TestAPISmoothL1LossInDygraph, 'float32', 'float64', 'float64') + +if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): + create_test_case( + TestAPISmoothL1LossInDygraph, 'bfloat16', 'float16', 'float32' + ) + create_test_case( + TestAPISmoothL1LossInDygraph, 'bfloat16', 'float32', 'float32' + ) + create_test_case( + TestAPISmoothL1LossInDygraph, 'bfloat16', 'float64', 'float64' + ) + + +class TestAPIHuberLossInDygraph(TestOperatorOverloadAddInDygraph): + def run_api(self): + self.generate_test_value() + + out = paddle._C_ops.huber_loss(self.l_value, self.r_value, 1.0) + out_reverse = paddle._C_ops.huber_loss(self.r_value, self.l_value, 1.0) + + return out, out_reverse + + +create_test_case(TestAPIHuberLossInDygraph, 'float16', 'float32', 'float32') +create_test_case(TestAPIHuberLossInDygraph, 'float16', 'float64', 'float64') + +create_test_case(TestAPIHuberLossInDygraph, 'float32', 'float64', 'float64') + +if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): + create_test_case( + TestAPIHuberLossInDygraph, 'bfloat16', 'float16', 'float32' + ) + create_test_case( + TestAPIHuberLossInDygraph, 'bfloat16', 'float32', 'float32' + ) + create_test_case( + TestAPIHuberLossInDygraph, 'bfloat16', 'float64', 'float64' + ) + + +class TestAPIMSELossInDygraph(TestOperatorOverloadAddInDygraph): + def run_api(self): + self.generate_test_value() + + out = paddle.nn.functional.mse_loss(self.l_value, self.r_value) + out_reverse = paddle.nn.functional.mse_loss(self.r_value, self.l_value) + + return out, out_reverse + + +create_test_case(TestAPIMSELossInDygraph, 'float32', 'float64', 'float64') + + class TestOperatorOverloadAddInStatic(unittest.TestCase): def setUp(self): paddle.enable_static() @@ -72,7 +2057,784 @@ def setUp(self): def set_dtype(self): self.ldtype = 'float32' self.rdtype = 'float64' - self.expected_out_dtype = 'float64' + self.expected_out_dtype = 'float64' + + def generate_test_value(self): + self.l_value = (paddle.randn((4, 3, 2)) * 10).astype(self.ldtype) + self.r_value = (paddle.randn((4, 3, 2)) * 10).astype(self.rdtype) + + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + + out = self.l_value + self.r_value + out_reverse = self.r_value + self.l_value + + res = self.exe.run(prog, fetch_list=[out, out_reverse]) + return res + + def test_dtype_is_expected(self): + res = self.run_api() + self.assertEqual(res[0].dtype.__str__(), self.expected_out_dtype) + 
self.assertEqual(res[1].dtype.__str__(), self.expected_out_dtype) + + +create_test_case( + TestOperatorOverloadAddInStatic, 'float16', 'float32', 'float32' +) +create_test_case( + TestOperatorOverloadAddInStatic, 'float16', 'float64', 'float64' +) + +create_test_case( + TestOperatorOverloadAddInStatic, 'float32', 'float64', 'float64' +) + + +if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): + create_test_case( + TestOperatorOverloadAddInStatic, 'bfloat16', 'float16', 'float32' + ) + create_test_case( + TestOperatorOverloadAddInStatic, 'bfloat16', 'float32', 'float32' + ) + create_test_case( + TestOperatorOverloadAddInStatic, 'bfloat16', 'float64', 'float64' + ) + create_test_case( + TestOperatorOverloadAddInStatic, 'bfloat16', 'complex64', 'complex64' + ) + create_test_case( + TestOperatorOverloadAddInStatic, 'bfloat16', 'complex128', 'complex128' + ) + +create_test_case( + TestOperatorOverloadAddInStatic, 'complex64', 'bool', 'complex64' +) +create_test_case( + TestOperatorOverloadAddInStatic, 'complex64', 'int8', 'complex64' +) +create_test_case( + TestOperatorOverloadAddInStatic, 'complex64', 'uint8', 'complex64' +) +create_test_case( + TestOperatorOverloadAddInStatic, 'complex64', 'int16', 'complex64' +) +create_test_case( + TestOperatorOverloadAddInStatic, 'complex64', 'int32', 'complex64' +) +create_test_case( + TestOperatorOverloadAddInStatic, 'complex64', 'int64', 'complex64' +) +create_test_case( + TestOperatorOverloadAddInStatic, 'complex64', 'float16', 'complex64' +) +create_test_case( + TestOperatorOverloadAddInStatic, 'complex64', 'float32', 'complex64' +) +create_test_case( + TestOperatorOverloadAddInStatic, 'complex64', 'float64', 'complex128' +) +create_test_case( + TestOperatorOverloadAddInStatic, 'complex64', 'complex128', 'complex128' +) + +create_test_case( + TestOperatorOverloadAddInStatic, 'complex128', 'bool', 'complex128' +) +create_test_case( + TestOperatorOverloadAddInStatic, 'complex128', 'int8', 'complex128' +) +create_test_case( + TestOperatorOverloadAddInStatic, 'complex128', 'uint8', 'complex128' +) +create_test_case( + TestOperatorOverloadAddInStatic, 'complex128', 'int16', 'complex128' +) +create_test_case( + TestOperatorOverloadAddInStatic, 'complex128', 'int32', 'complex128' +) +create_test_case( + TestOperatorOverloadAddInStatic, 'complex128', 'int64', 'complex128' +) +create_test_case( + TestOperatorOverloadAddInStatic, 'complex128', 'float16', 'complex128' +) +create_test_case( + TestOperatorOverloadAddInStatic, 'complex128', 'float32', 'complex128' +) +create_test_case( + TestOperatorOverloadAddInStatic, 'complex128', 'float64', 'complex128' +) + + +class TestAPIAddInStatic(TestOperatorOverloadAddInStatic): + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + + out = paddle.add(self.l_value, self.r_value) + out_reverse = paddle.add(self.r_value, self.l_value) + + res = self.exe.run(prog, fetch_list=[out, out_reverse]) + return res + + +create_test_case(TestAPIAddInStatic, 'float16', 'float32', 'float32') +create_test_case(TestAPIAddInStatic, 'float16', 'float64', 'float64') + +create_test_case(TestAPIAddInStatic, 'float32', 'float64', 'float64') + + +if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): + create_test_case(TestAPIAddInStatic, 'bfloat16', 'float16', 'float32') + create_test_case(TestAPIAddInStatic, 'bfloat16', 'float32', 'float32') + create_test_case(TestAPIAddInStatic, 'bfloat16', 'float64', 'float64') + 
create_test_case(TestAPIAddInStatic, 'bfloat16', 'complex64', 'complex64') + create_test_case(TestAPIAddInStatic, 'bfloat16', 'complex128', 'complex128') + +create_test_case(TestAPIAddInStatic, 'complex64', 'bool', 'complex64') +create_test_case(TestAPIAddInStatic, 'complex64', 'int32', 'complex64') +create_test_case(TestAPIAddInStatic, 'complex64', 'int64', 'complex64') +create_test_case(TestAPIAddInStatic, 'complex64', 'float16', 'complex64') +create_test_case(TestAPIAddInStatic, 'complex64', 'float32', 'complex64') +create_test_case(TestAPIAddInStatic, 'complex64', 'float64', 'complex128') +create_test_case(TestAPIAddInStatic, 'complex64', 'complex128', 'complex128') + +create_test_case(TestAPIAddInStatic, 'complex128', 'bool', 'complex128') +create_test_case(TestAPIAddInStatic, 'complex128', 'int32', 'complex128') +create_test_case(TestAPIAddInStatic, 'complex128', 'int64', 'complex128') +create_test_case(TestAPIAddInStatic, 'complex128', 'float16', 'complex128') +create_test_case(TestAPIAddInStatic, 'complex128', 'float32', 'complex128') +create_test_case(TestAPIAddInStatic, 'complex128', 'float64', 'complex128') + + +class TestOperatorOverloadSubInStatic(TestOperatorOverloadAddInStatic): + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + + out = self.l_value - self.r_value + out_reverse = self.r_value - self.l_value + + res = self.exe.run(prog, fetch_list=[out, out_reverse]) + return res + + +create_test_case( + TestOperatorOverloadSubInStatic, 'float16', 'float32', 'float32' +) +create_test_case( + TestOperatorOverloadSubInStatic, 'float16', 'float64', 'float64' +) + +create_test_case( + TestOperatorOverloadSubInStatic, 'float32', 'float64', 'float64' +) + + +if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): + create_test_case( + TestOperatorOverloadSubInStatic, 'bfloat16', 'float16', 'float32' + ) + create_test_case( + TestOperatorOverloadSubInStatic, 'bfloat16', 'float32', 'float32' + ) + create_test_case( + TestOperatorOverloadSubInStatic, 'bfloat16', 'float64', 'float64' + ) + create_test_case( + TestOperatorOverloadSubInStatic, 'bfloat16', 'complex64', 'complex64' + ) + create_test_case( + TestOperatorOverloadSubInStatic, 'bfloat16', 'complex128', 'complex128' + ) + +create_test_case( + TestOperatorOverloadSubInStatic, 'complex64', 'bool', 'complex64' +) +create_test_case( + TestOperatorOverloadSubInStatic, 'complex64', 'int8', 'complex64' +) +create_test_case( + TestOperatorOverloadSubInStatic, 'complex64', 'uint8', 'complex64' +) +create_test_case( + TestOperatorOverloadSubInStatic, 'complex64', 'int16', 'complex64' +) +create_test_case( + TestOperatorOverloadSubInStatic, 'complex64', 'int32', 'complex64' +) +create_test_case( + TestOperatorOverloadSubInStatic, 'complex64', 'int64', 'complex64' +) +create_test_case( + TestOperatorOverloadSubInStatic, 'complex64', 'float16', 'complex64' +) +create_test_case( + TestOperatorOverloadSubInStatic, 'complex64', 'float32', 'complex64' +) +create_test_case( + TestOperatorOverloadSubInStatic, 'complex64', 'float64', 'complex128' +) +create_test_case( + TestOperatorOverloadSubInStatic, 'complex64', 'complex128', 'complex128' +) + +create_test_case( + TestOperatorOverloadSubInStatic, 'complex128', 'bool', 'complex128' +) +create_test_case( + TestOperatorOverloadSubInStatic, 'complex128', 'int8', 'complex128' +) +create_test_case( + TestOperatorOverloadSubInStatic, 'complex128', 'uint8', 'complex128' +) +create_test_case( + 
TestOperatorOverloadSubInStatic, 'complex128', 'int16', 'complex128' +) +create_test_case( + TestOperatorOverloadSubInStatic, 'complex128', 'int32', 'complex128' +) +create_test_case( + TestOperatorOverloadSubInStatic, 'complex128', 'int64', 'complex128' +) +create_test_case( + TestOperatorOverloadSubInStatic, 'complex128', 'float16', 'complex128' +) +create_test_case( + TestOperatorOverloadSubInStatic, 'complex128', 'float32', 'complex128' +) +create_test_case( + TestOperatorOverloadSubInStatic, 'complex128', 'float64', 'complex128' +) + + +class TestAPISubInStatic(TestOperatorOverloadAddInStatic): + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + + out = paddle.subtract(self.l_value, self.r_value) + out_reverse = paddle.subtract(self.r_value, self.l_value) + + res = self.exe.run(prog, fetch_list=[out, out_reverse]) + return res + + +create_test_case(TestAPISubInStatic, 'float16', 'float32', 'float32') +create_test_case(TestAPISubInStatic, 'float16', 'float64', 'float64') + +create_test_case(TestAPISubInStatic, 'float32', 'float64', 'float64') + + +if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): + create_test_case(TestAPISubInStatic, 'bfloat16', 'float16', 'float32') + create_test_case(TestAPISubInStatic, 'bfloat16', 'float32', 'float32') + create_test_case(TestAPISubInStatic, 'bfloat16', 'float64', 'float64') + create_test_case(TestAPISubInStatic, 'bfloat16', 'complex64', 'complex64') + create_test_case(TestAPISubInStatic, 'bfloat16', 'complex128', 'complex128') + +create_test_case(TestAPISubInStatic, 'complex64', 'bool', 'complex64') +create_test_case(TestAPISubInStatic, 'complex64', 'int32', 'complex64') +create_test_case(TestAPISubInStatic, 'complex64', 'int64', 'complex64') +create_test_case(TestAPISubInStatic, 'complex64', 'float16', 'complex64') +create_test_case(TestAPISubInStatic, 'complex64', 'float32', 'complex64') +create_test_case(TestAPISubInStatic, 'complex64', 'float64', 'complex128') +create_test_case(TestAPISubInStatic, 'complex64', 'complex128', 'complex128') + +create_test_case(TestAPISubInStatic, 'complex128', 'bool', 'complex128') +create_test_case(TestAPISubInStatic, 'complex128', 'int32', 'complex128') +create_test_case(TestAPISubInStatic, 'complex128', 'int64', 'complex128') +create_test_case(TestAPISubInStatic, 'complex128', 'float16', 'complex128') +create_test_case(TestAPISubInStatic, 'complex128', 'float32', 'complex128') +create_test_case(TestAPISubInStatic, 'complex128', 'float64', 'complex128') + + +class TestOperatorOverloadMulInStatic(TestOperatorOverloadAddInStatic): + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + + out = self.l_value * self.r_value + out_reverse = self.r_value * self.l_value + + res = self.exe.run(prog, fetch_list=[out, out_reverse]) + return res + + +create_test_case( + TestOperatorOverloadMulInStatic, 'float16', 'float32', 'float32' +) +create_test_case( + TestOperatorOverloadMulInStatic, 'float16', 'float64', 'float64' +) + +create_test_case( + TestOperatorOverloadMulInStatic, 'float32', 'float64', 'float64' +) + +if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): + create_test_case( + TestOperatorOverloadMulInStatic, 'bfloat16', 'float16', 'float32' + ) + create_test_case( + TestOperatorOverloadMulInStatic, 'bfloat16', 'float32', 'float32' + ) + create_test_case( + TestOperatorOverloadMulInStatic, 'bfloat16', 'float64', 
'float64' + ) + create_test_case( + TestOperatorOverloadMulInStatic, 'bfloat16', 'complex64', 'complex64' + ) + create_test_case( + TestOperatorOverloadMulInStatic, 'bfloat16', 'complex128', 'complex128' + ) + +create_test_case( + TestOperatorOverloadMulInStatic, 'complex64', 'bool', 'complex64' +) +create_test_case( + TestOperatorOverloadMulInStatic, 'complex64', 'int8', 'complex64' +) +create_test_case( + TestOperatorOverloadMulInStatic, 'complex64', 'uint8', 'complex64' +) +create_test_case( + TestOperatorOverloadMulInStatic, 'complex64', 'int16', 'complex64' +) +create_test_case( + TestOperatorOverloadMulInStatic, 'complex64', 'int32', 'complex64' +) +create_test_case( + TestOperatorOverloadMulInStatic, 'complex64', 'int64', 'complex64' +) +create_test_case( + TestOperatorOverloadMulInStatic, 'complex64', 'float16', 'complex64' +) +create_test_case( + TestOperatorOverloadMulInStatic, 'complex64', 'float32', 'complex64' +) +create_test_case( + TestOperatorOverloadMulInStatic, 'complex64', 'float64', 'complex128' +) +create_test_case( + TestOperatorOverloadMulInStatic, 'complex64', 'complex128', 'complex128' +) + +create_test_case( + TestOperatorOverloadMulInStatic, 'complex128', 'bool', 'complex128' +) +create_test_case( + TestOperatorOverloadMulInStatic, 'complex128', 'int8', 'complex128' +) +create_test_case( + TestOperatorOverloadMulInStatic, 'complex128', 'uint8', 'complex128' +) +create_test_case( + TestOperatorOverloadMulInStatic, 'complex128', 'int16', 'complex128' +) +create_test_case( + TestOperatorOverloadMulInStatic, 'complex128', 'int32', 'complex128' +) +create_test_case( + TestOperatorOverloadMulInStatic, 'complex128', 'int64', 'complex128' +) +create_test_case( + TestOperatorOverloadMulInStatic, 'complex128', 'float16', 'complex128' +) +create_test_case( + TestOperatorOverloadMulInStatic, 'complex128', 'float32', 'complex128' +) +create_test_case( + TestOperatorOverloadMulInStatic, 'complex128', 'float64', 'complex128' +) + + +class TestAPIMulInStatic(TestOperatorOverloadAddInStatic): + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + + out = paddle.multiply(self.l_value, self.r_value) + out_reverse = paddle.multiply(self.r_value, self.l_value) + + res = self.exe.run(prog, fetch_list=[out, out_reverse]) + return res + + +create_test_case(TestAPIMulInStatic, 'float16', 'float32', 'float32') +create_test_case(TestAPIMulInStatic, 'float16', 'float64', 'float64') + +create_test_case(TestAPIMulInStatic, 'float32', 'float64', 'float64') + +if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): + create_test_case(TestAPIMulInStatic, 'bfloat16', 'float16', 'float32') + create_test_case(TestAPIMulInStatic, 'bfloat16', 'float32', 'float32') + create_test_case(TestAPIMulInStatic, 'bfloat16', 'float64', 'float64') + create_test_case(TestAPIMulInStatic, 'bfloat16', 'complex64', 'complex64') + create_test_case(TestAPIMulInStatic, 'bfloat16', 'complex128', 'complex128') + +create_test_case(TestAPIMulInStatic, 'complex64', 'bool', 'complex64') +create_test_case(TestAPIMulInStatic, 'complex64', 'int32', 'complex64') +create_test_case(TestAPIMulInStatic, 'complex64', 'int64', 'complex64') +create_test_case(TestAPIMulInStatic, 'complex64', 'float16', 'complex64') +create_test_case(TestAPIMulInStatic, 'complex64', 'float32', 'complex64') +create_test_case(TestAPIMulInStatic, 'complex64', 'float64', 'complex128') +create_test_case(TestAPIMulInStatic, 'complex64', 'complex128', 'complex128') + 
+create_test_case(TestAPIMulInStatic, 'complex128', 'bool', 'complex128') +create_test_case(TestAPIMulInStatic, 'complex128', 'int32', 'complex128') +create_test_case(TestAPIMulInStatic, 'complex128', 'int64', 'complex128') +create_test_case(TestAPIMulInStatic, 'complex128', 'float16', 'complex128') +create_test_case(TestAPIMulInStatic, 'complex128', 'float32', 'complex128') +create_test_case(TestAPIMulInStatic, 'complex128', 'float64', 'complex128') + + +class TestAPIDivInStatic(TestOperatorOverloadAddInStatic): + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + + out = paddle.divide(self.l_value, self.r_value) + out_reverse = paddle.divide(self.r_value, self.l_value) + + res = self.exe.run(prog, fetch_list=[out, out_reverse]) + return res + + +create_test_case(TestAPIDivInStatic, 'float16', 'float32', 'float32') +create_test_case(TestAPIDivInStatic, 'float16', 'float64', 'float64') + +create_test_case(TestAPIDivInStatic, 'float32', 'float64', 'float64') + +if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): + create_test_case(TestAPIDivInStatic, 'bfloat16', 'float16', 'float32') + create_test_case(TestAPIDivInStatic, 'bfloat16', 'float32', 'float32') + create_test_case(TestAPIDivInStatic, 'bfloat16', 'float64', 'float64') + create_test_case(TestAPIDivInStatic, 'bfloat16', 'complex64', 'complex64') + create_test_case(TestAPIDivInStatic, 'bfloat16', 'complex128', 'complex128') + +create_test_case(TestAPIDivInStatic, 'complex64', 'bool', 'complex64') +create_test_case(TestAPIDivInStatic, 'complex64', 'int32', 'complex64') +create_test_case(TestAPIDivInStatic, 'complex64', 'int64', 'complex64') +create_test_case(TestAPIDivInStatic, 'complex64', 'float16', 'complex64') +create_test_case(TestAPIDivInStatic, 'complex64', 'float32', 'complex64') +create_test_case(TestAPIDivInStatic, 'complex64', 'float64', 'complex128') +create_test_case(TestAPIDivInStatic, 'complex64', 'complex128', 'complex128') + +create_test_case(TestAPIDivInStatic, 'complex128', 'bool', 'complex128') +create_test_case(TestAPIDivInStatic, 'complex128', 'int32', 'complex128') +create_test_case(TestAPIDivInStatic, 'complex128', 'int64', 'complex128') +create_test_case(TestAPIDivInStatic, 'complex128', 'float16', 'complex128') +create_test_case(TestAPIDivInStatic, 'complex128', 'float32', 'complex128') +create_test_case(TestAPIDivInStatic, 'complex128', 'float64', 'complex128') + + +class TestOperatorOverloadDivInStatic(TestOperatorOverloadAddInStatic): + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + + out = self.l_value / self.r_value + out_reverse = self.r_value / self.l_value + + res = self.exe.run(prog, fetch_list=[out, out_reverse]) + return res + + +create_test_case( + TestOperatorOverloadDivInStatic, 'float16', 'float32', 'float32' +) +create_test_case( + TestOperatorOverloadDivInStatic, 'float16', 'float64', 'float64' +) + +create_test_case( + TestOperatorOverloadDivInStatic, 'float32', 'float64', 'float64' +) + + +if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): + create_test_case( + TestOperatorOverloadDivInStatic, 'bfloat16', 'float16', 'float32' + ) + create_test_case( + TestOperatorOverloadDivInStatic, 'bfloat16', 'float32', 'float32' + ) + create_test_case( + TestOperatorOverloadDivInStatic, 'bfloat16', 'float64', 'float64' + ) + create_test_case( + TestOperatorOverloadDivInStatic, 'bfloat16', 'complex64', 
'complex64' + ) + create_test_case( + TestOperatorOverloadDivInStatic, 'bfloat16', 'complex128', 'complex128' + ) + +create_test_case( + TestOperatorOverloadDivInStatic, 'complex64', 'bool', 'complex64' +) +create_test_case( + TestOperatorOverloadDivInStatic, 'complex64', 'int8', 'complex64' +) +create_test_case( + TestOperatorOverloadDivInStatic, 'complex64', 'uint8', 'complex64' +) +create_test_case( + TestOperatorOverloadDivInStatic, 'complex64', 'int16', 'complex64' +) +create_test_case( + TestOperatorOverloadDivInStatic, 'complex64', 'int32', 'complex64' +) +create_test_case( + TestOperatorOverloadDivInStatic, 'complex64', 'int64', 'complex64' +) +create_test_case( + TestOperatorOverloadDivInStatic, 'complex64', 'float16', 'complex64' +) +create_test_case( + TestOperatorOverloadDivInStatic, 'complex64', 'float32', 'complex64' +) +create_test_case( + TestOperatorOverloadDivInStatic, 'complex64', 'float64', 'complex128' +) +create_test_case( + TestOperatorOverloadDivInStatic, 'complex64', 'complex128', 'complex128' +) + +create_test_case( + TestOperatorOverloadDivInStatic, 'complex128', 'bool', 'complex128' +) +create_test_case( + TestOperatorOverloadDivInStatic, 'complex128', 'int8', 'complex128' +) +create_test_case( + TestOperatorOverloadDivInStatic, 'complex128', 'uint8', 'complex128' +) +create_test_case( + TestOperatorOverloadDivInStatic, 'complex128', 'int16', 'complex128' +) +create_test_case( + TestOperatorOverloadDivInStatic, 'complex128', 'int32', 'complex128' +) +create_test_case( + TestOperatorOverloadDivInStatic, 'complex128', 'int64', 'complex128' +) +create_test_case( + TestOperatorOverloadDivInStatic, 'complex128', 'float16', 'complex128' +) +create_test_case( + TestOperatorOverloadDivInStatic, 'complex128', 'float32', 'complex128' +) +create_test_case( + TestOperatorOverloadDivInStatic, 'complex128', 'float64', 'complex128' +) + + +class TestAPIFloorDivInStatic(TestOperatorOverloadAddInStatic): + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + + out = paddle.floor_divide(self.l_value, self.r_value) + out_reverse = paddle.floor_divide(self.r_value, self.l_value) + + res = self.exe.run(prog, fetch_list=[out, out_reverse]) + return res + + +create_test_case(TestAPIFloorDivInStatic, 'float16', 'float32', 'float32') +create_test_case(TestAPIFloorDivInStatic, 'float16', 'float64', 'float64') + +create_test_case(TestAPIFloorDivInStatic, 'float32', 'float64', 'float64') + +if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): + create_test_case(TestAPIFloorDivInStatic, 'bfloat16', 'float16', 'float32') + create_test_case(TestAPIFloorDivInStatic, 'bfloat16', 'float32', 'float32') + create_test_case(TestAPIFloorDivInStatic, 'bfloat16', 'float64', 'float64') + + +class TestOperatorOverloadFloorDivInStatic(TestOperatorOverloadAddInStatic): + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + + out = self.l_value // self.r_value + out_reverse = self.r_value // self.l_value + + res = self.exe.run(prog, fetch_list=[out, out_reverse]) + return res + + +create_test_case( + TestOperatorOverloadFloorDivInStatic, 'float16', 'float32', 'float32' +) +create_test_case( + TestOperatorOverloadFloorDivInStatic, 'float16', 'float64', 'float64' +) + +create_test_case( + TestOperatorOverloadFloorDivInStatic, 'float32', 'float64', 'float64' +) + + +if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): + 
create_test_case( + TestOperatorOverloadFloorDivInStatic, 'bfloat16', 'float16', 'float32' + ) + create_test_case( + TestOperatorOverloadFloorDivInStatic, 'bfloat16', 'float32', 'float32' + ) + create_test_case( + TestOperatorOverloadFloorDivInStatic, 'bfloat16', 'float64', 'float64' + ) + + +class TestAPIPowInStatic(TestOperatorOverloadAddInStatic): + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + + out = paddle.pow(self.l_value, self.r_value) + out_reverse = paddle.pow(self.r_value, self.l_value) + + res = self.exe.run(prog, fetch_list=[out, out_reverse]) + return res + + +create_test_case(TestAPIPowInStatic, 'float16', 'float32', 'float32') +create_test_case(TestAPIPowInStatic, 'float16', 'float64', 'float64') + +create_test_case(TestAPIPowInStatic, 'float32', 'float64', 'float64') + + +if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): + create_test_case(TestAPIPowInStatic, 'bfloat16', 'float16', 'float32') + create_test_case(TestAPIPowInStatic, 'bfloat16', 'float32', 'float32') + create_test_case(TestAPIPowInStatic, 'bfloat16', 'float64', 'float64') + + +class TestOperatorOverloadPowInStatic(TestOperatorOverloadAddInStatic): + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + + out = self.l_value**self.r_value + out_reverse = self.r_value**self.l_value + + res = self.exe.run(prog, fetch_list=[out, out_reverse]) + return res + + +create_test_case( + TestOperatorOverloadPowInStatic, 'float16', 'float32', 'float32' +) +create_test_case( + TestOperatorOverloadPowInStatic, 'float16', 'float64', 'float64' +) + +create_test_case( + TestOperatorOverloadPowInStatic, 'float32', 'float64', 'float64' +) + + +if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): + create_test_case( + TestOperatorOverloadPowInStatic, 'bfloat16', 'float16', 'float32' + ) + create_test_case( + TestOperatorOverloadPowInStatic, 'bfloat16', 'float32', 'float32' + ) + create_test_case( + TestOperatorOverloadPowInStatic, 'bfloat16', 'float64', 'float64' + ) + + +class TestAPIModInStatic(TestOperatorOverloadAddInStatic): + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + + out = paddle.mod(self.l_value, self.r_value) + out_reverse = paddle.mod(self.r_value, self.l_value) + + res = self.exe.run(prog, fetch_list=[out, out_reverse]) + return res + + +create_test_case(TestAPIModInStatic, 'float16', 'float32', 'float32') +create_test_case(TestAPIModInStatic, 'float16', 'float64', 'float64') + +create_test_case(TestAPIModInStatic, 'float32', 'float64', 'float64') + + +if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): + create_test_case(TestAPIModInStatic, 'bfloat16', 'float16', 'float32') + create_test_case(TestAPIModInStatic, 'bfloat16', 'float32', 'float32') + create_test_case(TestAPIModInStatic, 'bfloat16', 'float64', 'float64') + + +class TestOperatorOverloadModInStatic(TestOperatorOverloadAddInStatic): + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + + out = self.l_value % self.r_value + out_reverse = self.r_value % self.l_value + + res = self.exe.run(prog, fetch_list=[out, out_reverse]) + return res + + +create_test_case( + TestOperatorOverloadModInStatic, 'float16', 'float32', 'float32' +) +create_test_case( + TestOperatorOverloadModInStatic, 
'float16', 'float64', 'float64' +) + +create_test_case( + TestOperatorOverloadModInStatic, 'float32', 'float64', 'float64' +) + + +if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): + create_test_case( + TestOperatorOverloadModInStatic, 'bfloat16', 'float16', 'float32' + ) + create_test_case( + TestOperatorOverloadModInStatic, 'bfloat16', 'float32', 'float32' + ) + create_test_case( + TestOperatorOverloadModInStatic, 'bfloat16', 'float64', 'float64' + ) + + +class TestOperatorOverloadEqualInStatic(unittest.TestCase): + def setUp(self): + paddle.enable_static() + self.set_dtype() + self.exe = paddle.static.Executor() + + def set_dtype(self): + self.ldtype = 'float32' + self.rdtype = 'float64' + self.expected_out_dtype = 'bool' def generate_test_value(self): self.l_value = (paddle.randn((4, 3, 2)) * 10).astype(self.ldtype) @@ -83,164 +2845,638 @@ def run_api(self): with paddle.static.program_guard(prog): self.generate_test_value() - out = self.l_value + self.r_value - out_reverse = self.r_value + self.l_value + out = self.l_value == self.r_value + out_reverse = self.r_value == self.l_value + + res = self.exe.run(prog, fetch_list=[out, out_reverse]) + return res + + def test_dtype_is_expected(self): + res = self.run_api() + self.assertEqual(res[0].dtype.__str__(), self.expected_out_dtype) + self.assertEqual(res[1].dtype.__str__(), self.expected_out_dtype) + + +create_test_case( + TestOperatorOverloadEqualInStatic, 'float16', 'float32', 'bool' +) +create_test_case( + TestOperatorOverloadEqualInStatic, 'float16', 'float64', 'bool' +) + +create_test_case( + TestOperatorOverloadEqualInStatic, 'float32', 'float64', 'bool' +) + + +class TestAPIEqualInStatic(TestOperatorOverloadEqualInStatic): + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + + out = paddle.equal(self.l_value, self.r_value) + out_reverse = paddle.equal(self.r_value, self.l_value) + + res = self.exe.run(prog, fetch_list=[out, out_reverse]) + return res + + +create_test_case(TestAPIEqualInStatic, 'float16', 'float32', 'bool') +create_test_case(TestAPIEqualInStatic, 'float16', 'float64', 'bool') + +create_test_case(TestAPIEqualInStatic, 'float32', 'float64', 'bool') + + +class TestAPINotEqualInStatic(TestOperatorOverloadEqualInStatic): + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + + out = paddle.not_equal(self.l_value, self.r_value) + out_reverse = paddle.not_equal(self.r_value, self.l_value) + + res = self.exe.run(prog, fetch_list=[out, out_reverse]) + return res + + +create_test_case(TestAPINotEqualInStatic, 'float16', 'float32', 'bool') +create_test_case(TestAPINotEqualInStatic, 'float16', 'float64', 'bool') + +create_test_case(TestAPINotEqualInStatic, 'float32', 'float64', 'bool') + + +class TestOperatorOverloadNotEqualInStatic(TestOperatorOverloadEqualInStatic): + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + + out = self.l_value != self.r_value + out_reverse = self.r_value != self.l_value + + res = self.exe.run(prog, fetch_list=[out, out_reverse]) + return res + + +create_test_case( + TestOperatorOverloadNotEqualInStatic, 'float16', 'float32', 'bool' +) +create_test_case( + TestOperatorOverloadNotEqualInStatic, 'float16', 'float64', 'bool' +) + +create_test_case( + TestOperatorOverloadNotEqualInStatic, 'float32', 'float64', 'bool' +) + + +class 
TestAPILessThanInStatic(TestOperatorOverloadEqualInStatic): + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + + out = paddle.less_than(self.l_value, self.r_value) + out_reverse = paddle.less_than(self.r_value, self.l_value) + + res = self.exe.run(prog, fetch_list=[out, out_reverse]) + return res + + +create_test_case(TestAPILessThanInStatic, 'float16', 'float32', 'bool') +create_test_case(TestAPILessThanInStatic, 'float16', 'float64', 'bool') + +create_test_case(TestAPILessThanInStatic, 'float32', 'float64', 'bool') + + +class TestOperatorOverloadLessThanInStatic(TestOperatorOverloadEqualInStatic): + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + + out = self.l_value < self.r_value + out_reverse = self.r_value < self.l_value + + res = self.exe.run(prog, fetch_list=[out, out_reverse]) + return res + + +create_test_case( + TestOperatorOverloadLessThanInStatic, 'float16', 'float32', 'bool' +) +create_test_case( + TestOperatorOverloadLessThanInStatic, 'float16', 'float64', 'bool' +) + +create_test_case( + TestOperatorOverloadLessThanInStatic, 'float32', 'float64', 'bool' +) + + +class TestAPILessEqualInStatic(TestOperatorOverloadEqualInStatic): + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + + out = paddle.less_equal(self.l_value, self.r_value) + out_reverse = paddle.less_equal(self.r_value, self.l_value) + + res = self.exe.run(prog, fetch_list=[out, out_reverse]) + return res + + +create_test_case(TestAPILessEqualInStatic, 'float16', 'float32', 'bool') +create_test_case(TestAPILessEqualInStatic, 'float16', 'float64', 'bool') + +create_test_case(TestAPILessEqualInStatic, 'float32', 'float64', 'bool') + + +class TestOperatorOverloadLessEqualInStatic(TestOperatorOverloadEqualInStatic): + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + + out = self.l_value <= self.r_value + out_reverse = self.r_value <= self.l_value res = self.exe.run(prog, fetch_list=[out, out_reverse]) return res - def test_dtype_is_expected(self): - res = self.run_api() - self.assertEqual(res[0].dtype.__str__(), self.expected_out_dtype) - self.assertEqual(res[1].dtype.__str__(), self.expected_out_dtype) - create_test_case( - TestOperatorOverloadAddInStatic, 'float16', 'float32', 'float32' + TestOperatorOverloadLessEqualInStatic, 'float16', 'float32', 'bool' ) create_test_case( - TestOperatorOverloadAddInStatic, 'float16', 'float64', 'float64' + TestOperatorOverloadLessEqualInStatic, 'float16', 'float64', 'bool' ) create_test_case( - TestOperatorOverloadAddInStatic, 'float32', 'float64', 'float64' + TestOperatorOverloadLessEqualInStatic, 'float32', 'float64', 'bool' ) -if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): - create_test_case( - TestOperatorOverloadAddInStatic, 'bfloat16', 'float16', 'float32' - ) - create_test_case( - TestOperatorOverloadAddInStatic, 'bfloat16', 'float32', 'float32' - ) - create_test_case( - TestOperatorOverloadAddInStatic, 'bfloat16', 'float64', 'float64' - ) +class TestAPIGreaterThanInStatic(TestOperatorOverloadEqualInStatic): + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + out = paddle.greater_than(self.l_value, self.r_value) + out_reverse = paddle.greater_than(self.r_value, self.l_value) 
-class TestAPIAddInStatic(TestOperatorOverloadAddInStatic): + res = self.exe.run(prog, fetch_list=[out, out_reverse]) + return res + + +create_test_case(TestAPIGreaterThanInStatic, 'float16', 'float32', 'bool') +create_test_case(TestAPIGreaterThanInStatic, 'float16', 'float64', 'bool') + +create_test_case(TestAPIGreaterThanInStatic, 'float32', 'float64', 'bool') + + +class TestOperatorOverloadGreaterThanInStatic( + TestOperatorOverloadEqualInStatic +): def run_api(self): prog = paddle.static.Program() with paddle.static.program_guard(prog): self.generate_test_value() - out = paddle.add(self.l_value, self.r_value) - out_reverse = paddle.add(self.r_value, self.l_value) + out = self.l_value > self.r_value + out_reverse = self.r_value > self.l_value res = self.exe.run(prog, fetch_list=[out, out_reverse]) return res -create_test_case(TestAPIAddInStatic, 'float16', 'float32', 'float32') -create_test_case(TestAPIAddInStatic, 'float16', 'float64', 'float64') +create_test_case( + TestOperatorOverloadGreaterThanInStatic, 'float16', 'float32', 'bool' +) +create_test_case( + TestOperatorOverloadGreaterThanInStatic, 'float16', 'float64', 'bool' +) -create_test_case(TestAPIAddInStatic, 'float32', 'float64', 'float64') +create_test_case( + TestOperatorOverloadGreaterThanInStatic, 'float32', 'float64', 'bool' +) -if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): - create_test_case(TestAPIAddInStatic, 'bfloat16', 'float16', 'float32') - create_test_case(TestAPIAddInStatic, 'bfloat16', 'float32', 'float32') - create_test_case(TestAPIAddInStatic, 'bfloat16', 'float64', 'float64') +class TestAPIGreaterEqualInStatic(TestOperatorOverloadEqualInStatic): + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + out = paddle.greater_equal(self.l_value, self.r_value) + out_reverse = paddle.greater_equal(self.r_value, self.l_value) -class TestOperatorOverloadSubInStatic(TestOperatorOverloadAddInStatic): + res = self.exe.run(prog, fetch_list=[out, out_reverse]) + return res + + +create_test_case(TestAPIGreaterEqualInStatic, 'float16', 'float32', 'bool') +create_test_case(TestAPIGreaterEqualInStatic, 'float16', 'float64', 'bool') + +create_test_case(TestAPIGreaterEqualInStatic, 'float32', 'float64', 'bool') + + +class TestOperatorOverloadGreaterEqualInStatic( + TestOperatorOverloadEqualInStatic +): def run_api(self): prog = paddle.static.Program() with paddle.static.program_guard(prog): self.generate_test_value() - out = self.l_value - self.r_value - out_reverse = self.r_value - self.l_value + out = self.l_value >= self.r_value + out_reverse = self.r_value >= self.l_value res = self.exe.run(prog, fetch_list=[out, out_reverse]) return res create_test_case( - TestOperatorOverloadSubInStatic, 'float16', 'float32', 'float32' + TestOperatorOverloadGreaterEqualInStatic, 'float16', 'float32', 'bool' ) create_test_case( - TestOperatorOverloadSubInStatic, 'float16', 'float64', 'float64' + TestOperatorOverloadGreaterEqualInStatic, 'float16', 'float64', 'bool' ) create_test_case( - TestOperatorOverloadSubInStatic, 'float32', 'float64', 'float64' + TestOperatorOverloadGreaterEqualInStatic, 'float32', 'float64', 'bool' ) +class TestAPILogicalAndInStatic(TestOperatorOverloadEqualInStatic): + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + + out = paddle.logical_and(self.l_value, self.r_value) + out_reverse = paddle.logical_and(self.r_value, self.l_value) + + res 
= self.exe.run(prog, fetch_list=[out, out_reverse]) + return res + + +create_test_case(TestAPILogicalAndInStatic, 'float16', 'float32', 'bool') +create_test_case(TestAPILogicalAndInStatic, 'float16', 'float64', 'bool') + +create_test_case(TestAPILogicalAndInStatic, 'float32', 'float64', 'bool') + +create_test_case(TestAPILogicalAndInStatic, 'complex64', 'bool', 'bool') +create_test_case(TestAPILogicalAndInStatic, 'complex64', 'int8', 'bool') +create_test_case(TestAPILogicalAndInStatic, 'complex64', 'int16', 'bool') +create_test_case(TestAPILogicalAndInStatic, 'complex64', 'int32', 'bool') +create_test_case(TestAPILogicalAndInStatic, 'complex64', 'int64', 'bool') +create_test_case(TestAPILogicalAndInStatic, 'complex64', 'float16', 'bool') +create_test_case(TestAPILogicalAndInStatic, 'complex64', 'float32', 'bool') +create_test_case(TestAPILogicalAndInStatic, 'complex64', 'float64', 'bool') +create_test_case(TestAPILogicalAndInStatic, 'complex64', 'complex128', 'bool') + +create_test_case(TestAPILogicalAndInStatic, 'complex128', 'bool', 'bool') +create_test_case(TestAPILogicalAndInStatic, 'complex128', 'int8', 'bool') +create_test_case(TestAPILogicalAndInStatic, 'complex128', 'int16', 'bool') +create_test_case(TestAPILogicalAndInStatic, 'complex128', 'int32', 'bool') +create_test_case(TestAPILogicalAndInStatic, 'complex128', 'int64', 'bool') +create_test_case(TestAPILogicalAndInStatic, 'complex128', 'float16', 'bool') +create_test_case(TestAPILogicalAndInStatic, 'complex128', 'float32', 'bool') +create_test_case(TestAPILogicalAndInStatic, 'complex128', 'float64', 'bool') + + +class TestAPILogicalOrInStatic(TestOperatorOverloadEqualInStatic): + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + + out = paddle.logical_or(self.l_value, self.r_value) + out_reverse = paddle.logical_or(self.r_value, self.l_value) + + res = self.exe.run(prog, fetch_list=[out, out_reverse]) + return res + + +create_test_case(TestAPILogicalOrInStatic, 'float16', 'float32', 'bool') +create_test_case(TestAPILogicalOrInStatic, 'float16', 'float64', 'bool') + +create_test_case(TestAPILogicalOrInStatic, 'float32', 'float64', 'bool') + +create_test_case(TestAPILogicalOrInStatic, 'complex64', 'bool', 'bool') +create_test_case(TestAPILogicalOrInStatic, 'complex64', 'int8', 'bool') +create_test_case(TestAPILogicalOrInStatic, 'complex64', 'int16', 'bool') +create_test_case(TestAPILogicalOrInStatic, 'complex64', 'int32', 'bool') +create_test_case(TestAPILogicalOrInStatic, 'complex64', 'int64', 'bool') +create_test_case(TestAPILogicalOrInStatic, 'complex64', 'float16', 'bool') +create_test_case(TestAPILogicalOrInStatic, 'complex64', 'float32', 'bool') +create_test_case(TestAPILogicalOrInStatic, 'complex64', 'float64', 'bool') +create_test_case(TestAPILogicalOrInStatic, 'complex64', 'complex128', 'bool') + +create_test_case(TestAPILogicalOrInStatic, 'complex128', 'bool', 'bool') +create_test_case(TestAPILogicalOrInStatic, 'complex128', 'int8', 'bool') +create_test_case(TestAPILogicalOrInStatic, 'complex128', 'int16', 'bool') +create_test_case(TestAPILogicalOrInStatic, 'complex128', 'int32', 'bool') +create_test_case(TestAPILogicalOrInStatic, 'complex128', 'int64', 'bool') +create_test_case(TestAPILogicalOrInStatic, 'complex128', 'float16', 'bool') +create_test_case(TestAPILogicalOrInStatic, 'complex128', 'float32', 'bool') +create_test_case(TestAPILogicalOrInStatic, 'complex128', 'float64', 'bool') + + +class 
TestAPILogicalXorInStatic(TestOperatorOverloadEqualInStatic): + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + + out = paddle.logical_xor(self.l_value, self.r_value) + out_reverse = paddle.logical_xor(self.r_value, self.l_value) + + res = self.exe.run(prog, fetch_list=[out, out_reverse]) + return res + + +create_test_case(TestAPILogicalXorInStatic, 'float16', 'float32', 'bool') +create_test_case(TestAPILogicalXorInStatic, 'float16', 'float64', 'bool') + +create_test_case(TestAPILogicalXorInStatic, 'float32', 'float64', 'bool') + +create_test_case(TestAPILogicalXorInStatic, 'complex64', 'bool', 'bool') +create_test_case(TestAPILogicalXorInStatic, 'complex64', 'int8', 'bool') +create_test_case(TestAPILogicalXorInStatic, 'complex64', 'int16', 'bool') +create_test_case(TestAPILogicalXorInStatic, 'complex64', 'int32', 'bool') +create_test_case(TestAPILogicalXorInStatic, 'complex64', 'int64', 'bool') +create_test_case(TestAPILogicalXorInStatic, 'complex64', 'float16', 'bool') +create_test_case(TestAPILogicalXorInStatic, 'complex64', 'float32', 'bool') +create_test_case(TestAPILogicalXorInStatic, 'complex64', 'float64', 'bool') +create_test_case(TestAPILogicalXorInStatic, 'complex64', 'complex128', 'bool') + +create_test_case(TestAPILogicalXorInStatic, 'complex128', 'bool', 'bool') +create_test_case(TestAPILogicalXorInStatic, 'complex128', 'int8', 'bool') +create_test_case(TestAPILogicalXorInStatic, 'complex128', 'int16', 'bool') +create_test_case(TestAPILogicalXorInStatic, 'complex128', 'int32', 'bool') +create_test_case(TestAPILogicalXorInStatic, 'complex128', 'int64', 'bool') +create_test_case(TestAPILogicalXorInStatic, 'complex128', 'float16', 'bool') +create_test_case(TestAPILogicalXorInStatic, 'complex128', 'float32', 'bool') +create_test_case(TestAPILogicalXorInStatic, 'complex128', 'float64', 'bool') + + +class TestAPIFmaxInStatic(TestOperatorOverloadAddInStatic): + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + + out = paddle.fmax(self.l_value, self.r_value) + out_reverse = paddle.fmax(self.r_value, self.l_value) + + res = self.exe.run(prog, fetch_list=[out, out_reverse]) + return res + + +create_test_case(TestAPIFmaxInStatic, 'float16', 'float32', 'float32') +create_test_case(TestAPIFmaxInStatic, 'float16', 'float64', 'float64') + +create_test_case(TestAPIFmaxInStatic, 'float32', 'float64', 'float64') + + +class TestAPIFminInStatic(TestOperatorOverloadAddInStatic): + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + + out = paddle.fmin(self.l_value, self.r_value) + out_reverse = paddle.fmin(self.r_value, self.l_value) + + res = self.exe.run(prog, fetch_list=[out, out_reverse]) + return res + + +create_test_case(TestAPIFminInStatic, 'float16', 'float32', 'float32') +create_test_case(TestAPIFminInStatic, 'float16', 'float64', 'float64') + +create_test_case(TestAPIFminInStatic, 'float32', 'float64', 'float64') + + +class TestAPILogAddExpInStatic(TestOperatorOverloadAddInStatic): + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + + out = paddle.logaddexp(self.l_value, self.r_value) + out_reverse = paddle.logaddexp(self.r_value, self.l_value) + + res = self.exe.run(prog, fetch_list=[out, out_reverse]) + return res + + +create_test_case(TestAPILogAddExpInStatic, 'float16', 'float32', 
'float32')
+create_test_case(TestAPILogAddExpInStatic, 'float16', 'float64', 'float64')
+
+create_test_case(TestAPILogAddExpInStatic, 'float32', 'float64', 'float64')
+
+
+class TestAPIMaximumInStatic(TestOperatorOverloadAddInStatic):
+    def run_api(self):
+        prog = paddle.static.Program()
+        with paddle.static.program_guard(prog):
+            self.generate_test_value()
+
+            out = paddle.maximum(self.l_value, self.r_value)
+            out_reverse = paddle.maximum(self.r_value, self.l_value)
+
+            res = self.exe.run(prog, fetch_list=[out, out_reverse])
+            return res
+
+
+create_test_case(TestAPIMaximumInStatic, 'float32', 'float64', 'float64')
+
+
+class TestAPIMinimumInStatic(TestOperatorOverloadAddInStatic):
+    def run_api(self):
+        prog = paddle.static.Program()
+        with paddle.static.program_guard(prog):
+            self.generate_test_value()
+
+            out = paddle.minimum(self.l_value, self.r_value)
+            out_reverse = paddle.minimum(self.r_value, self.l_value)
+
+            res = self.exe.run(prog, fetch_list=[out, out_reverse])
+            return res
+
+
+create_test_case(TestAPIMinimumInStatic, 'float32', 'float64', 'float64')
+
+
+class TestAPINextAfterInStatic(TestOperatorOverloadAddInStatic):
+    def run_api(self):
+        prog = paddle.static.Program()
+        with paddle.static.program_guard(prog):
+            self.generate_test_value()
+
+            out = paddle.nextafter(self.l_value, self.r_value)
+            out_reverse = paddle.nextafter(self.r_value, self.l_value)
+
+            res = self.exe.run(prog, fetch_list=[out, out_reverse])
+            return res
+
+
+create_test_case(TestAPINextAfterInStatic, 'float32', 'float64', 'float64')
+
+
+class TestAPIAtan2InStatic(TestOperatorOverloadAddInStatic):
+    def run_api(self):
+        prog = paddle.static.Program()
+        with paddle.static.program_guard(prog):
+            self.generate_test_value()
+
+            out = paddle.atan2(self.l_value, self.r_value)
+            out_reverse = paddle.atan2(self.r_value, self.l_value)
+
+            res = self.exe.run(prog, fetch_list=[out, out_reverse])
+            return res
+
+
+create_test_case(TestAPIAtan2InStatic, 'float16', 'float32', 'float32')
+create_test_case(TestAPIAtan2InStatic, 'float16', 'float64', 'float64')
+
+create_test_case(TestAPIAtan2InStatic, 'float32', 'float64', 'float64')
+
+
+class TestAPIPoissonNllLossInStatic(TestOperatorOverloadAddInStatic):
+    def run_api(self):
+        prog = paddle.static.Program()
+        with paddle.static.program_guard(prog):
+            self.generate_test_value()
+
+            out = paddle.nn.functional.poisson_nll_loss(
+                self.l_value, self.r_value
+            )
+            out_reverse = paddle.nn.functional.poisson_nll_loss(
+                self.r_value, self.l_value
+            )
+
+            res = self.exe.run(prog, fetch_list=[out, out_reverse])
+            return res
+
+
+create_test_case(TestAPIPoissonNllLossInStatic, 'float16', 'float32', 'float32')
+create_test_case(TestAPIPoissonNllLossInStatic, 'float16', 'float64', 'float64')
+
+create_test_case(TestAPIPoissonNllLossInStatic, 'float32', 'float64', 'float64')
+
+
 if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16():
     create_test_case(
-        TestOperatorOverloadSubInStatic, 'bfloat16', 'float16', 'float32'
+        TestAPIPoissonNllLossInStatic, 'bfloat16', 'float16', 'float32'
     )
     create_test_case(
-        TestOperatorOverloadSubInStatic, 'bfloat16', 'float32', 'float32'
+        TestAPIPoissonNllLossInStatic, 'bfloat16', 'float32', 'float32'
     )
     create_test_case(
-        TestOperatorOverloadSubInStatic, 'bfloat16', 'float64', 'float64'
+        TestAPIPoissonNllLossInStatic, 'bfloat16', 'float64', 'float64'
     )
 
 
-class TestAPISubInStatic(TestOperatorOverloadAddInStatic):
+class TestAPIL1LossInStatic(TestOperatorOverloadAddInStatic):
     def run_api(self):
         prog = paddle.static.Program()
with paddle.static.program_guard(prog): self.generate_test_value() - out = paddle.subtract(self.l_value, self.r_value) - out_reverse = paddle.subtract(self.r_value, self.l_value) + out = paddle.nn.functional.l1_loss(self.l_value, self.r_value) + out_reverse = paddle.nn.functional.l1_loss( + self.r_value, self.l_value + ) res = self.exe.run(prog, fetch_list=[out, out_reverse]) return res -create_test_case(TestAPISubInStatic, 'float16', 'float32', 'float32') -create_test_case(TestAPISubInStatic, 'float16', 'float64', 'float64') - -create_test_case(TestAPIAddInStatic, 'float32', 'float64', 'float64') +create_test_case(TestAPIL1LossInStatic, 'float16', 'float32', 'float32') +create_test_case(TestAPIL1LossInStatic, 'float16', 'float64', 'float64') - -if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): - create_test_case(TestAPISubInStatic, 'bfloat16', 'float16', 'float32') - create_test_case(TestAPISubInStatic, 'bfloat16', 'float32', 'float32') - create_test_case(TestAPISubInStatic, 'bfloat16', 'float64', 'float64') +create_test_case(TestAPIL1LossInStatic, 'float32', 'float64', 'float64') -class TestOperatorOverloadMulInStatic(TestOperatorOverloadAddInStatic): +class TestAPISmoothL1LossInStatic(TestOperatorOverloadAddInStatic): def run_api(self): prog = paddle.static.Program() with paddle.static.program_guard(prog): self.generate_test_value() - out = self.l_value * self.r_value - out_reverse = self.r_value * self.l_value + out = paddle.nn.functional.smooth_l1_loss( + self.l_value, self.r_value + ) + out_reverse = paddle.nn.functional.smooth_l1_loss( + self.r_value, self.l_value + ) res = self.exe.run(prog, fetch_list=[out, out_reverse]) return res -create_test_case( - TestOperatorOverloadMulInStatic, 'float16', 'float32', 'float32' -) -create_test_case( - TestOperatorOverloadMulInStatic, 'float16', 'float64', 'float64' -) +create_test_case(TestAPISmoothL1LossInStatic, 'float16', 'float32', 'float32') +create_test_case(TestAPISmoothL1LossInStatic, 'float16', 'float64', 'float64') -create_test_case( - TestOperatorOverloadMulInStatic, 'float32', 'float64', 'float64' -) +create_test_case(TestAPISmoothL1LossInStatic, 'float32', 'float64', 'float64') if paddle.is_compiled_with_cuda() and paddle.base.core.supports_bfloat16(): create_test_case( - TestOperatorOverloadMulInStatic, 'bfloat16', 'float16', 'float32' + TestAPISmoothL1LossInStatic, 'bfloat16', 'float16', 'float32' ) create_test_case( - TestOperatorOverloadMulInStatic, 'bfloat16', 'float32', 'float32' + TestAPISmoothL1LossInStatic, 'bfloat16', 'float32', 'float32' ) create_test_case( - TestOperatorOverloadMulInStatic, 'bfloat16', 'float64', 'float64' + TestAPISmoothL1LossInStatic, 'bfloat16', 'float64', 'float64' ) +class TestAPIMSELossInStatic(TestOperatorOverloadAddInStatic): + def run_api(self): + prog = paddle.static.Program() + with paddle.static.program_guard(prog): + self.generate_test_value() + + out = paddle.nn.functional.mse_loss(self.l_value, self.r_value) + out_reverse = paddle.nn.functional.mse_loss( + self.r_value, self.l_value + ) + + res = self.exe.run(prog, fetch_list=[out, out_reverse]) + return res + + +create_test_case(TestAPIMSELossInStatic, 'float32', 'float64', 'float64') + + +class TestTypePromotionRaiseError(unittest.TestCase): + def test_static_type_error(self): + paddle.enable_static() + with self.assertRaises(TypeError): + prog = paddle.static.Program() + exe = paddle.static.Executor() + with paddle.static.program_guard(prog): + a = paddle.ones([3, 3], dtype='float32') + b = paddle.ones([3, 
3], dtype='float64') + out = a.__matmul__(b) + res = exe.run(prog, fetch_list=[out]) + return res + + def test_dygraph_type_error(self): + with self.assertRaises(TypeError): + a = paddle.ones([3, 3], dtype='float32') + b = paddle.ones([3, 3], dtype='int32') + return a + b + + if __name__ == '__main__': unittest.main() diff --git a/test/dygraph_to_static/test_break_continue.py b/test/dygraph_to_static/test_break_continue.py index e341f91e28c5f..824b00592af76 100644 --- a/test/dygraph_to_static/test_break_continue.py +++ b/test/dygraph_to_static/test_break_continue.py @@ -68,7 +68,7 @@ def test_continue_in_for_at_end(x): def test_continue_in_while(x): x = paddle.to_tensor(x) - i = paddle.tensor.fill_constant(shape=[1], dtype='int32', value=0) + i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0) while i < 10: i += 1 if i > 5: @@ -100,7 +100,7 @@ def test_break_in_for_at_end(x): def test_break_in_while(x): x = paddle.to_tensor(x) - i = paddle.tensor.fill_constant(shape=[1], dtype='int32', value=0) + i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0) while i < 10: i += 1 if i > 5: @@ -122,8 +122,8 @@ def test_break_continue_in_for(x): break x += 10086 - a = paddle.tensor.fill_constant(shape=[1], dtype='int32', value=0) - b = paddle.tensor.fill_constant(shape=[1], dtype='int32', value=3) + a = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0) + b = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=3) # b = 10 # TODO: add Raise Error and suggestion for usage: # Py for contains break/continue depends on control-flow. @@ -198,7 +198,7 @@ def test_optim_break_in_for(x): def test_optim_break_in_while(x): x = paddle.to_tensor(x) - i = paddle.tensor.fill_constant(shape=[1], dtype='int32', value=0) + i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0) while i < 10: if i > 5: break diff --git a/test/dygraph_to_static/test_for_enumerate.py b/test/dygraph_to_static/test_for_enumerate.py index 3851f89aee04c..7a306d31d6207 100644 --- a/test/dygraph_to_static/test_for_enumerate.py +++ b/test/dygraph_to_static/test_for_enumerate.py @@ -178,7 +178,7 @@ def for_iter_var_list(x): # 2. 
iter list[var] y = paddle.tensor.fill_constant([1], 'int32', 0) for x in a: - y = y + x + y = y + x.astype('int32') return y @@ -195,7 +195,7 @@ def for_enumerate_var_list(x): z = paddle.tensor.fill_constant([1], 'int32', 0) for i, x in enumerate(a): y = y + i - z = z + x + z = z + x.astype('int32') return y, z @@ -244,7 +244,7 @@ def for_tuple_as_enumerate_iter(x_array): a_result = paddle.zeros([5]) for t in enumerate(x_list): - a_result += t[1] + a_result += t[1].astype('float32') return a_result diff --git a/test/indexing/test_setitem.py b/test/indexing/test_setitem.py index 818672dfd8d15..d9d7e8c9264c4 100644 --- a/test/indexing/test_setitem.py +++ b/test/indexing/test_setitem.py @@ -700,7 +700,7 @@ def test_combined_indexing_and_value_is_tensor_1(self): paddle.static.Program(), paddle.static.Program() ): x = paddle.ones((3, 3), dtype='int32') - v = paddle.to_tensor([-1, -1, -1]) + v = paddle.to_tensor([-1, -1, -1], dtype='int32') y = _setitem_static( x, (slice(None), [0, 2]), diff --git a/test/ir/pir/cinn/symbolic/test_llama_group_log_softmax.py b/test/ir/pir/cinn/symbolic/test_llama_group_log_softmax.py index 602367573cf3b..ea6952a196099 100644 --- a/test/ir/pir/cinn/symbolic/test_llama_group_log_softmax.py +++ b/test/ir/pir/cinn/symbolic/test_llama_group_log_softmax.py @@ -34,7 +34,9 @@ def update_scores_for_generation( ): # update scores - unfinished_scores = (scores * length + next_scores) / (length + 1) + unfinished_scores = (scores * length.astype(scores.dtype) + next_scores) / ( + length + 1 + ).astype(scores.dtype) return unfinished_scores diff --git a/test/legacy_test/test_elementwise_add_op.py b/test/legacy_test/test_elementwise_add_op.py index 2db69a70aada0..01572e097447d 100644 --- a/test/legacy_test/test_elementwise_add_op.py +++ b/test/legacy_test/test_elementwise_add_op.py @@ -725,7 +725,7 @@ class TestComplexElementwiseAddOp(OpTest): def setUp(self): self.op_type = "elementwise_add" self.python_api = paddle.add - self.dtype = np.float64 + self.dtype = np.complex128 self.shape = (2, 3, 4, 5) self.init_input_output() diff --git a/test/legacy_test/test_elementwise_sub_op.py b/test/legacy_test/test_elementwise_sub_op.py index 2ee7c8090d989..1053002c41cf5 100644 --- a/test/legacy_test/test_elementwise_sub_op.py +++ b/test/legacy_test/test_elementwise_sub_op.py @@ -819,7 +819,7 @@ def setUp(self): self.python_api = paddle.subtract self.public_python_api = paddle.subtract self.prim_op_type = "prim" - self.dtype = np.float64 + self.dtype = np.complex128 self.shape = (2, 3, 4, 5) self.init_input_output() @@ -833,7 +833,7 @@ def setUp(self): self.if_enable_cinn() def init_base_dtype(self): - self.dtype = np.float64 + self.dtype = np.complex128 def init_input_output(self): self.x = np.random.random(self.shape).astype( diff --git a/test/legacy_test/test_modelaverage.py b/test/legacy_test/test_modelaverage.py index 0d3aa82f38f64..61ff09ea30461 100644 --- a/test/legacy_test/test_modelaverage.py +++ b/test/legacy_test/test_modelaverage.py @@ -111,7 +111,7 @@ def test_model_average_static(self): average_b = (sum_1 + sum_2 + sum_3) / ( num_accumulates + old_num_accumulates - ) + ).astype('float32') if in_pir_mode(): ops = test_program.global_block().ops fetch_list = [ @@ -197,9 +197,13 @@ def train(layer, loader, loss_fn, opt, model_average): ) return ( - (sum_1 + sum_2 + sum_3) - / (num_accumulates + old_num_accumulates) - ).numpy() + ( + (sum_1 + sum_2 + sum_3) + / (num_accumulates + old_num_accumulates).astype('float32') + ) + .astype(sum_1.dtype) + .numpy() + ) def evaluate(layer, 
loader, loss_fn, check_param): for batch_id, (image, label) in enumerate(loader()): diff --git a/test/legacy_test/test_multiply.py b/test/legacy_test/test_multiply.py index ee297bdcd2789..ba7decc5c58a2 100755 --- a/test/legacy_test/test_multiply.py +++ b/test/legacy_test/test_multiply.py @@ -149,7 +149,7 @@ def test_errors(self): y_data = np.random.randn(200).astype(np.float64) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) - self.assertRaises(ValueError, paddle.multiply, x, y) + self.assertRaises(TypeError, paddle.multiply, x, y) # test dynamic computation graph: dtype must be Tensor type x_data = np.random.randn(200).astype(np.int64) diff --git a/test/legacy_test/test_sparse_addmm_op.py b/test/legacy_test/test_sparse_addmm_op.py index 43be65aba3d1a..5ee8cca78b4ce 100644 --- a/test/legacy_test/test_sparse_addmm_op.py +++ b/test/legacy_test/test_sparse_addmm_op.py @@ -44,7 +44,9 @@ def check_result(self, input_shape, x_shape, y_shape, format): mask = paddle.randint(0, 2, x_shape) origin_input = paddle.rand(input_shape) - origin_x = paddle.rand(x_shape) * mask + origin_x = paddle.rand(x_shape) * mask.astype( + paddle.get_default_dtype() + ) origin_y = paddle.rand(y_shape) dense_input = origin_input.detach() @@ -77,7 +79,9 @@ def check_result(self, input_shape, x_shape, y_shape, format): ) np.testing.assert_allclose( sp_x.grad.to_dense().numpy(), - (dense_x.grad * mask).numpy(), + ( + dense_x.grad * mask.astype(paddle.get_default_dtype()) + ).numpy(), rtol=1e-05, ) np.testing.assert_allclose( diff --git a/test/legacy_test/test_sparse_matmul_op.py b/test/legacy_test/test_sparse_matmul_op.py index ae08b7df48c53..db942dead1b75 100644 --- a/test/legacy_test/test_sparse_matmul_op.py +++ b/test/legacy_test/test_sparse_matmul_op.py @@ -43,7 +43,9 @@ def check_result(self, x_shape, y_shape, format): mask = paddle.randint(0, 2, [x_shape[-2], x_shape[-1]]) else: mask = paddle.randint(0, 2, x_shape) - origin_x = paddle.rand(x_shape) * mask + origin_x = paddle.rand(x_shape) * mask.astype( + paddle.get_default_dtype() + ) origin_y = paddle.rand(y_shape) dense_x = origin_x.detach() @@ -69,7 +71,7 @@ def check_result(self, x_shape, y_shape, format): sp_out.backward() np.testing.assert_allclose( sp_x.grad.to_dense().numpy(), - (dense_x.grad * mask).numpy(), + (dense_x.grad * mask.astype(dense_x.dtype)).numpy(), rtol=1e-05, ) np.testing.assert_allclose( @@ -275,7 +277,7 @@ def test_masked_matmul_3d(self): paddle.set_default_dtype('float32') origin_x = paddle.rand([16, 16, 12]) mask = paddle.randint(0, 2, [16, 12]) - origin_x = origin_x * mask + origin_x = origin_x * mask.astype('float32') origin_y = paddle.rand([16, 12, 10]) dense_x = origin_x.detach() @@ -297,7 +299,7 @@ def test_masked_matmul_3d(self): ) np.testing.assert_allclose( sp_x.grad.to_dense().numpy(), - (dense_x.grad * mask).numpy(), + (dense_x.grad * mask.astype('float32')).numpy(), rtol=1e-05, ) np.testing.assert_allclose( diff --git a/test/legacy_test/test_sparse_mv_op.py b/test/legacy_test/test_sparse_mv_op.py index 2ecf4fc45dd5c..c1e3182b42729 100644 --- a/test/legacy_test/test_sparse_mv_op.py +++ b/test/legacy_test/test_sparse_mv_op.py @@ -45,7 +45,7 @@ def test_mv(self): paddle.set_default_dtype('float64') origin_x = paddle.rand([64, 32]) mask = paddle.randint(0, 2, [64, 32]) - origin_x = origin_x * mask + origin_x = origin_x * mask.astype('float64') origin_vec = paddle.rand([32]) dense_x = origin_x.detach() @@ -67,7 +67,7 @@ def test_mv(self): ) np.testing.assert_allclose( sp_x.grad.to_dense().numpy(), - (dense_x.grad * 
mask).numpy(), + (dense_x.grad * mask.astype('float64')).numpy(), rtol=1e-05, ) np.testing.assert_allclose( @@ -85,7 +85,7 @@ def test_mv(self): paddle.set_default_dtype('float64') origin_x = paddle.rand([64, 32]) mask = paddle.randint(0, 2, [64, 32]) - origin_x = origin_x * mask + origin_x = origin_x * mask.astype('float64') origin_vec = paddle.rand([32]) dense_x = origin_x.detach() @@ -107,7 +107,7 @@ def test_mv(self): ) np.testing.assert_allclose( sp_x.grad.to_dense().numpy(), - (dense_x.grad * mask).numpy(), + (dense_x.grad * mask.astype('float64')).numpy(), rtol=1e-05, ) np.testing.assert_allclose( diff --git a/test/legacy_test/test_tensor_scalar_type_promotion_dynamic.py b/test/legacy_test/test_tensor_scalar_type_promotion_dynamic.py index 2f6541f67968e..222955108c9dc 100644 --- a/test/legacy_test/test_tensor_scalar_type_promotion_dynamic.py +++ b/test/legacy_test/test_tensor_scalar_type_promotion_dynamic.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -20,11 +20,13 @@ # Support types are ref from `paddle.tensor.math` # - Related paddle dtypes: -# - int type: int64, (no test here: uint8, int8, int16, int32) -# - float type: float32, (no test here: float64) +# - int type: int64, uint8, int8, int16, int32 +# - float type: float16, bfloat16, float32, float64 +# - complex type: complex64, complex128 # - Python scalar dtypes: # - int(64) -# - float(64) +# - float(32) +# - complex(64) class TestTensorScalarTypePromotionDynamic(unittest.TestCase): @@ -313,5 +315,418 @@ def test_tensor_mod_scalar(self): self.check_operation(a, b, c, '%') +def create_test_case( + baseclass, + dtype, + expected_out_dtype_with_int=None, + expected_out_dtype_with_float=None, + expected_out_dtype_with_complex=None, +): + class TestPromotion(baseclass): + def set_dtype(self): + self.dtype = dtype + self.expected_out_dtype_with_int = expected_out_dtype_with_int + self.expected_out_dtype_with_float = expected_out_dtype_with_float + self.expected_out_dtype_with_complex = ( + expected_out_dtype_with_complex + ) + + cls_name = f"{baseclass.__name__}{dtype}" + TestPromotion.__name__ = cls_name + globals()[cls_name] = TestPromotion + + +class TestTensorAddScalar(unittest.TestCase): + def setUp(self): + self.set_dtype() + + def set_dtype(self): + self.dtype = 'float32' + self.expected_out_dtype_with_int = 'float32' + self.expected_out_dtype_with_float = 'float32' + self.expected_out_dtype_with_complex = 'complex64' + + def generate_test_value(self): + self.value = paddle.rand([2, 3, 4]).astype(self.dtype) + + def run_api(self): + self.generate_test_value() + out_int = self.value + 1 + out_float = self.value + 1.0 + out_complex = self.value + 2j + return out_int, out_float, out_complex + + def test_dtype_is_expected(self): + res_int, res_float, res_complex = self.run_api() + self.assertEqual( + res_int.dtype.__str__(), + "paddle." + self.expected_out_dtype_with_int, + ) + self.assertEqual( + res_float.dtype.__str__(), + "paddle." + self.expected_out_dtype_with_float, + ) + self.assertEqual( + res_complex.dtype.__str__(), + "paddle." 
+ self.expected_out_dtype_with_complex, + ) + + +create_test_case(TestTensorAddScalar, 'bool', 'int64', 'float32', 'complex64') +create_test_case(TestTensorAddScalar, 'uint8', 'uint8', 'float32', 'complex64') +create_test_case(TestTensorAddScalar, 'int8', 'int8', 'float32', 'complex64') +create_test_case(TestTensorAddScalar, 'int32', 'int32', 'float32', 'complex64') +create_test_case(TestTensorAddScalar, 'int64', 'int64', 'float32', 'complex64') +create_test_case( + TestTensorAddScalar, 'float16', 'float16', 'float16', 'complex64' +) +create_test_case( + TestTensorAddScalar, 'bfloat16', 'bfloat16', 'bfloat16', 'complex64' +) +create_test_case( + TestTensorAddScalar, 'float64', 'float64', 'float64', 'complex128' +) +create_test_case( + TestTensorAddScalar, 'complex64', 'complex64', 'complex64', 'complex64' +) +create_test_case( + TestTensorAddScalar, 'complex128', 'complex128', 'complex128', 'complex128' +) + + +class TestTensorSubScalar(unittest.TestCase): + def setUp(self): + self.set_dtype() + + def set_dtype(self): + self.dtype = 'float32' + self.expected_out_dtype_with_int = 'float32' + self.expected_out_dtype_with_float = 'float32' + self.expected_out_dtype_with_complex = 'complex64' + + def generate_test_value(self): + self.value = paddle.rand([2, 3, 4]).astype(self.dtype) + + def run_api(self): + self.generate_test_value() + out_int = self.value - 1 + out_float = self.value - 1.0 + out_complex = self.value - 2j + return out_int, out_float, out_complex + + def test_dtype_is_expected(self): + res_int, res_float, res_complex = self.run_api() + self.assertEqual( + res_int.dtype.__str__(), + "paddle." + self.expected_out_dtype_with_int, + ) + self.assertEqual( + res_float.dtype.__str__(), + "paddle." + self.expected_out_dtype_with_float, + ) + self.assertEqual( + res_complex.dtype.__str__(), + "paddle." + self.expected_out_dtype_with_complex, + ) + + +create_test_case(TestTensorSubScalar, 'bool', 'int64', 'float32', 'complex64') +create_test_case(TestTensorSubScalar, 'uint8', 'uint8', 'float32', 'complex64') +create_test_case(TestTensorSubScalar, 'int8', 'int8', 'float32', 'complex64') +create_test_case(TestTensorSubScalar, 'int32', 'int32', 'float32', 'complex64') +create_test_case(TestTensorSubScalar, 'int64', 'int64', 'float32', 'complex64') +create_test_case( + TestTensorSubScalar, 'float16', 'float16', 'float16', 'complex64' +) +create_test_case( + TestTensorSubScalar, 'bfloat16', 'bfloat16', 'bfloat16', 'complex64' +) +create_test_case( + TestTensorSubScalar, 'float64', 'float64', 'float64', 'complex128' +) +create_test_case( + TestTensorSubScalar, 'complex64', 'complex64', 'complex64', 'complex64' +) +create_test_case( + TestTensorSubScalar, 'complex128', 'complex128', 'complex128', 'complex128' +) + + +class TestTensorDivScalar(unittest.TestCase): + def setUp(self): + self.set_dtype() + + def set_dtype(self): + self.dtype = 'float32' + self.expected_out_dtype_with_int = 'float32' + self.expected_out_dtype_with_float = 'float32' + self.expected_out_dtype_with_complex = 'complex64' + + def generate_test_value(self): + self.value = paddle.rand([2, 3, 4]).astype(self.dtype) + + def run_api(self): + self.generate_test_value() + out_int = self.value / 1 + out_float = self.value / 1.0 + out_complex = self.value / 2j + return out_int, out_float, out_complex + + def test_dtype_is_expected(self): + res_int, res_float, res_complex = self.run_api() + self.assertEqual( + res_int.dtype.__str__(), + "paddle." 
+ self.expected_out_dtype_with_int, + ) + self.assertEqual( + res_float.dtype.__str__(), + "paddle." + self.expected_out_dtype_with_float, + ) + self.assertEqual( + res_complex.dtype.__str__(), + "paddle." + self.expected_out_dtype_with_complex, + ) + + +create_test_case(TestTensorDivScalar, 'bool', 'float32', 'float32', 'complex64') +create_test_case( + TestTensorDivScalar, 'uint8', 'float32', 'float32', 'complex64' +) +create_test_case(TestTensorDivScalar, 'int8', 'float32', 'float32', 'complex64') +create_test_case( + TestTensorDivScalar, 'int32', 'float32', 'float32', 'complex64' +) +create_test_case( + TestTensorDivScalar, 'int64', 'float32', 'float32', 'complex64' +) +create_test_case( + TestTensorDivScalar, 'float16', 'float16', 'float16', 'complex64' +) +create_test_case( + TestTensorDivScalar, 'bfloat16', 'bfloat16', 'bfloat16', 'complex64' +) +create_test_case( + TestTensorDivScalar, 'float64', 'float64', 'float64', 'complex128' +) +create_test_case( + TestTensorDivScalar, 'complex64', 'complex64', 'complex64', 'complex64' +) +create_test_case( + TestTensorDivScalar, 'complex128', 'complex128', 'complex128', 'complex128' +) + + +class TestTensorMulScalar(unittest.TestCase): + def setUp(self): + self.set_dtype() + + def set_dtype(self): + self.dtype = 'float32' + self.expected_out_dtype_with_int = 'float32' + self.expected_out_dtype_with_float = 'float32' + self.expected_out_dtype_with_complex = 'complex64' + + def generate_test_value(self): + self.value = paddle.rand([2, 3, 4]).astype(self.dtype) + + def run_api(self): + self.generate_test_value() + out_int = self.value * 1 + out_float = self.value * 1.0 + out_complex = self.value * 2j + return out_int, out_float, out_complex + + def test_dtype_is_expected(self): + res_int, res_float, res_complex = self.run_api() + self.assertEqual( + res_int.dtype.__str__(), + "paddle." + self.expected_out_dtype_with_int, + ) + self.assertEqual( + res_float.dtype.__str__(), + "paddle." + self.expected_out_dtype_with_float, + ) + self.assertEqual( + res_complex.dtype.__str__(), + "paddle." 
+ self.expected_out_dtype_with_complex, + ) + + +create_test_case(TestTensorMulScalar, 'bool', 'int64', 'float32', 'complex64') +create_test_case(TestTensorMulScalar, 'uint8', 'uint8', 'float32', 'complex64') +create_test_case(TestTensorMulScalar, 'int8', 'int8', 'float32', 'complex64') +create_test_case(TestTensorMulScalar, 'int32', 'int32', 'float32', 'complex64') +create_test_case(TestTensorMulScalar, 'int64', 'int64', 'float32', 'complex64') +create_test_case( + TestTensorMulScalar, 'float16', 'float16', 'float16', 'complex64' +) +create_test_case( + TestTensorMulScalar, 'bfloat16', 'bfloat16', 'bfloat16', 'complex64' +) +create_test_case( + TestTensorMulScalar, 'float64', 'float64', 'float64', 'complex128' +) +create_test_case( + TestTensorMulScalar, 'complex64', 'complex64', 'complex64', 'complex64' +) +create_test_case( + TestTensorMulScalar, 'complex128', 'complex128', 'complex128', 'complex128' +) + + +class TestTensorPowScalar(unittest.TestCase): + def setUp(self): + self.set_dtype() + + def set_dtype(self): + self.dtype = 'float32' + self.expected_out_dtype_with_int = 'float32' + self.expected_out_dtype_with_float = 'float32' + self.expected_out_dtype_with_complex = 'complex64' + + def generate_test_value(self): + self.value = paddle.rand([2, 3, 4]).astype(self.dtype) + + def run_api(self): + self.generate_test_value() + out_int = self.value**1 + out_float = self.value**1.0 + # pow API not support complex + out_complex = None + return out_int, out_float, out_complex + + def test_dtype_is_expected(self): + res_int, res_float, res_complex = self.run_api() + self.assertEqual( + res_int.dtype.__str__(), + "paddle." + self.expected_out_dtype_with_int, + ) + self.assertEqual( + res_float.dtype.__str__(), + "paddle." + self.expected_out_dtype_with_float, + ) + # self.assertEqual(res_complex.dtype.__str__(), "paddle." + self.expected_out_dtype_with_complex) + + +# pow API support int32, int64, float64, float32. +create_test_case(TestTensorPowScalar, 'int32', 'int32', 'float32', 'complex64') +create_test_case(TestTensorPowScalar, 'int64', 'int64', 'float32', 'complex64') + +create_test_case( + TestTensorPowScalar, 'float64', 'float64', 'float64', 'complex128' +) + + +class TestTensorFloorDivScalar(unittest.TestCase): + def setUp(self): + self.set_dtype() + + def set_dtype(self): + self.dtype = 'float32' + self.expected_out_dtype_with_int = 'float32' + self.expected_out_dtype_with_float = 'float32' + self.expected_out_dtype_with_complex = 'complex64' + + def generate_test_value(self): + self.value = paddle.rand([2, 3, 4]).astype(self.dtype) + + def run_api(self): + self.generate_test_value() + out_int = self.value // 1 + out_float = self.value // 1.0 + # floor_div API not support complex + out_complex = None + return out_int, out_float, out_complex + + def test_dtype_is_expected(self): + res_int, res_float, res_complex = self.run_api() + self.assertEqual( + res_int.dtype.__str__(), + "paddle." + self.expected_out_dtype_with_int, + ) + self.assertEqual( + res_float.dtype.__str__(), + "paddle." + self.expected_out_dtype_with_float, + ) + # self.assertEqual(res_complex.dtype.__str__(), "paddle." 
+ self.expected_out_dtype_with_complex) + + +# floor_div API not support complex64, complex128 +create_test_case( + TestTensorFloorDivScalar, 'bool', 'int64', 'float32', 'complex64' +) +create_test_case( + TestTensorFloorDivScalar, 'uint8', 'uint8', 'float32', 'complex64' +) +create_test_case( + TestTensorFloorDivScalar, 'int8', 'int8', 'float32', 'complex64' +) +create_test_case( + TestTensorFloorDivScalar, 'int32', 'int32', 'float32', 'complex64' +) +create_test_case( + TestTensorFloorDivScalar, 'int64', 'int64', 'float32', 'complex64' +) +create_test_case( + TestTensorFloorDivScalar, 'float16', 'float16', 'float16', 'complex64' +) +create_test_case( + TestTensorFloorDivScalar, 'bfloat16', 'bfloat16', 'bfloat16', 'complex64' +) +create_test_case( + TestTensorFloorDivScalar, 'float64', 'float64', 'float64', 'complex128' +) + + +class TestTensorModScalar(unittest.TestCase): + def setUp(self): + self.set_dtype() + + def set_dtype(self): + self.dtype = 'float32' + self.expected_out_dtype_with_int = 'float32' + self.expected_out_dtype_with_float = 'float32' + self.expected_out_dtype_with_complex = 'complex64' + + def generate_test_value(self): + self.value = paddle.rand([2, 3, 4]).astype(self.dtype) + + def run_api(self): + self.generate_test_value() + out_int = self.value % 1 + out_float = self.value % 1.0 + # mod API not support complex + out_complex = None + return out_int, out_float, out_complex + + def test_dtype_is_expected(self): + res_int, res_float, res_complex = self.run_api() + self.assertEqual( + res_int.dtype.__str__(), + "paddle." + self.expected_out_dtype_with_int, + ) + self.assertEqual( + res_float.dtype.__str__(), + "paddle." + self.expected_out_dtype_with_float, + ) + # self.assertEqual(res_complex.dtype.__str__(), "paddle." + self.expected_out_dtype_with_complex) + + +# mod API support float32, float64, int32, int64 +create_test_case(TestTensorModScalar, 'int32', 'int32', 'float32', 'complex64') +create_test_case(TestTensorModScalar, 'int64', 'int64', 'float32', 'complex64') +create_test_case( + TestTensorModScalar, 'float64', 'float64', 'float64', 'complex128' +) + + +class Test0DTensor(unittest.TestCase): + def test_0d_add_0d(self): + a = paddle.ones([], dtype='int32') + b = paddle.ones([], dtype='int64') + res = a / b + return res + + if __name__ == '__main__': unittest.main() diff --git a/test/legacy_test/test_trapezoid.py b/test/legacy_test/test_trapezoid.py index e1b8697ccff15..2a27e401b828d 100644 --- a/test/legacy_test/test_trapezoid.py +++ b/test/legacy_test/test_trapezoid.py @@ -155,7 +155,7 @@ class TestTrapezoidAxis1(TestTrapezoidAPI): def set_args(self): self.y = np.random.random((3, 3, 4)).astype('float32') self.x = None - self.dx = 1 + self.dx = 1.0 self.axis = 1 diff --git a/test/sot/test_18_tensor_method.py b/test/sot/test_18_tensor_method.py index 2591db1f748d9..d243ee1f7611e 100644 --- a/test/sot/test_18_tensor_method.py +++ b/test/sot/test_18_tensor_method.py @@ -48,7 +48,12 @@ def tensor_method_property(a: paddle.Tensor, b: paddle.Tensor): a.type, a.is_tensor(), a.clear_gradient(), - a @ b.T + len(a.shape) + b.size + a.ndim + a.dim() + a.rank(), + a @ b.T.astype(a.dtype) + + len(a.shape) + + b.size + + a.ndim + + a.dim() + + a.rank(), )
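
For reviewers, the scalar-promotion behavior that the new test_tensor_scalar_type_promotion_dynamic.py cases assert can be summarized with a short illustrative snippet. This is a sketch only: it assumes a Paddle build that already contains this patch, and the commented dtypes simply restate what the added create_test_case entries expect.

import paddle

x = paddle.rand([2, 3, 4]).astype('float32')
print((x + 1).dtype)    # paddle.float32   (float tensor + int scalar keeps the tensor dtype)
print((x + 1.0).dtype)  # paddle.float32
print((x + 2j).dtype)   # paddle.complex64 (real tensor + complex scalar promotes to complex)

y = paddle.rand([2, 3, 4]).astype('float64')
print((y + 2j).dtype)   # paddle.complex128

i = paddle.ones([2, 3, 4], dtype='int64')
print((i + 1).dtype)    # paddle.int64     (int tensor + int scalar keeps the tensor dtype)
print((i / 2).dtype)    # paddle.float32   (true division of an int tensor yields the default float dtype)

# The sparse-op tests above now cast integer masks explicitly before multiplying,
# matching the pattern introduced in test_sparse_matmul_op.py and test_sparse_mv_op.py:
mask = paddle.randint(0, 2, [2, 3, 4])
z = x * mask.astype(x.dtype)
print(z.dtype)          # paddle.float32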