From 5337362af1fe59e67eaeadb1f7470f1ec19b9ccb Mon Sep 17 00:00:00 2001
From: zyfncg
Date: Fri, 3 Feb 2023 07:06:45 +0000
Subject: [PATCH 1/5] remove axis in some elementwise api

---
 python/paddle/fluid/layers/nn.py             | 22 -----
 .../ir/inference/test_trt_elementwise_op.py  |  8 +-
 .../unittests/test_elementwise_nn_grad.py    |  2 +-
 python/paddle/tensor/math.py                 | 96 +++----------------
 4 files changed, 18 insertions(+), 110 deletions(-)

diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 1dd819df411687..4cb8b8f88fc8da 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -101,28 +101,6 @@ def _get_reduce_dim(dim, input):
     return reduce_all, dim
 
 
-@dygraph_only
-def _elementwise_op_in_dygraph(
-    x, y, axis=-1, act=None, use_mkldnn=False, op_name=None
-):
-    def is_inplace(op_name):
-        return op_name[-1] == "_"
-
-    if op_name not in OP_NAMEMAPPING.keys() or axis != -1:
-        op = getattr(_legacy_C_ops, op_name)
-        out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)
-    else:
-        if in_dygraph_mode():
-            op = getattr(
-                _C_ops,
-                OP_NAMEMAPPING[op_name] if not is_inplace(op_name) else op_name,
-            )
-            out = op(x, y)
-    return dygraph_utils._append_activation_in_dygraph(
-        out, act, use_mkldnn=use_mkldnn
-    )
-
-
 @deprecated(since="2.0.0", update_to="paddle.nn.functional.embedding")
 def embedding(
     input,
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_elementwise_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_elementwise_op.py
index 7674a226586888..95055040606861 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_elementwise_op.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_elementwise_op.py
@@ -50,7 +50,7 @@ def setUp(self):
         self.fetch_list = [out]
 
     def append_eltwise(self, data1, data2):
-        return paddle.tensor.math._add_with_axis(x=data1, y=data2, axis=0)
+        return paddle.tensor.math.add(x=data1, y=data2)
 
     def test_check_output(self):
         if os.path.exists(self.path + "_opt_cache"):
@@ -67,21 +67,21 @@ class TensorRTSubgraphPassElementwiseBroadcastTest1(
     TensorRTSubgraphPassElementwiseBroadcastTest
 ):
     def append_eltwise(self, data1, data2):
-        return paddle.tensor.math._subtract_with_axis(x=data1, y=data2, axis=0)
+        return paddle.tensor.math.subtract(x=data1, y=data2)
 
 
 class TensorRTSubgraphPassElementwiseBroadcastTest2(
     TensorRTSubgraphPassElementwiseBroadcastTest
 ):
     def append_eltwise(self, data1, data2):
-        return paddle.tensor.math._multiply_with_axis(x=data1, y=data2, axis=0)
+        return paddle.tensor.math.multiply(x=data1, y=data2)
 
 
 class TensorRTSubgraphPassElementwiseBroadcastTest3(
     TensorRTSubgraphPassElementwiseBroadcastTest
 ):
     def append_eltwise(self, data1, data2):
-        return paddle.tensor.math._divide_with_axis(x=data1, y=data2, axis=0)
+        return paddle.tensor.math.divide(x=data1, y=data2)
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_nn_grad.py b/python/paddle/fluid/tests/unittests/test_elementwise_nn_grad.py
index 2c5da64817d5f2..0b9c8c9a54928f 100644
--- a/python/paddle/fluid/tests/unittests/test_elementwise_nn_grad.py
+++ b/python/paddle/fluid/tests/unittests/test_elementwise_nn_grad.py
@@ -222,7 +222,7 @@ def func(self, place):
         y = paddle.static.data('y', shape, dtype)
         x.persistable = True
         y.persistable = True
-        out = paddle.tensor.math._divide_with_axis(x, y, axis=0)
+        out = paddle.tensor.math.divide(x, y)
         x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
         y_arr = np.random.uniform(-1, 1, shape).astype(dtype)
         y_arr[np.abs(y_arr) < 0.005] = 0.02
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index 6f797b82e1d083..4cd9f61907c5b8 100644
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -480,31 +480,6 @@ def pow(x, y, name=None):
 }
 
 
-@dygraph_only
-def _elementwise_op_in_dygraph(
-    x, y, axis=-1, act=None, use_mkldnn=False, op_name=None
-):
-    def is_inplace(op_name):
-        return op_name[-1] == "_"
-
-    if op_name not in OP_NAMEMAPPING.keys() or axis != -1:
-        op = getattr(_legacy_C_ops, op_name)
-        out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)
-    else:
-        if in_dygraph_mode():
-            op = getattr(
-                _C_ops,
-                OP_NAMEMAPPING[op_name] if not is_inplace(op_name) else op_name,
-            )
-            out = op(x, y)
-    if act is None:
-        return out
-    else:
-        return dygraph_utils._append_activation_in_dygraph(
-            out, act, use_mkldnn=use_mkldnn
-        )
-
-
 def _elementwise_op(helper):
     op_type = helper.layer_type
     original_op_type = helper.kwargs.get('original_op_type', op_type)
@@ -627,11 +602,7 @@ def add_(x, y, name=None):
             )
         )
 
-    if in_dygraph_mode():
-        return _C_ops.add_(x, y)
-    else:
-        out = _elementwise_op_in_dygraph(x, y, axis=axis, op_name=op_type)
-        return out
+    return _C_ops.add_(x, y)
 
 
 def subtract(x, y, name=None):
@@ -690,13 +661,10 @@ def subtract(x, y, name=None):
             # Tensor(shape=[3], dtype=float64, place=Place(cpu), stop_gradient=True,
             #        [ 4. , inf., -inf.])
     """
-    op_type = 'elementwise_sub'
-    axis = -1
-    act = None
     if in_dygraph_mode():
         return _C_ops.subtract(x, y)
     else:
-        return _elementwise_op(LayerHelper(op_type, **locals()))
+        return _elementwise_op(LayerHelper('elementwise_sub', **locals()))
 
 
 @inplace_apis_in_dygraph_only
@@ -716,13 +684,7 @@ def subtract_(x, y, name=None):
             )
         )
 
-    if in_dygraph_mode():
-        return _C_ops.subtract_(x, y)
-    else:
-        out = _elementwise_op_in_dygraph(
-            x, y, axis=axis, act=act, op_name='elementwise_sub_'
-        )
-        return out
+    return _C_ops.subtract_(x, y)
 
 
 def divide(x, y, name=None):
@@ -757,13 +719,10 @@ def divide(x, y, name=None):
             print(z)  # [2., 0.6, 2.]
 
     """
-    op_type = 'elementwise_div'
-    axis = -1
-    act = None
     if in_dygraph_mode():
         return _C_ops.divide(x, y)
     else:
-        return _elementwise_op(LayerHelper(op_type, **locals()))
+        return _elementwise_op(LayerHelper('elementwise_div', **locals()))
 
 
 def floor_divide(x, y, name=None):
@@ -800,12 +759,10 @@ def floor_divide(x, y, name=None):
             print(z)  # [2, 0, 2, 2]
 
     """
-    op_type = 'elementwise_floordiv'
-    axis = -1
     if in_dygraph_mode():
         return _C_ops.floor_divide(x, y)
     else:
-        return _elementwise_op(LayerHelper(op_type, **locals()))
+        return _elementwise_op(LayerHelper('elementwise_floordiv', **locals()))
 
 
 def remainder(x, y, name=None):
@@ -841,13 +798,10 @@ def remainder(x, y, name=None):
             print(z)  # [0, 3, 2, 1]
 
     """
-    op_type = 'elementwise_mod'
-    axis = -1
-
     if in_dygraph_mode():
         return _C_ops.remainder(x, y)
     else:
-        return _elementwise_op(LayerHelper(op_type, **locals()))
+        return _elementwise_op(LayerHelper('elementwise_mod', **locals()))
 
 
 @inplace_apis_in_dygraph_only
@@ -866,8 +820,7 @@ def remainder_(x, y, name=None):
                 out_shape, x.shape
             )
         )
-
-    return _elementwise_op_in_dygraph(x, y, axis=axis, op_name=op_type)
+    return _C_ops.remainder_(x, y)
 
 
 mod = remainder  # noqa: F841
@@ -911,10 +864,6 @@ def multiply(x, y, name=None):
             print(res) # [[[2, 4, 6], [2, 4, 6]]]
 
     """
-    op_type = 'elementwise_mul'
-    act = None
-    axis = -1
-
     if in_dygraph_mode():
         return _C_ops.multiply(x, y)
     else:
@@ -924,7 +873,7 @@ def multiply(x, y, name=None):
                 % (x.dtype, y.dtype)
             )
 
-        return _elementwise_op(LayerHelper(op_type, **locals()))
+        return _elementwise_op(LayerHelper('elementwise_mul', **locals()))
 
 
 @dygraph_only
@@ -958,7 +907,6 @@ def _add_with_axis(x, y, axis=-1, name=None):
         return _elementwise_op_with_axis_in_dygraph(x, y, axis, name, "add")
     else:
         op_type = 'elementwise_add'
-        act = None
         return _elementwise_op(LayerHelper(op_type, **locals()))
 
 
@@ -970,7 +918,6 @@ def _subtract_with_axis(x, y, axis=-1, name=None):
         )
     else:
         op_type = 'elementwise_sub'
-        act = None
        return _elementwise_op(LayerHelper(op_type, **locals()))
 
 
@@ -982,7 +929,6 @@ def _multiply_with_axis(x, y, axis=-1, name=None):
         )
     else:
         op_type = 'elementwise_mul'
-        act = None
         return _elementwise_op(LayerHelper(op_type, **locals()))
 
 
@@ -992,7 +938,6 @@ def _divide_with_axis(x, y, axis=-1, name=None):
         return _elementwise_op_with_axis_in_dygraph(x, y, axis, name, "divide")
     else:
         op_type = 'elementwise_div'
-        act = None
         return _elementwise_op(LayerHelper(op_type, **locals()))
 
 
@@ -1052,13 +997,10 @@ def maximum(x, y, name=None):
             # Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
             #        [5. , 3. , inf.])
     """
-    op_type = 'elementwise_max'
-    axis = -1
-    act = None
     if in_dygraph_mode():
         return _C_ops.maximum(x, y)
     else:
-        return _elementwise_op(LayerHelper(op_type, **locals()))
+        return _elementwise_op(LayerHelper('elementwise_max', **locals()))
 
 
 def minimum(x, y, name=None):
@@ -1117,13 +1059,10 @@ def minimum(x, y, name=None):
             # Tensor(shape=[3], dtype=float64, place=Place(cpu), stop_gradient=True,
             #        [ 1. , -inf., 5. ])
     """
-    op_type = 'elementwise_min'
-    axis = -1
-    act = None
     if in_dygraph_mode():
         return _C_ops.minimum(x, y)
     else:
-        return _elementwise_op(LayerHelper(op_type, **locals()))
+        return _elementwise_op(LayerHelper('elementwise_min', **locals()))
 
 
 def fmax(x, y, name=None):
@@ -1184,13 +1123,10 @@ def fmax(x, y, name=None):
             # Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
             #        [5. , 3. , inf.])
     """
-    op_type = 'elementwise_fmax'
-    axis = -1
-    act = None
     if in_dygraph_mode():
         return _C_ops.fmax(x, y)
     else:
-        return _elementwise_op(LayerHelper(op_type, **locals()))
+        return _elementwise_op(LayerHelper('elementwise_fmax', **locals()))
 
 
 def fmin(x, y, name=None):
@@ -1251,13 +1187,10 @@ def fmin(x, y, name=None):
             # Tensor(shape=[3], dtype=float64, place=Place(cpu), stop_gradient=True,
             #        [ 1. , -inf., 5. ])
     """
-    op_type = 'elementwise_fmin'
-    axis = -1
-    act = None
     if in_dygraph_mode():
         return _C_ops.fmin(x, y)
     else:
-        return _elementwise_op(LayerHelper(op_type, **locals()))
+        return _elementwise_op(LayerHelper('elementwise_fmin', **locals()))
 
 
 def sum(x, axis=None, dtype=None, keepdim=False, name=None):
@@ -4888,9 +4821,6 @@ def frac(x, name=None):
             #         [[ 0.22000003, -0.02999997],
             #          [-0.54999995, 0.66000003]])
     """
-    op_type = 'elementwise_sub'
-    axis = -1
-    act = None
     if x.dtype not in [
         paddle.int32,
         paddle.int64,
@@ -4917,7 +4847,7 @@
         helper.append_op(
             type="trunc", inputs=inputs, attrs=attrs, outputs={"Out": y}
         )
-        return _elementwise_op(LayerHelper(op_type, **locals()))
+        return _elementwise_op(LayerHelper('elementwise_sub', **locals()))
 
 
 def sgn(x, name=None):

From 794058ad8605d7ff57a39911057d425c23acea6b Mon Sep 17 00:00:00 2001
From: zyfncg
Date: Mon, 6 Feb 2023 02:52:24 +0000
Subject: [PATCH 2/5] fix inplace bug eager-gen

---
 .../auto_code_generator/eager_generator.cc   |  3 +-
 .../generator/eager_gen.py                   | 63 +++++++++++--------
 paddle/fluid/eager/utils.h                   |  5 +-
 paddle/phi/api/yaml/legacy_ops.yaml          |  2 +-
 python/paddle/tensor/math.py                 |  3 -
 5 files changed, 40 insertions(+), 36 deletions(-)

diff --git a/paddle/fluid/eager/auto_code_generator/eager_generator.cc b/paddle/fluid/eager/auto_code_generator/eager_generator.cc
index 5915494ebc3cd0..d4295089ec824b 100644
--- a/paddle/fluid/eager/auto_code_generator/eager_generator.cc
+++ b/paddle/fluid/eager/auto_code_generator/eager_generator.cc
@@ -1148,8 +1148,7 @@ static std::string GenerateGradNodeCreationContent(
   if (!forward_inplace_map.empty()) {
     const char* CHECKING_INPLACE_TEMPLATE =
         "  // Check Inplace\n"
-        "  egr::EagerUtils::CheckInplace(%s, p_autograd_%s, "
-        "require_any_grad);\n";
+        "  egr::EagerUtils::CheckInplace(%s, p_autograd_%s);\n";
     for (auto& inplace_pair : forward_inplace_map) {
       std::string inplace_name = LegalizeVarName(inplace_pair.second);
       check_inplace_str += paddle::string::Sprintf(
diff --git a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
index 650bf0626f1ad2..86779897163032 100644
--- a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
+++ b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
@@ -261,6 +261,9 @@ class {} : public egr::GradNodeBase {{
   // Get Outputs
 {}
   VLOG(4) << \"Finish AD API: {}";
+
+  // Check Inplace if needed
+{}{}
   // LOG IF DEBUG
 {}
   // Returns
@@ -401,7 +404,7 @@ class {} : public egr::GradNodeBase {{
 """
 
 CHECK_INPLACE_TEMPLATE = """
-  egr::EagerUtils::CheckInplace({}, {}, require_any_grad);
+  egr::EagerUtils::CheckInplace({}, {});
 """
 
 BUMP_INPLACE_VERSION_TEMPLATE = """
@@ -1462,8 +1465,34 @@ def GenerateForwardDefinitionAndDeclaration(self, is_inplaced):
             returns_str = ", ".join(returns_list)
             returns_str = f"{returns_type_str}{{{returns_str}}}"
 
+        # Check Inplace
+        check_inplace_str = ""
+        bump_inplace_version_str = ""
+        # Note: When the name of original api in yaml is end of '_', that means this api is a
+        # special inplace api and it doesn't require checking and bumping version (except assign_out_).
+        # This rule is obscure, so we maybe replace it by adding new design in the future.
+        if is_inplaced and (
+            self.forward_api_name[-1] != '_'
+            or self.forward_api_name == 'assign_out_'
+        ):
+            for inplace_name in forward_inplace_map.keys():
+                if (
+                    not self.is_forward_only
+                    and forward_api_name not in inplace_check_blacklist
+                ):
+                    inplace_autograd_meta_name = GetAutoGradMetaName(
+                        inplace_name
+                    )
+                    check_inplace_str += CHECK_INPLACE_TEMPLATE.format(
+                        inplace_name, inplace_autograd_meta_name
+                    )
+                bump_inplace_version_str += (
+                    BUMP_INPLACE_VERSION_TEMPLATE.format(
+                        inplace_name, inplace_name
+                    )
+                )
+
         # Node Creation Pre-Processing
-        inputs_names = []
         if not self.is_forward_only:
             # 1. Get Input AutoGradMeta
             inputs_autograd_meta_list = []
@@ -1478,13 +1507,9 @@ def GenerateForwardDefinitionAndDeclaration(self, is_inplaced):
                 ) in backward_grad_outputs_map.items():
                     if pos == corresponding_pos:
                         has_corresponding_grad_output = True
-                if (
-                    has_corresponding_grad_output
-                    or (
-                        name in forward_inplace_map
-                        and forward_api_name not in inplace_check_blacklist
-                    )
-                    or self.is_forward_only
+                if has_corresponding_grad_output or (
+                    name in forward_inplace_map
+                    and forward_api_name not in inplace_check_blacklist
                 ):
                     input_autograd_meta_name = GetAutoGradMetaName(name)
                     if IsPlainTensorType(ttype):
@@ -1532,24 +1557,6 @@ def GenerateForwardDefinitionAndDeclaration(self, is_inplaced):
                 outputs_autograd_meta_list.append(output_autograd_meta)
             outputs_autograd_meta_str = "\n".join(outputs_autograd_meta_list)
 
-            # 3. Check Inplace
-            check_inplace_str = ""
-            bump_inplace_version_str = ""
-            if is_inplaced:
-                for inplace_name in forward_inplace_map.keys():
-                    if forward_api_name not in inplace_check_blacklist:
-                        inplace_autograd_meta_name = GetAutoGradMetaName(
-                            inplace_name
-                        )
-                        check_inplace_str += CHECK_INPLACE_TEMPLATE.format(
-                            inplace_name, inplace_autograd_meta_name
-                        )
-                        bump_inplace_version_str += (
-                            BUMP_INPLACE_VERSION_TEMPLATE.format(
-                                inplace_name, inplace_name
-                            )
-                        )
-
         # Node Creation
         self.GenerateNodeCreationCodes()
         node_creation_str = self.node_creation_str
@@ -1643,6 +1650,8 @@ def GenerateForwardDefinitionAndDeclaration(self, is_inplaced):
             forward_call_str,
             get_outputs_str,
             forward_api_name,
+            check_inplace_str,
+            bump_inplace_version_str,
             log_str,
             returns_str,
         )
diff --git a/paddle/fluid/eager/utils.h b/paddle/fluid/eager/utils.h
index a726528f53d054..11e9520dd29477 100644
--- a/paddle/fluid/eager/utils.h
+++ b/paddle/fluid/eager/utils.h
@@ -155,9 +155,8 @@ class EagerUtils {
   }
 
   static void CheckInplace(const paddle::experimental::Tensor& target,
-                           const AutogradMeta* autograd_meta,
-                           bool require_any_grad) {
-    if (require_any_grad && autograd_meta) {
+                           const AutogradMeta* autograd_meta) {
+    if (autograd_meta) {
       PADDLE_ENFORCE_EQ(!autograd_meta->StopGradient() &&
                             egr::egr_utils_api::IsLeafTensor(target),
                         false,
diff --git a/paddle/phi/api/yaml/legacy_ops.yaml b/paddle/phi/api/yaml/legacy_ops.yaml
index 049d86473cfc5b..5c2ee1341be420 100755
--- a/paddle/phi/api/yaml/legacy_ops.yaml
+++ b/paddle/phi/api/yaml/legacy_ops.yaml
@@ -1449,7 +1449,7 @@
 
 - op : remainder
   args : (Tensor x, Tensor y)
-  output : Tensor
+  output : Tensor (out)
   infer_meta :
     func : ElementwiseInferMeta
   kernel :
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index 4cd9f61907c5b8..0e72cab995820d 100644
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -810,9 +810,6 @@ def remainder_(x, y, name=None):
     Inplace version of ``remainder`` API, the output Tensor will be inplaced with input ``x``.
     Please refer to :ref:`api_tensor_remainder`.
     """
-    op_type = 'elementwise_mod_'
-    axis = -1
-
     out_shape = broadcast_shape(x.shape, y.shape)
     if out_shape != x.shape:
         raise ValueError(

From 7ed9023766dcd4fa71bcdf47593b30e28ed1752d Mon Sep 17 00:00:00 2001
From: zyfncg
Date: Mon, 6 Feb 2023 05:57:04 +0000
Subject: [PATCH 3/5] fix bug

---
 .../fluid/eager/auto_code_generator/eager_generator.cc  | 4 +++-
 .../eager/auto_code_generator/generator/eager_gen.py    | 9 ++++-----
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/paddle/fluid/eager/auto_code_generator/eager_generator.cc b/paddle/fluid/eager/auto_code_generator/eager_generator.cc
index d4295089ec824b..b4aba066245b20 100644
--- a/paddle/fluid/eager/auto_code_generator/eager_generator.cc
+++ b/paddle/fluid/eager/auto_code_generator/eager_generator.cc
@@ -1148,7 +1148,9 @@ static std::string GenerateGradNodeCreationContent(
   if (!forward_inplace_map.empty()) {
     const char* CHECKING_INPLACE_TEMPLATE =
         "  // Check Inplace\n"
-        "  egr::EagerUtils::CheckInplace(%s, p_autograd_%s);\n";
+        "  if (require_any_grad) {\n"
+        "    egr::EagerUtils::CheckInplace(%s, p_autograd_%s);\n"
+        "  }\n";
     for (auto& inplace_pair : forward_inplace_map) {
       std::string inplace_name = LegalizeVarName(inplace_pair.second);
       check_inplace_str += paddle::string::Sprintf(
diff --git a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
index 86779897163032..dd805133506a66 100644
--- a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
+++ b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
@@ -404,7 +404,9 @@ class {} : public egr::GradNodeBase {{
 """
 
 CHECK_INPLACE_TEMPLATE = """
-  egr::EagerUtils::CheckInplace({}, {});
+  if (require_any_grad) {{
+    egr::EagerUtils::CheckInplace({}, {});
+  }}
 """
 
 BUMP_INPLACE_VERSION_TEMPLATE = """
@@ -1480,11 +1482,8 @@ def GenerateForwardDefinitionAndDeclaration(self, is_inplaced):
                     not self.is_forward_only
                     and forward_api_name not in inplace_check_blacklist
                 ):
-                    inplace_autograd_meta_name = GetAutoGradMetaName(
-                        inplace_name
-                    )
                     check_inplace_str += CHECK_INPLACE_TEMPLATE.format(
-                        inplace_name, inplace_autograd_meta_name
+                        inplace_name, GetAutoGradMetaName(inplace_name)
                     )
                 bump_inplace_version_str += (
                     BUMP_INPLACE_VERSION_TEMPLATE.format(

From ca6ee7605de237b0bdae55c443a9adcf2b6fb3d5 Mon Sep 17 00:00:00 2001
From: zyfncg
Date: Mon, 6 Feb 2023 08:12:59 +0000
Subject: [PATCH 4/5] revert change for CheckInplace

---
 paddle/fluid/eager/auto_code_generator/eager_generator.cc  | 5 ++---
 .../fluid/eager/auto_code_generator/generator/eager_gen.py | 4 +---
 paddle/fluid/eager/utils.h                                 | 5 +++--
 3 files changed, 6 insertions(+), 8 deletions(-)

diff --git a/paddle/fluid/eager/auto_code_generator/eager_generator.cc b/paddle/fluid/eager/auto_code_generator/eager_generator.cc
index b4aba066245b20..5915494ebc3cd0 100644
--- a/paddle/fluid/eager/auto_code_generator/eager_generator.cc
+++ b/paddle/fluid/eager/auto_code_generator/eager_generator.cc
@@ -1148,9 +1148,8 @@ static std::string GenerateGradNodeCreationContent(
   if (!forward_inplace_map.empty()) {
     const char* CHECKING_INPLACE_TEMPLATE =
         "  // Check Inplace\n"
-        "  if (require_any_grad) {\n"
-        "    egr::EagerUtils::CheckInplace(%s, p_autograd_%s);\n"
-        "  }\n";
+        "  egr::EagerUtils::CheckInplace(%s, p_autograd_%s, "
+        "require_any_grad);\n";
     for (auto& inplace_pair : forward_inplace_map) {
       std::string inplace_name = LegalizeVarName(inplace_pair.second);
       check_inplace_str += paddle::string::Sprintf(
diff --git a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
index dd805133506a66..41dd4e3058fe1f 100644
--- a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
+++ b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
@@ -404,9 +404,7 @@ class {} : public egr::GradNodeBase {{
 """
 
 CHECK_INPLACE_TEMPLATE = """
-  if (require_any_grad) {{
-    egr::EagerUtils::CheckInplace({}, {});
-  }}
+  egr::EagerUtils::CheckInplace({}, {}, require_any_grad);
 """
 
 BUMP_INPLACE_VERSION_TEMPLATE = """
diff --git a/paddle/fluid/eager/utils.h b/paddle/fluid/eager/utils.h
index 11e9520dd29477..a726528f53d054 100644
--- a/paddle/fluid/eager/utils.h
+++ b/paddle/fluid/eager/utils.h
@@ -155,8 +155,9 @@ class EagerUtils {
   }
 
   static void CheckInplace(const paddle::experimental::Tensor& target,
-                           const AutogradMeta* autograd_meta) {
-    if (autograd_meta) {
+                           const AutogradMeta* autograd_meta,
+                           bool require_any_grad) {
+    if (require_any_grad && autograd_meta) {
       PADDLE_ENFORCE_EQ(!autograd_meta->StopGradient() &&
                             egr::egr_utils_api::IsLeafTensor(target),
                         false,

From 4828a673fafe4b86fee8ff75751dcc803a4127c6 Mon Sep 17 00:00:00 2001
From: zyfncg
Date: Tue, 7 Feb 2023 07:05:54 +0000
Subject: [PATCH 5/5] polish code

---
 python/paddle/tensor/math.py | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index 0e72cab995820d..edd44e4e833d32 100644
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -591,8 +591,6 @@ def add_(x, y, name=None):
     Inplace version of ``add`` API, the output Tensor will be inplaced with input ``x``.
     Please refer to :ref:`api_tensor_add`.
     """
-    op_type = 'elementwise_add_'
-    axis = -1
 
     out_shape = broadcast_shape(x.shape, y.shape)
     if out_shape != x.shape:
@@ -671,8 +669,6 @@ def subtract_(x, y, name=None):
     Inplace version of ``subtract`` API, the output Tensor will be inplaced with input ``x``.
     Please refer to :ref:`api_tensor_subtract`.
    """
-    axis = -1
-    act = None
 
     out_shape = broadcast_shape(x.shape, y.shape)
     if out_shape != x.shape:
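
Net effect of the series on the Python side, as a minimal sketch (assuming a paddle build that carries these five patches; the tensors and printed values below are illustrative only): the public elementwise APIs take just (x, y, name=None) and rely on standard broadcasting, while the axis-aware behaviour survives only in the private _add_with_axis-style helpers exercised by the TensorRT tests.

    import paddle

    x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
    y = paddle.to_tensor([10.0, 20.0])  # broadcasts against x's trailing dimension

    # Public APIs: no `axis` argument, plain broadcasting.
    paddle.add(x, y)       # -> [[11., 22.], [13., 24.]]
    paddle.subtract(x, y)  # -> [[-9., -18.], [-7., -16.]]
    paddle.multiply(x, y)  # -> [[10., 40.], [30., 80.]]
    paddle.divide(x, y)    # -> [[0.1, 0.1], [0.3, 0.2]]

    # Inplace variants now dispatch straight to the C++ ops (_C_ops.add_,
    # _C_ops.subtract_, _C_ops.remainder_) and still require that
    # broadcasting leaves x's shape unchanged.
    x.add_(y)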