Remove axis in some elementwise api #50190

Merged · 5 commits · Feb 7, 2023
Changes from 4 commits
58 changes: 32 additions & 26 deletions paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
@@ -261,6 +261,9 @@ class {} : public egr::GradNodeBase {{
// Get Outputs
{}
VLOG(4) << \"Finish AD API: {}\";

// Check Inplace if needed
{}{}
// LOG IF DEBUG
{}
// Returns
@@ -1462,8 +1465,31 @@ def GenerateForwardDefinitionAndDeclaration(self, is_inplaced):
returns_str = ", ".join(returns_list)
returns_str = f"{returns_type_str}{{{returns_str}}}"

# Check Inplace
check_inplace_str = ""
bump_inplace_version_str = ""
# Note: When the name of the original api in yaml ends with '_', it means this api is a
# special inplace api that doesn't require checking and bumping the version (except assign_out_).
# This rule is obscure, so we may replace it with a new design in the future.
if is_inplaced and (
self.forward_api_name[-1] != '_'
or self.forward_api_name == 'assign_out_'
Contributor
Why do we need this special case?

Contributor Author
This is a legacy quirk of the inplace handling mechanism. Inplace APIs that have a backward pass normally need InplaceCheck handling, but assign_out_ shipped without InplaceCheck early on, so adding the check now would break compatibility. For the time being it can only be skipped via the blacklist.
):
for inplace_name in forward_inplace_map.keys():
if (
not self.is_forward_only
and forward_api_name not in inplace_check_blacklist
):
check_inplace_str += CHECK_INPLACE_TEMPLATE.format(
inplace_name, GetAutoGradMetaName(inplace_name)
)
bump_inplace_version_str += (
BUMP_INPLACE_VERSION_TEMPLATE.format(
inplace_name, inplace_name
)
)

# Node Creation Pre-Processing
inputs_names = []
if not self.is_forward_only:
# 1. Get Input AutoGradMeta
inputs_autograd_meta_list = []
@@ -1478,13 +1504,9 @@ def GenerateForwardDefinitionAndDeclaration(self, is_inplaced):
) in backward_grad_outputs_map.items():
if pos == corresponding_pos:
has_corresponding_grad_output = True
if (
has_corresponding_grad_output
or (
name in forward_inplace_map
and forward_api_name not in inplace_check_blacklist
)
or self.is_forward_only
if has_corresponding_grad_output or (
name in forward_inplace_map
and forward_api_name not in inplace_check_blacklist
):
input_autograd_meta_name = GetAutoGradMetaName(name)
if IsPlainTensorType(ttype):
@@ -1532,24 +1554,6 @@ def GenerateForwardDefinitionAndDeclaration(self, is_inplaced):
outputs_autograd_meta_list.append(output_autograd_meta)
outputs_autograd_meta_str = "\n".join(outputs_autograd_meta_list)

# 3. Check Inplace
check_inplace_str = ""
bump_inplace_version_str = ""
if is_inplaced:
for inplace_name in forward_inplace_map.keys():
if forward_api_name not in inplace_check_blacklist:
inplace_autograd_meta_name = GetAutoGradMetaName(
inplace_name
)
check_inplace_str += CHECK_INPLACE_TEMPLATE.format(
inplace_name, inplace_autograd_meta_name
)
bump_inplace_version_str += (
BUMP_INPLACE_VERSION_TEMPLATE.format(
inplace_name, inplace_name
)
)

# Node Creation
self.GenerateNodeCreationCodes()
node_creation_str = self.node_creation_str
@@ -1643,6 +1647,8 @@ def GenerateForwardDefinitionAndDeclaration(self, is_inplaced):
forward_call_str,
get_outputs_str,
forward_api_name,
check_inplace_str,
bump_inplace_version_str,
log_str,
returns_str,
)
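To make the gating rule above concrete, here is a minimal sketch (not the generator itself) of the condition this diff applies; `emits_inplace_check` and the sample blacklist contents are illustrative stand-ins for the generator's own data:

```python
# Sketch of when CheckInplace / BumpInplaceVersion code is emitted
# for a generated forward API, per the hunk above.
def emits_inplace_check(api_name, is_inplaced, is_forward_only, blacklist):
    if not is_inplaced:
        return False
    # APIs whose yaml name already ends with '_' are "special" inplace
    # APIs and are skipped -- except assign_out_, which still enters the
    # branch and is then filtered by the blacklist for compatibility.
    if api_name.endswith('_') and api_name != 'assign_out_':
        return False
    return not is_forward_only and api_name not in blacklist

# Illustrative values, assuming assign_out_ sits on the blacklist:
blacklist = {'assign_out_'}
print(emits_inplace_check('add_', True, False, blacklist))         # False: special inplace api
print(emits_inplace_check('assign_out_', True, False, blacklist))  # False: blacklisted
print(emits_inplace_check('reshape', True, False, blacklist))      # True
```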
2 changes: 1 addition & 1 deletion paddle/phi/api/yaml/legacy_ops.yaml
@@ -1449,7 +1449,7 @@

- op : remainder
args : (Tensor x, Tensor y)
output : Tensor
output : Tensor (out)
infer_meta :
func : ElementwiseInferMeta
kernel :
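For context, `remainder` is the elementwise modulo op whose output this hunk names; a minimal usage sketch (tensor values are illustrative):

```python
import paddle

x = paddle.to_tensor([7.0, 8.0, 9.0])
y = paddle.to_tensor([4.0, 4.0, 4.0])
out = paddle.remainder(x, y)  # elementwise x % y -> [3., 0., 1.]
```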
22 changes: 0 additions & 22 deletions python/paddle/fluid/layers/nn.py
@@ -101,28 +101,6 @@ def _get_reduce_dim(dim, input):
return reduce_all, dim


@dygraph_only
def _elementwise_op_in_dygraph(
x, y, axis=-1, act=None, use_mkldnn=False, op_name=None
):
def is_inplace(op_name):
return op_name[-1] == "_"

if op_name not in OP_NAMEMAPPING.keys() or axis != -1:
op = getattr(_legacy_C_ops, op_name)
out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)
else:
if in_dygraph_mode():
op = getattr(
_C_ops,
OP_NAMEMAPPING[op_name] if not is_inplace(op_name) else op_name,
)
out = op(x, y)
return dygraph_utils._append_activation_in_dygraph(
out, act, use_mkldnn=use_mkldnn
)


@deprecated(since="2.0.0", update_to="paddle.nn.functional.embedding")
def embedding(
input,
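With `_elementwise_op_in_dygraph` removed, dygraph callers are expected to invoke the elementwise APIs directly. A hedged sketch of the equivalent direct calls; the op-name mappings shown in comments illustrate what `OP_NAMEMAPPING` resolved, not an exhaustive table:

```python
import paddle

x = paddle.rand([2, 3])
y = paddle.rand([2, 3])

# Before: _elementwise_op_in_dygraph(x, y, op_name='elementwise_add')
# After: with the axis argument gone, call the public API directly.
out = paddle.add(x, y)       # 'elementwise_add' -> 'add'
out = paddle.multiply(x, y)  # 'elementwise_mul' -> 'multiply'
```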
@@ -50,7 +50,7 @@ def setUp(self):
self.fetch_list = [out]

def append_eltwise(self, data1, data2):
return paddle.tensor.math._add_with_axis(x=data1, y=data2, axis=0)
return paddle.tensor.math.add(x=data1, y=data2)

def test_check_output(self):
if os.path.exists(self.path + "_opt_cache"):
@@ -67,21 +67,21 @@ class TensorRTSubgraphPassElementwiseBroadcastTest1(
TensorRTSubgraphPassElementwiseBroadcastTest
):
def append_eltwise(self, data1, data2):
return paddle.tensor.math._subtract_with_axis(x=data1, y=data2, axis=0)
return paddle.tensor.math.subtract(x=data1, y=data2)


class TensorRTSubgraphPassElementwiseBroadcastTest2(
TensorRTSubgraphPassElementwiseBroadcastTest
):
def append_eltwise(self, data1, data2):
return paddle.tensor.math._multiply_with_axis(x=data1, y=data2, axis=0)
return paddle.tensor.math.multiply(x=data1, y=data2)


class TensorRTSubgraphPassElementwiseBroadcastTest3(
TensorRTSubgraphPassElementwiseBroadcastTest
):
def append_eltwise(self, data1, data2):
return paddle.tensor.math._divide_with_axis(x=data1, y=data2, axis=0)
return paddle.tensor.math.divide(x=data1, y=data2)


if __name__ == "__main__":
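The tests above swap the private `_*_with_axis(..., axis=0)` helpers for the public ops, which rely on standard trailing-aligned broadcasting instead of an explicit axis. A small sketch of that behavior (shapes are illustrative, chosen to resemble a typical broadcast test):

```python
import paddle

data1 = paddle.rand([4, 3, 8, 8])
data2 = paddle.rand([4, 3, 8, 8])
out = paddle.add(x=data1, y=data2)  # same shape: no axis needed

# Trailing-aligned broadcast also works without an axis argument:
bias = paddle.rand([8, 8])          # broadcasts against [4, 3, 8, 8]
out2 = paddle.multiply(data1, bias)
```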
@@ -222,7 +222,7 @@ def func(self, place):
y = paddle.static.data('y', shape, dtype)
x.persistable = True
y.persistable = True
out = paddle.tensor.math._divide_with_axis(x, y, axis=0)
out = paddle.tensor.math.divide(x, y)
x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
y_arr = np.random.uniform(-1, 1, shape).astype(dtype)
y_arr[np.abs(y_arr) < 0.005] = 0.02