Support auto-generation for static op elementwise_min
RedContritio committed Jul 7, 2023
1 parent 2fc429f · commit ba8d0ce
Showing 11 changed files with 67 additions and 209 deletions.
168 changes: 0 additions & 168 deletions paddle/fluid/operators/elementwise/elementwise_min_op.cc

This file was deleted.
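The 168-line handwritten definition of elementwise_min (operator, grad ops, and related registration) is removed; the equivalent C++ is now generated from the YAML entries added below. As a quick sanity check that dispatch is unchanged — a minimal sketch, assuming a Paddle build that includes this commit:

    import paddle

    # elementwise_min is exposed to users as paddle.minimum; the generated
    # op should behave exactly like the deleted handwritten one.
    x = paddle.to_tensor([1.0, 4.0, 3.0])
    y = paddle.to_tensor([2.0, 2.0, 3.0])
    print(paddle.minimum(x, y))  # Tensor([1., 2., 3.])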

5 changes: 2 additions & 3 deletions paddle/fluid/operators/elementwise/unity_build_rule.cmake
@@ -4,9 +4,8 @@
# Generally, the combination rules in this file do not need to be modified.
# If there are some redefined error in compiling with the source file which
# in combination rule, you can remove the source file from the following rules.
-register_unity_group(
-  cc elementwise_add_op.cc elementwise_div_op.cc elementwise_min_op.cc
-  elementwise_mul_op.cc elementwise_sub_op.cc)
+register_unity_group(cc elementwise_add_op.cc elementwise_div_op.cc
+                     elementwise_mul_op.cc elementwise_sub_op.cc)
register_unity_group(
  cu
  elementwise_add_op.cu
11 changes: 11 additions & 0 deletions paddle/phi/api/yaml/backward.yaml
@@ -870,6 +870,17 @@
    func : fmax_grad
    data_type : out_grad

+- backward_op : fmin_grad
+  forward : fmin(Tensor x, Tensor y) -> Tensor(out)
+  args : (Tensor x, Tensor y, Tensor out_grad)
+  output : Tensor(x_grad), Tensor(y_grad)
+  infer_meta :
+    func : GeneralBinaryGradInferMeta
+    param: [x, y]
+  kernel :
+    func : fmin_grad
+    data_type : out_grad

- backward_op : fold_grad
  forward: fold (Tensor x, int[] output_sizes, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations) -> Tensor(out)
  args: (Tensor x, Tensor out_grad, int[] output_sizes, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations)
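The fmin_grad entry moves here from legacy_backward.yaml (deleted below) and gains an explicit data_type. A minimal sketch of the gradient semantics it implements, assuming a current Paddle build — the gradient flows to whichever operand fmin selected:

    import paddle

    x = paddle.to_tensor([1.0, 4.0], stop_gradient=False)
    y = paddle.to_tensor([3.0, 2.0], stop_gradient=False)
    paddle.fmin(x, y).sum().backward()
    print(x.grad)  # [1., 0.] -- x supplied the minimum at index 0
    print(y.grad)  # [0., 1.] -- y supplied the minimum at index 1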
10 changes: 0 additions & 10 deletions paddle/phi/api/yaml/legacy_backward.yaml
@@ -232,16 +232,6 @@
    func : UnchangedInferMeta
  invoke : zeros_like(out_grad)

-- backward_op : fmin_grad
-  forward : fmin(Tensor x, Tensor y) -> Tensor(out)
-  args : (Tensor x, Tensor y, Tensor out_grad)
-  output : Tensor(x_grad), Tensor(y_grad)
-  infer_meta :
-    func : GeneralBinaryGradInferMeta
-    param: [x, y]
-  kernel :
-    func : fmin_grad

- backward_op : frobenius_norm_grad
  forward : frobenius_norm(Tensor x, int64_t[] axis, bool keep_dim, bool reduce_all) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad, int64_t[] axis, bool keep_dim, bool reduce_all)
10 changes: 0 additions & 10 deletions paddle/phi/api/yaml/legacy_ops.yaml
@@ -316,16 +316,6 @@
  kernel :
    func : floor_divide

-- op : fmin
-  args : (Tensor x, Tensor y)
-  output : Tensor(out)
-  infer_meta :
-    param: [x, y]
-    func : ElementwiseInferMeta
-  kernel :
-    func : fmin
-  backward : fmin_grad

- op : frobenius_norm
  args : (Tensor x, int64_t[] axis, bool keep_dim, bool reduce_all)
  output : Tensor(out)
12 changes: 12 additions & 0 deletions paddle/phi/api/yaml/op_compat.yaml
@@ -1085,9 +1085,15 @@

- op : fmin (elementwise_fmin)
  backward : fmin_grad (elementwise_fmin_grad)
+  inputs :
+    {x : X, y : Y}
+  outputs :
+    {out : Out}
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
+  complex_promote : [X, Y]
+  manual_signature : [fmin]

- op : fold
  inputs :

@@ -1786,9 +1792,15 @@

- op : minimum (elementwise_min)
  backward : minimum_grad (elementwise_min_grad)
+  inputs :
+    {x : X, y : Y}
+  outputs :
+    {out : Out}
  extra :
    attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
+  complex_promote : [X, Y]
+  manual_signature : [minimum]

- op : mish
  backward : mish_grad
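The new inputs and outputs maps tie the generated phi argument names (x, y, out) back to the legacy fluid names (X, Y, Out), and manual_signature keeps the hand-written argument mapping in elementwise_sig.cc authoritative. A minimal sketch, assuming a 2023-era static-graph build — the program should still record the legacy op type:

    import paddle

    paddle.enable_static()
    main = paddle.static.Program()
    with paddle.static.program_guard(main):
        x = paddle.static.data("x", [2], "float32")
        y = paddle.static.data("y", [2], "float32")
        out = paddle.minimum(x, y)
    print([op.type for op in main.global_block().ops])  # expect ['elementwise_min']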
8 changes: 8 additions & 0 deletions paddle/phi/api/yaml/op_version.yaml
@@ -181,6 +181,14 @@
        comment : In order to support the function of scaling the input Y when using the operator of elementwise_max.
        default : 1.0

+- op : elementwise_min
+  version :
+    - checkpoint : Register elementwise_min for adding the attribute of Scale_y.
+      action :
+        - add_attr : Scale_y
+          comment : In order to support the function of scaling the input Y when using the operator of elementwise_min.
+          default : 1.0

- op : elementwise_mod
  version :
    - checkpoint : Register elementwise_mod for adding the attribute of Scale_y
10 changes: 10 additions & 0 deletions paddle/phi/api/yaml/ops.yaml
@@ -941,6 +941,16 @@
    func : fmax
  backward : fmax_grad

+- op : fmin
+  args : (Tensor x, Tensor y)
+  output : Tensor(out)
+  infer_meta :
+    func : ElementwiseInferMeta
+    param: [x, y]
+  kernel :
+    func : fmin
+  backward : fmin_grad

- op : fold
  args: (Tensor x, int[] output_sizes, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations)
  output: Tensor(out)
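fmin itself also moves from legacy_ops.yaml into the generated ops.yaml. Unlike minimum, fmin follows the C fmin convention and prefers a non-NaN operand — a minimal sketch, assuming a current Paddle build:

    import paddle

    x = paddle.to_tensor([1.0, float("nan")])
    y = paddle.to_tensor([2.0, 3.0])
    print(paddle.fmin(x, y))     # [1., 3.] -- the NaN in x is skipped
    print(paddle.minimum(x, y))  # [1., nan] -- minimum propagates NaN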
12 changes: 12 additions & 0 deletions paddle/phi/api/yaml/static_backward.yaml
@@ -211,6 +211,18 @@
  kernel :
    func : min_grad

+- backward_op : minimum_grad
+  forward : minimum(Tensor x, Tensor y, int axis = -1) -> Tensor(out)
+  args : (Tensor x, Tensor y, Tensor out_grad)
+  output : Tensor(x_grad), Tensor(y_grad)
+  infer_meta :
+    func : GeneralBinaryGradInferMeta
+    param: [x, y]
+  kernel :
+    func : minimum_grad
+    data_type : out_grad
+  composite : minimum_grad(x, y, out_grad, x_grad, y_grad)

- backward_op : norm_grad
  forward : norm (Tensor x, int axis, float epsilon=1.0e-10f, bool is_test=false) -> Tensor(out), Tensor(norm)
  args : (Tensor x, Tensor norm, Tensor out_grad, int axis, float epsilon, bool is_test)
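The composite entry points the op at the primitive decomposition of minimum_grad used by the prim machinery. Roughly, the incoming gradient is routed by the comparison mask — an illustrative sketch only (minimum_grad_sketch is a hypothetical name; the real rule also reduces over broadcast axes):

    import paddle

    def minimum_grad_sketch(x, y, out_grad):
        # Send the gradient to whichever operand produced the minimum.
        mask = paddle.cast(x <= y, out_grad.dtype)
        return out_grad * mask, out_grad * (1.0 - mask)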
9 changes: 9 additions & 0 deletions paddle/phi/api/yaml/static_ops.yaml
@@ -397,6 +397,15 @@
    param : [x, axis, keepdim, reduce_all]
  backward : min_grad

+- op : minimum
+  args : (Tensor x, Tensor y, int axis = -1)
+  output : Tensor(out)
+  infer_meta :
+    func : ElementwiseRawInferMeta
+  kernel :
+    func : minimum
+  backward : minimum_grad

- op : norm
  args : (Tensor x, int axis, float epsilon=1.0e-10f, bool is_test=false)
  output : Tensor(out), Tensor(norm)
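The static op keeps the legacy int axis = -1 attribute (hence ElementwiseRawInferMeta rather than ElementwiseInferMeta), so older programs that set an explicit broadcast axis still map onto the generated op. With the default axis, broadcasting follows the usual trailing-dimension rules — a minimal sketch:

    import paddle

    x = paddle.rand([2, 3, 4])
    y = paddle.rand([3, 4])
    print(paddle.minimum(x, y).shape)  # [2, 3, 4]: y broadcasts against x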
21 changes: 3 additions & 18 deletions paddle/phi/ops/compat/elementwise_sig.cc
@@ -78,6 +78,9 @@ KernelSignature ElementwiseMaxOpArgumentMapping(

KernelSignature ElementwiseMinOpArgumentMapping(
    const ArgumentMappingContext& ctx) {
+  if (ctx.IsForInferShape()) {
+    return KernelSignature("minimum_raw", {"X", "Y"}, {"axis"}, {"Out"});
+  }
  int axis = paddle::any_cast<int>(ctx.Attr("axis"));
  if (axis == -1) {
    return KernelSignature("minimum", {"X", "Y"}, {}, {"Out"});
@@ -162,12 +165,6 @@ KernelSignature ElementwiseDivGradOpArgumentMapping(
{"X@GRAD", "Y@GRAD"});
}

-KernelSignature ElementwiseFMinGradOpArgumentMapping(
-    const ArgumentMappingContext& ctx UNUSED) {
-  return KernelSignature(
-      "fmin_grad", {"X", "Y", "Out@GRAD"}, {}, {"X@GRAD", "Y@GRAD"});
-}

KernelSignature ElementwiseDivDoubleGradOpArgumentMapping(
    const ArgumentMappingContext& ctx UNUSED) {
  return KernelSignature("divide_double_grad",
@@ -209,12 +206,6 @@ KernelSignature ElementwiseMulTripleGradOpArgumentMapping(
{"D_X", "D_Y", "D_DOut", "D_DDX", "D_DDY"});
}

-KernelSignature ElementwiseMinGradOpArgumentMapping(
-    const ArgumentMappingContext& ctx UNUSED) {
-  return KernelSignature(
-      "minimum_grad", {"X", "Y", "Out@GRAD"}, {}, {"X@GRAD", "Y@GRAD"});
-}

} // namespace phi

PD_REGISTER_BASE_KERNEL_NAME(elementwise_add, add);
@@ -237,8 +228,6 @@ PD_REGISTER_BASE_KERNEL_NAME(elementwise_mul_grad_grad, multiply_double_grad);
PD_REGISTER_BASE_KERNEL_NAME(elementwise_mul_triple_grad, multiply_triple_grad);
PD_REGISTER_BASE_KERNEL_NAME(elementwise_fmax, fmax);
PD_REGISTER_BASE_KERNEL_NAME(elementwise_fmin, fmin);
-PD_REGISTER_BASE_KERNEL_NAME(elementwise_fmin_grad, fmin_grad);
-PD_REGISTER_BASE_KERNEL_NAME(elementwise_min_grad, minimum_grad);

PD_REGISTER_ARG_MAPPING_FN(elementwise_add,
                           phi::ElementwiseAddOpArgumentMapping);
@@ -282,8 +271,4 @@ PD_REGISTER_ARG_MAPPING_FN(elementwise_fmax,
                           phi::ElementwiseFMaxOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(elementwise_fmin,
                           phi::ElementwiseFMinOpArgumentMapping);
-PD_REGISTER_ARG_MAPPING_FN(elementwise_fmin_grad,
-                           phi::ElementwiseFMinGradOpArgumentMapping);
-PD_REGISTER_ARG_MAPPING_FN(elementwise_min_grad,
-                           phi::ElementwiseMinGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(grad_add, phi::ElementwiseGradAddOpArgumentMapping);
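With the grad mappings and their registrations deleted (the generated code now supplies them), only the forward mapping survives, and it gains an IsForInferShape branch so shape inference always sees the attribute-carrying minimum_raw signature. An illustrative Python paraphrase of that dispatch (map_elementwise_min and the ctx methods are hypothetical stand-ins, not a real Paddle API):

    def map_elementwise_min(ctx):
        # InferShape needs the signature that still carries the axis attribute.
        if ctx.is_for_infer_shape():
            return ("minimum_raw", ["X", "Y"], ["axis"], ["Out"])
        # A default axis lowers to the plain binary kernel...
        if ctx.attr("axis") == -1:
            return ("minimum", ["X", "Y"], [], ["Out"])
        # ...and a non-default axis keeps the raw signature (branch elided above).
        return ("minimum_raw", ["X", "Y"], ["axis"], ["Out"])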
