
Commit

[CleanOps] delete unused op (PaddlePaddle#57898)
* delete unused op
wanghuancoder authored and Frida-a committed Oct 14, 2023
1 parent 23d33d3 commit 54d1d84
Showing 13 changed files with 0 additions and 1,424 deletions.
6 changes: 0 additions & 6 deletions paddle/fluid/operators/fill_op.cc
@@ -78,9 +78,3 @@ REGISTER_OPERATOR(
ops::FillOpVarTypeInference,
paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>);
REGISTER_OP_CPU_KERNEL(fill,
ops::FillKernel<float>,
ops::FillKernel<double>,
ops::FillKernel<int64_t>,
ops::FillKernel<int>,
ops::FillKernel<paddle::platform::float16>);
24 changes: 0 additions & 24 deletions paddle/fluid/operators/fill_op.cu.cc

This file was deleted.

37 changes: 0 additions & 37 deletions paddle/fluid/operators/fill_op.h
@@ -42,42 +42,5 @@ struct FillOpVisitor {
const std::vector<float> &value_;
};

template <typename T>
class FillKernel : public framework::OpKernel<T> {
public:
void Compute(const paddle::framework::ExecutionContext &ctx) const override {
auto &out = GET_DATA_SAFELY(
ctx.Output<phi::DenseTensor>("Out"), "Output", "Out", "Fill");
out.Resize(phi::make_ddim(ctx.Attr<std::vector<int>>("shape")));
auto dtype =
static_cast<framework::proto::VarType::Type>(ctx.Attr<int>("dtype"));
auto phi_dtype = framework::TransToPhiDataType(dtype);
platform::CPUPlace cpu;
auto force_cpu = ctx.Attr<bool>("force_cpu");
out.mutable_data(force_cpu ? cpu : ctx.GetPlace(), phi_dtype);

phi::DenseTensor tensor;

if (force_cpu || platform::is_cpu_place(ctx.GetPlace())) {
tensor.ShareDataWith(out);
} else {
// Always make tensor in CPU memory.
tensor.Resize(out.dims());
tensor.mutable_data(cpu, phi_dtype);
}

framework::VisitDataType(
dtype, FillOpVisitor(&tensor, ctx.Attr<std::vector<float>>("value")));

if (!force_cpu && platform::is_gpu_place(ctx.GetPlace())) {
// Copy tensor to out
framework::TensorCopy(
tensor,
ctx.GetPlace(),
ctx.template device_context<platform::DeviceContext>(),
&out);
}
}
};
} // namespace operators
} // namespace paddle
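
For context, the removed FillKernel followed a fill-on-host-then-copy pattern: the output is resized from the "shape" attribute, a CPU staging tensor is filled element by element from the "value" attribute, and the staged data is copied to the target device only when the output does not already live in CPU memory. Below is a minimal, framework-free sketch of that pattern; fill_on_host and the copy_to_device call in the usage note are illustrative placeholders, not Paddle APIs.

#include <cstddef>
#include <vector>

// Build the filled buffer on the host, mirroring what the removed kernel did
// through FillOpVisitor for the dtype selected by the "dtype" attribute.
// Assumes values.size() equals the element count, as the op's "value"
// attribute does.
template <typename T>
std::vector<T> fill_on_host(const std::vector<int>& shape,
                            const std::vector<float>& values) {
  std::size_t numel = 1;
  for (int d : shape) numel *= static_cast<std::size_t>(d);
  std::vector<T> host(numel);
  for (std::size_t i = 0; i < numel; ++i) {
    host[i] = static_cast<T>(values[i]);  // one attribute value per element
  }
  return host;
}

// Usage sketch: stage on host, then copy out only when the target is a device.
//   auto staged = fill_on_host<float>({2, 3}, values);
//   if (!force_cpu && target_is_gpu) copy_to_device(staged, out);  // placeholder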
12 changes: 0 additions & 12 deletions paddle/fluid/operators/fused/fused_bn_activation_op.h
@@ -88,17 +88,5 @@ class FusedBatchNormActOpInferVarType
}
};

template <typename T, typename DeviceContext>
class FusedBatchNormActKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override;
};

template <typename T, typename DeviceContext>
class FusedBatchNormActGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override;
};

} // namespace operators
} // namespace paddle
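
The removed lines only declared the fused batch-norm + activation kernels; their definitions live elsewhere in the tree. As a rough per-element reminder of what that fusion computes, here is a hedged sketch assuming inference-style statistics and a ReLU activation (the op also supports other configurations):

#include <cmath>

// Normalize with the supplied statistics, apply the affine scale/bias, then
// the activation (ReLU assumed for this sketch).
inline float fused_bn_relu(float x, float mean, float var,
                           float scale, float bias, float eps = 1e-5f) {
  const float normalized = (x - mean) / std::sqrt(var + eps);
  const float y = scale * normalized + bias;  // batch norm affine transform
  return y > 0.f ? y : 0.f;                   // fused ReLU
}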
230 changes: 0 additions & 230 deletions paddle/fluid/operators/pad2d_op.cc
@@ -402,231 +402,6 @@ static inline void GetPaddings(int* paddings,
std::copy(pads.begin(), pads.end(), paddings);
}
}

template <typename T, typename DeviceContext>
class Pad2dCPUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
std::array<int, 4> pads;
GetPaddings(pads.data(), context);
auto mode = context.Attr<std::string>("mode");
auto data_format = context.Attr<std::string>("data_format");
T value = static_cast<T>(context.Attr<float>("pad_value"));

auto* x = context.Input<phi::DenseTensor>("X");
auto in_dims = x->dims();
const T* in_data = x->data<T>();

auto* out = context.Output<phi::DenseTensor>("Out");
if (data_format == "NCHW") {
out->Resize({in_dims[0],
in_dims[1],
in_dims[2] + pads[0] + pads[1],
in_dims[3] + pads[2] + pads[3]});
} else {
out->Resize({in_dims[0],
in_dims[1] + pads[0] + pads[1],
in_dims[2] + pads[2] + pads[3],
in_dims[3]});
}
auto out_dims = out->dims();
T* out_data = out->mutable_data<T>(context.GetPlace());

const int pad_top = pads[0];
const int pad_left = pads[2];
const int num = static_cast<int>(in_dims[0]);
if (data_format == "NCHW") {
const int channels = static_cast<int>(in_dims[1]);
const int in_height = static_cast<int>(in_dims[2]);
const int in_width = static_cast<int>(in_dims[3]);
const int out_height = static_cast<int>(out_dims[2]);
const int out_width = static_cast<int>(out_dims[3]);
if (mode == "reflect") {
Pad2DReflectNCHW(in_data,
num,
channels,
in_height,
in_width,
out_height,
out_width,
pad_top,
pad_left,
out_data);
} else if (mode == "edge") {
Pad2DEdgeNCHW(in_data,
num,
channels,
in_height,
in_width,
out_height,
out_width,
pad_top,
pad_left,
out_data);
} else {
Pad2DConstNCHW(in_data,
num,
channels,
in_height,
in_width,
out_height,
out_width,
pad_top,
pad_left,
value,
out_data);
}
} else {
const int channels = static_cast<int>(in_dims[3]);
const int in_height = static_cast<int>(in_dims[1]);
const int in_width = static_cast<int>(in_dims[2]);
const int out_height = static_cast<int>(out_dims[1]);
const int out_width = static_cast<int>(out_dims[2]);
if (mode == "reflect") {
Pad2DReflectNHWC(in_data,
num,
channels,
in_height,
in_width,
out_height,
out_width,
pad_top,
pad_left,
out_data);
} else if (mode == "edge") {
Pad2DEdgeNHWC(in_data,
num,
channels,
in_height,
in_width,
out_height,
out_width,
pad_top,
pad_left,
out_data);
} else {
Pad2DConstNHWC(in_data,
num,
channels,
in_height,
in_width,
out_height,
out_width,
pad_top,
pad_left,
value,
out_data);
}
}
}
};

template <typename T, typename DeviceContext>
class Pad2dGradCPUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
std::array<int, 4> pads;
GetPaddings(pads.data(), context);
auto mode = context.Attr<std::string>("mode");
auto data_format = context.Attr<std::string>("data_format");
auto* d_out =
context.Input<phi::DenseTensor>(framework::GradVarName("Out"));
auto* d_in = context.Output<phi::DenseTensor>(framework::GradVarName("X"));
auto d_in_dims = d_in->dims();
auto d_out_dims = d_out->dims();
const T* d_out_data = d_out->data<T>();
T* d_in_data = d_in->mutable_data<T>(context.GetPlace());
phi::funcs::SetConstant<phi::CPUContext, T> set_zero;
set_zero(context.template device_context<phi::CPUContext>(),
d_in,
static_cast<T>(0));
const int pad_top = pads[0];
const int pad_left = pads[2];
const int num = static_cast<int>(d_in_dims[0]);
if (data_format == "NCHW") {
const int channels = static_cast<int>(d_in_dims[1]);
const int in_height = static_cast<int>(d_in_dims[2]);
const int in_width = static_cast<int>(d_in_dims[3]);
const int out_height = static_cast<int>(d_out_dims[2]);
const int out_width = static_cast<int>(d_out_dims[3]);
if (mode == "reflect") {
Pad2DGradReflectNCHW(d_in_data,
num,
channels,
in_height,
in_width,
out_height,
out_width,
pad_top,
pad_left,
d_out_data);
} else if (mode == "edge") {
Pad2DGradEdgeNCHW(d_in_data,
num,
channels,
in_height,
in_width,
out_height,
out_width,
pad_top,
pad_left,
d_out_data);
} else {
Pad2DGradConstNCHW(d_in_data,
num,
channels,
in_height,
in_width,
out_height,
out_width,
pad_top,
pad_left,
d_out_data);
}
} else {
const int channels = static_cast<int>(d_in_dims[3]);
const int in_height = static_cast<int>(d_in_dims[1]);
const int in_width = static_cast<int>(d_in_dims[2]);
const int out_height = static_cast<int>(d_out_dims[1]);
const int out_width = static_cast<int>(d_out_dims[2]);
if (mode == "reflect") {
Pad2DGradReflectNHWC(d_in_data,
num,
channels,
in_height,
in_width,
out_height,
out_width,
pad_top,
pad_left,
d_out_data);
} else if (mode == "edge") {
Pad2DGradEdgeNHWC(d_in_data,
num,
channels,
in_height,
in_width,
out_height,
out_width,
pad_top,
pad_left,
d_out_data);
} else {
Pad2DGradConstNHWC(d_in_data,
num,
channels,
in_height,
in_width,
out_height,
out_width,
pad_top,
pad_left,
d_out_data);
}
}
}
};

class Pad2dOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
@@ -872,8 +647,3 @@ REGISTER_OPERATOR(pad2d,
REGISTER_OPERATOR(pad2d_grad,
ops::Pad2dOpGrad,
ops::Pad2dOpGradNoNeedBufferVarsInferer);

PD_REGISTER_STRUCT_KERNEL(
pad2d, CPU, ALL_LAYOUT, ops::Pad2dCPUKernel, float, double, int, int64_t) {}
PD_REGISTER_STRUCT_KERNEL(
pad2d_grad, CPU, ALL_LAYOUT, ops::Pad2dGradCPUKernel, float, double) {}
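
The deleted Pad2dCPUKernel and Pad2dGradCPUKernel mostly read attributes, resized the tensors, and dispatched to the mode- and layout-specific helpers (Pad2DConstNCHW, Pad2DGradConstNCHW, and friends). For reference, here is a simplified, standalone sketch of the constant-mode NCHW index math those helpers implement; it is an illustration written for this note, not the Paddle helpers themselves.

// Forward: every output position either maps back into the input plane or
// receives the constant pad value.
template <typename T>
void pad2d_const_nchw(const T* in, int num, int channels,
                      int in_h, int in_w, int out_h, int out_w,
                      int pad_top, int pad_left, T value, T* out) {
  for (int n = 0; n < num; ++n) {
    for (int c = 0; c < channels; ++c) {
      const T* in_plane = in + (n * channels + c) * in_h * in_w;
      T* out_plane = out + (n * channels + c) * out_h * out_w;
      for (int oh = 0; oh < out_h; ++oh) {
        for (int ow = 0; ow < out_w; ++ow) {
          const int ih = oh - pad_top;
          const int iw = ow - pad_left;
          const bool inside = ih >= 0 && ih < in_h && iw >= 0 && iw < in_w;
          out_plane[oh * out_w + ow] =
              inside ? in_plane[ih * in_w + iw] : value;
        }
      }
    }
  }
}

// Backward for constant mode: gradients at padded positions are dropped, and
// interior positions map back one-to-one (d_in is assumed pre-zeroed, as the
// removed kernel did with SetConstant).
template <typename T>
void pad2d_grad_const_nchw(T* d_in, int num, int channels,
                           int in_h, int in_w, int out_h, int out_w,
                           int pad_top, int pad_left, const T* d_out) {
  for (int n = 0; n < num; ++n) {
    for (int c = 0; c < channels; ++c) {
      T* d_in_plane = d_in + (n * channels + c) * in_h * in_w;
      const T* d_out_plane = d_out + (n * channels + c) * out_h * out_w;
      for (int ih = 0; ih < in_h; ++ih) {
        for (int iw = 0; iw < in_w; ++iw) {
          d_in_plane[ih * in_w + iw] =
              d_out_plane[(ih + pad_top) * out_w + (iw + pad_left)];
        }
      }
    }
  }
}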