Add paddle.lerp API to do a linear interpolation #37253

Merged 17 commits on Dec 8, 2021
146 changes: 146 additions & 0 deletions paddle/fluid/operators/lerp_op.cc
@@ -0,0 +1,146 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/operators/lerp_op.h"

namespace paddle {
namespace operators {

class LerpOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;

void InferShape(framework::InferShapeContext* ctx) const override {
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "lerp");
OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "lerp");
OP_INOUT_CHECK(ctx->HasInput("Weight"), "Input", "Weight", "lerp");
OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "lerp");

auto x_dims = ctx->GetInputDim("X");
auto y_dims = ctx->GetInputDim("Y");
auto w_dims = ctx->GetInputDim("Weight");
framework::DDim out_dims;
out_dims = GetOutputDims(x_dims, y_dims);
if (w_dims.size() > 1 || w_dims[0] != 1) {
out_dims = GetOutputDims(out_dims, w_dims);
}

ctx->SetOutputDim("Out", out_dims);
ctx->ShareLoD("X", /*->*/ "Out");
}

private:
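// NumPy-style broadcast of two shapes: align them from the trailing axes;
// a size-1 axis stretches to match, any other mismatch is an error.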
framework::DDim GetOutputDims(const framework::DDim& s_dims,
const framework::DDim& l_dims) const {
if (s_dims.size() > l_dims.size()) {
return GetOutputDims(l_dims, s_dims);
}
std::vector<int64_t> shapes = framework::vectorize<int64_t>(l_dims);
for (int i = s_dims.size() - 1, j = l_dims.size() - 1; i >= 0; --i, --j) {
int64_t s = s_dims[i];
int64_t l = l_dims[j];
if (s != l) {
if (l == 1) {
shapes[j] = s;
} else if (s != 1) {
PADDLE_THROW(platform::errors::InvalidArgument(
"The shape of tensor a %s:%d must match shape of tensor b "
"%s:%d.",
s_dims.to_str(), i, l_dims.to_str(), j));
}
}
}
return framework::make_ddim(shapes);
}
};

class LerpOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X", "(Tensor), The input tensor of lerp op.");
AddInput("Y", "(Tensor), The input tensor of lerp op.");
AddInput("Weight", "(Tensor, optional), The input tensor of lerp op.");
AddOutput("Out", "(Tensor), The output tensor of lerp op.");
AddComment(R"DOC(
Lerp Operator.

This operator computes a linear interpolation between $X$ and $Y$ using $Weight$ as the interpolation factor.

The equation is:

$$Out = X + Weight * (Y - X)$$

Both inputs $X$ and $Y$ may optionally carry LoD (Level of Details) information,
but the output shares its LoD information only with input $X$.

)DOC");
}
};

class LerpGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;

void InferShape(framework::InferShapeContext* ctx) const override {
if (ctx->HasOutput(framework::GradVarName("X"))) {
ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
}
if (ctx->HasOutput(framework::GradVarName("Y"))) {
ctx->SetOutputDim(framework::GradVarName("Y"), ctx->GetInputDim("Y"));
}
}
};

template <typename T>
class LerpOpGradMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

void Apply(GradOpPtr<T> op) const override {
op->SetType("lerp_grad");
op->SetInput("X", this->Input("X"));
op->SetInput("Y", this->Input("Y"));
op->SetInput("Weight", this->Input("Weight"));
op->SetInput("Out", this->Output("Out"));
op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
op->SetOutput(framework::GradVarName("Y"), this->InputGrad("Y"));
op->SetAttrMap(this->Attrs());
}
};

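// Allow Out to share X's buffer when the framework chooses the inplace path.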
DECLARE_INPLACE_OP_INFERER(LerpInplaceInferer, {"X", "Out"});

} // namespace operators
} // namespace paddle

REGISTER_OPERATOR(
lerp, paddle::operators::LerpOp, paddle::operators::LerpOpMaker,
paddle::operators::LerpOpGradMaker<paddle::framework::OpDesc>,
paddle::operators::LerpOpGradMaker<paddle::imperative::OpBase>,
paddle::operators::LerpInplaceInferer);

REGISTER_OPERATOR(lerp_grad, paddle::operators::LerpGradOp);

REGISTER_OP_CPU_KERNEL(
lerp,
paddle::operators::LerpKernel<paddle::platform::CPUDeviceContext, float>,
paddle::operators::LerpKernel<paddle::platform::CPUDeviceContext, double>);

REGISTER_OP_CPU_KERNEL(
lerp_grad,
paddle::operators::LerpGradKernel<paddle::platform::CPUDeviceContext,
float>,
paddle::operators::LerpGradKernel<paddle::platform::CPUDeviceContext,
double>);
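The forward pass implements the textbook linear interpolation Out = X + Weight * (Y - X), with X, Y, and Weight broadcast to a common shape by the rule GetOutputDims encodes. As a reference point, here is a minimal standalone C++ sketch of that arithmetic for equal-shape inputs and a scalar weight; it only illustrates the formula and is not the Paddle kernel, which handles broadcasting via Eigen in lerp_op.h below.

#include <cstddef>
#include <iostream>
#include <vector>

// Standalone sketch of the lerp formula: out[i] = x[i] + w * (y[i] - x[i]).
// The Paddle kernel additionally broadcasts X, Y, and Weight; this sketch
// assumes equal-length inputs and a scalar weight for clarity.
std::vector<double> LerpReference(const std::vector<double>& x,
                                  const std::vector<double>& y, double w) {
  std::vector<double> out(x.size());
  for (std::size_t i = 0; i < x.size(); ++i) {
    out[i] = x[i] + w * (y[i] - x[i]);
  }
  return out;
}

int main() {
  std::vector<double> x{1.0, 2.0, 3.0, 4.0};
  std::vector<double> y{10.0, 10.0, 10.0, 10.0};
  // With w = 0.5 the result is the midpoint of x and y: 5.5 6 6.5 7
  for (double v : LerpReference(x, y, 0.5)) std::cout << v << ' ';
  std::cout << '\n';
  return 0;
}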
27 changes: 27 additions & 0 deletions paddle/fluid/operators/lerp_op.cu
@@ -0,0 +1,27 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/operators/lerp_op.h"

REGISTER_OP_CUDA_KERNEL(
lerp,
paddle::operators::LerpKernel<paddle::platform::CUDADeviceContext, float>,
paddle::operators::LerpKernel<paddle::platform::CUDADeviceContext, double>);

REGISTER_OP_CUDA_KERNEL(
lerp_grad,
paddle::operators::LerpGradKernel<paddle::platform::CUDADeviceContext,
float>,
paddle::operators::LerpGradKernel<paddle::platform::CUDADeviceContext,
double>);
217 changes: 217 additions & 0 deletions paddle/fluid/operators/lerp_op.h
@@ -0,0 +1,217 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"

#ifdef _WIN32
#ifndef NOMINMAX
#define NOMINMAX // msvc max/min macro conflict with std::min/max
#endif
#endif

namespace paddle {
namespace operators {

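// Left-pad in_dims with leading 1s so it has exactly `rank` dimensions,
// aligning shapes from their trailing axes for broadcasting.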
static framework::DDim ExtendDims2Rank(const framework::DDim& in_dims,
int rank) {
if (in_dims.size() == rank) {
return in_dims;
}
std::vector<int64_t> shapes(rank, 1);
for (int i = in_dims.size() - 1, j = rank - 1; i >= 0; --i, --j) {
shapes[j] = in_dims[i];
}
return framework::make_ddim(shapes);
}

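// Per-axis Eigen broadcast factors: 1 where the input already matches the
// output size, and the output size where the input axis is 1 (the only
// mismatch allowed by shape inference).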
template <size_t D>
static void GetBroadcastDims(const framework::DDim& in_dims,
const framework::DDim& out_dims,
Eigen::DSizes<int, D>* bcast_dims) {
for (size_t i = 0; i < D; ++i) {
if (in_dims[i] == out_dims[i]) {
(*bcast_dims)[i] = 1;
} else {
(*bcast_dims)[i] = std::max(in_dims[i], out_dims[i]);
}
}
}

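// Forward: Out = X + Weight * (Y - X), with X, Y, and Weight broadcast to
// the output shape.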
template <typename DeviceContext, typename T, size_t D>
static void LerpFunction(const framework::ExecutionContext& ctx) {
auto x = ctx.Input<framework::Tensor>("X");
auto y = ctx.Input<framework::Tensor>("Y");
auto w = ctx.Input<framework::Tensor>("Weight");
auto out = ctx.Output<framework::Tensor>("Out");
out->mutable_data<T>(ctx.GetPlace());

auto out_dims = out->dims();
auto x_dims = ExtendDims2Rank(x->dims(), D);
auto y_dims = ExtendDims2Rank(y->dims(), D);
auto w_dims = ExtendDims2Rank(w->dims(), D);
Eigen::DSizes<int, D> x_bcast_dims;
Eigen::DSizes<int, D> y_bcast_dims;
Eigen::DSizes<int, D> w_bcast_dims;
GetBroadcastDims<D>(x_dims, out_dims, &x_bcast_dims);
GetBroadcastDims<D>(y_dims, out_dims, &y_bcast_dims);
GetBroadcastDims<D>(w_dims, out_dims, &w_bcast_dims);

auto eigen_x = framework::EigenTensor<T, D>::From(*x, x_dims);
auto eigen_y = framework::EigenTensor<T, D>::From(*y, y_dims);
auto eigen_w = framework::EigenTensor<T, D>::From(*w, w_dims);
auto eigen_out = framework::EigenTensor<T, D>::From(*out);

auto& place = *ctx.template device_context<DeviceContext>().eigen_device();
eigen_out.device(place) =
eigen_x.broadcast(x_bcast_dims) +
eigen_w.broadcast(w_bcast_dims) *
(eigen_y.broadcast(y_bcast_dims) - eigen_x.broadcast(x_bcast_dims));
}

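// Backward: dX = (1 - Weight) * dOut and dY = Weight * dOut, with each
// gradient summed back over any axes that were broadcast in the forward pass.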
template <typename DeviceContext, typename T, size_t D>
static void LerpGradFunction(const framework::ExecutionContext& ctx) {
auto w = ctx.Input<framework::Tensor>("Weight");
auto dout = ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
auto dx = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
auto dy = ctx.Output<framework::Tensor>(framework::GradVarName("Y"));

auto dout_dims = dout->dims();
auto dx_dims = ExtendDims2Rank(dx->dims(), D);
auto dy_dims = ExtendDims2Rank(dy->dims(), D);
auto w_dims = ExtendDims2Rank(w->dims(), D);
Eigen::DSizes<int, D> dx_bcast_dims;
Eigen::DSizes<int, D> dy_bcast_dims;
Eigen::DSizes<int, D> w_bcast_dims;
GetBroadcastDims<D>(dx_dims, dout_dims, &dx_bcast_dims);
GetBroadcastDims<D>(dy_dims, dout_dims, &dy_bcast_dims);
GetBroadcastDims<D>(w_dims, dout_dims, &w_bcast_dims);

auto eigen_w = framework::EigenTensor<T, D>::From(*w, w_dims);
auto eigen_dout = framework::EigenTensor<T, D>::From(*dout);

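// View each output axis as a (broadcast_factor, input_dim) pair so that
// summing over the even positions (reduce_dims) collapses the broadcast
// copies and restores the input's shape.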
Eigen::DSizes<int, D * 2> dx_reshape_dims;
Eigen::DSizes<int, D * 2> dy_reshape_dims;
Eigen::DSizes<int, D> reduce_dims;
for (int i = 0; i < dout_dims.size(); ++i) {
dx_reshape_dims[2 * i] = dx_bcast_dims[i];
dx_reshape_dims[2 * i + 1] = dx_dims[i];
dy_reshape_dims[2 * i] = dy_bcast_dims[i];
dy_reshape_dims[2 * i + 1] = dy_dims[i];
reduce_dims[i] = 2 * i;
}

auto& place = *ctx.template device_context<DeviceContext>().eigen_device();

if (dx) {
dx->mutable_data<T>(ctx.GetPlace());
auto eigen_dx = framework::EigenTensor<T, D>::From(*dx, dx_dims);
auto eigen_expr = (1 - eigen_w.broadcast(w_bcast_dims)) * eigen_dout;
eigen_dx.device(place) = eigen_expr.reshape(dx_reshape_dims)
.sum(reduce_dims)
.reshape(eigen_dx.dimensions());
}
if (dy) {
dy->mutable_data<T>(ctx.GetPlace());
auto eigen_dy = framework::EigenTensor<T, D>::From(*dy, dy_dims);
auto eigen_expr = eigen_w.broadcast(w_bcast_dims) * eigen_dout;
eigen_dy.device(place) = eigen_expr.reshape(dy_reshape_dims)
.sum(reduce_dims)
.reshape(eigen_dy.dimensions());
}
}

template <typename DeviceContext, typename T>
class LerpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
int rank = ctx.Output<framework::Tensor>("Out")->dims().size();
PADDLE_ENFORCE_GE(
rank, 1,
platform::errors::InvalidArgument(
"The number of dimensions for LerpOp must be "
"greater than or equal to 1, but the value received is %d.",
rank));
PADDLE_ENFORCE_LE(
rank, 6, platform::errors::InvalidArgument(
"The number of dimensions for LerpOp must be "
"less than or equal to 6, but the value received is %d.",
rank));
switch (rank) {
case 1:
LerpFunction<DeviceContext, T, 1>(ctx);
break;
case 2:
LerpFunction<DeviceContext, T, 2>(ctx);
break;
case 3:
LerpFunction<DeviceContext, T, 3>(ctx);
break;
case 4:
LerpFunction<DeviceContext, T, 4>(ctx);
break;
case 5:
LerpFunction<DeviceContext, T, 5>(ctx);
break;
case 6:
LerpFunction<DeviceContext, T, 6>(ctx);
break;
}
}
};

template <typename DeviceContext, typename T>
class LerpGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
int rank = ctx.Input<framework::Tensor>(framework::GradVarName("Out"))
->dims()
.size();
PADDLE_ENFORCE_GE(
rank, 1,
platform::errors::InvalidArgument(
"The number of dimensions for LerpGradOp must be "
"greater than or equal to 1, but the value received is %d.",
rank));
PADDLE_ENFORCE_LE(
rank, 6, platform::errors::InvalidArgument(
"The number of dimensions for LerpGradOp must be "
"less than or equal to 6, but the value received is %d.",
rank));
switch (rank) {
case 1:
LerpGradFunction<DeviceContext, T, 1>(ctx);
break;
case 2:
LerpGradFunction<DeviceContext, T, 2>(ctx);
break;
case 3:
LerpGradFunction<DeviceContext, T, 3>(ctx);
break;
case 4:
LerpGradFunction<DeviceContext, T, 4>(ctx);
break;
case 5:
LerpGradFunction<DeviceContext, T, 5>(ctx);
break;
case 6:
LerpGradFunction<DeviceContext, T, 6>(ctx);
break;
}
}
};

} // namespace operators
} // namespace paddle
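
Differentiating Out = X + Weight * (Y - X) gives the gradients that LerpGradFunction computes: dX = (1 - Weight) * dOut and dY = Weight * dOut. When an input was broadcast in the forward pass, its gradient is additionally summed back over the broadcast axes, which is what the reshape/sum(reduce_dims) pattern in LerpGradFunction does. A minimal standalone C++ sketch of the unbroadcast case with a scalar weight, for illustration only:

#include <cstddef>
#include <iostream>
#include <vector>

// Standalone sketch of the lerp backward formulas for a scalar weight and
// equal-shape inputs (i.e. no broadcast reduction is needed):
//   dX = (1 - w) * dOut,   dY = w * dOut
void LerpGradReference(const std::vector<double>& dout, double w,
                       std::vector<double>* dx, std::vector<double>* dy) {
  dx->resize(dout.size());
  dy->resize(dout.size());
  for (std::size_t i = 0; i < dout.size(); ++i) {
    (*dx)[i] = (1.0 - w) * dout[i];
    (*dy)[i] = w * dout[i];
  }
}

int main() {
  std::vector<double> dout{1.0, 1.0, 1.0}, dx, dy;
  LerpGradReference(dout, 0.25, &dx, &dy);
  std::cout << dx[0] << ' ' << dy[0] << '\n';  // prints: 0.75 0.25
  return 0;
}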