Add paddle.lerp API to do linear interpolation #37253

Merged: 17 commits, Dec 8, 2021
Changes from 1 commit
33 changes: 18 additions & 15 deletions paddle/fluid/operators/lerp_op.h
@@ -16,14 +16,17 @@
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"

#ifdef _WIN32
#ifndef NOMINMAX
#define NOMINMAX
#define NOMINMAX // msvc max/min macro conflict with std::min/max
#endif
#endif

namespace paddle {
namespace operators {

static framework::DDim GetNewDims(const framework::DDim& in_dims, int rank) {
static framework::DDim ExtendDims2Rank(const framework::DDim& in_dims,
int rank) {
if (in_dims.size() == rank) {
return in_dims;
}
@@ -35,7 +38,7 @@ static framework::DDim GetNewDims(const framework::DDim& in_dims, int rank) {
}

template <size_t D>
static void GetBraodcastDims(const framework::DDim& in_dims,
static void GetBroadcastDims(const framework::DDim& in_dims,
const framework::DDim& out_dims,
Eigen::DSizes<int, D>* bcast_dims) {
for (size_t i = 0; i < D; ++i) {
Expand All @@ -56,15 +59,15 @@ static void LerpFunction(const framework::ExecutionContext& ctx) {
out->mutable_data<T>(ctx.GetPlace());

auto out_dims = out->dims();
auto x_dims = GetNewDims(x->dims(), D);
auto y_dims = GetNewDims(y->dims(), D);
auto w_dims = GetNewDims(w->dims(), D);
auto x_dims = ExtendDims2Rank(x->dims(), D);
auto y_dims = ExtendDims2Rank(y->dims(), D);
auto w_dims = ExtendDims2Rank(w->dims(), D);
Eigen::DSizes<int, D> x_bcast_dims;
Eigen::DSizes<int, D> y_bcast_dims;
Eigen::DSizes<int, D> w_bcast_dims;
GetBraodcastDims<D>(x_dims, out_dims, &x_bcast_dims);
GetBraodcastDims<D>(y_dims, out_dims, &y_bcast_dims);
GetBraodcastDims<D>(w_dims, out_dims, &w_bcast_dims);
GetBroadcastDims<D>(x_dims, out_dims, &x_bcast_dims);
GetBroadcastDims<D>(y_dims, out_dims, &y_bcast_dims);
GetBroadcastDims<D>(w_dims, out_dims, &w_bcast_dims);

auto eigen_x = framework::EigenTensor<T, D>::From(*x, x_dims);
auto eigen_y = framework::EigenTensor<T, D>::From(*y, y_dims);
@@ -86,15 +89,15 @@ static void LerpGradFunction(const framework::ExecutionContext& ctx) {
auto dy = ctx.Output<framework::Tensor>(framework::GradVarName("Y"));

auto dout_dims = dout->dims();
auto dx_dims = GetNewDims(dx->dims(), D);
auto dy_dims = GetNewDims(dy->dims(), D);
auto w_dims = GetNewDims(w->dims(), D);
auto dx_dims = ExtendDims2Rank(dx->dims(), D);
auto dy_dims = ExtendDims2Rank(dy->dims(), D);
auto w_dims = ExtendDims2Rank(w->dims(), D);
Eigen::DSizes<int, D> dx_bcast_dims;
Eigen::DSizes<int, D> dy_bcast_dims;
Eigen::DSizes<int, D> w_bcast_dims;
GetBraodcastDims<D>(dx_dims, dout_dims, &dx_bcast_dims);
GetBraodcastDims<D>(dy_dims, dout_dims, &dy_bcast_dims);
GetBraodcastDims<D>(w_dims, dout_dims, &w_bcast_dims);
GetBroadcastDims<D>(dx_dims, dout_dims, &dx_bcast_dims);
GetBroadcastDims<D>(dy_dims, dout_dims, &dy_bcast_dims);
GetBroadcastDims<D>(w_dims, dout_dims, &w_bcast_dims);

auto eigen_w = framework::EigenTensor<T, D>::From(*w, w_dims);
auto eigen_dout = framework::EigenTensor<T, D>::From(*dout);
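For anyone reading the kernel change above, here is a minimal NumPy sketch (not Paddle code) of the shape handling done by `ExtendDims2Rank` and `GetBroadcastDims`: pad the input shape with leading 1s up to the output rank, then compute per-axis replication factors. The factor convention (output size where the input axis is 1, otherwise 1) is assumed from the usual Eigen broadcast pattern, since the loop body is collapsed in this diff.

```python
import numpy as np

def extend_dims_to_rank(shape, rank):
    # Mirror ExtendDims2Rank: prepend 1s until len(shape) == rank.
    return (1,) * (rank - len(shape)) + tuple(shape)

def get_broadcast_dims(in_shape, out_shape):
    # Assumed Eigen-style convention: an axis of size 1 is replicated up to
    # the output size; matching axes get a factor of 1.
    return tuple(o if i == 1 else 1 for i, o in zip(in_shape, out_shape))

out_shape = (2, 2, 2, 5)
x_shape = extend_dims_to_rank((2, 5), len(out_shape))   # (1, 1, 2, 5)
x_bcast = get_broadcast_dims(x_shape, out_shape)        # (2, 2, 1, 1)

x = np.arange(10.0).reshape(2, 5)
x_full = np.tile(x.reshape(x_shape), x_bcast)           # explicit broadcast copy
assert x_full.shape == out_shape
```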
24 changes: 18 additions & 6 deletions python/paddle/fluid/tests/unittests/test_lerp_op.py
@@ -81,8 +81,8 @@ def setUp(self):
self.init_dtype()
self.x = np.arange(1., 5.).astype(self.dtype)
self.y = np.full(4, 10.).astype(self.dtype)
self.w = np.asarray([0.5]).astype(self.dtype)
self.res_ref = self.x + 0.5 * (self.y - self.x)
self.w = np.asarray([0.75]).astype(self.dtype)
self.res_ref = self.x + self.w * (self.y - self.x)
self.place = [paddle.CPUPlace()]
if core.is_compiled_with_cuda():
self.place.append(paddle.CUDAPlace(0))
@@ -113,14 +113,26 @@ def run(place):
paddle.disable_static(place)
x = paddle.to_tensor(self.x)
y = paddle.to_tensor(self.y)
w = paddle.to_tensor(np.full(4, 0.5).astype(self.dtype))
w = paddle.to_tensor(np.full(4, 0.75).astype(self.dtype))
out = paddle.lerp(x, y, w)
self.assertEqual(np.allclose(self.res_ref, out.numpy()), True)
paddle.enable_static()

for place in self.place:
run(place)

def test_inplace_api(self):
def run(place):
paddle.disable_static(place)
x = paddle.to_tensor(self.x)
y = paddle.to_tensor(self.y)
out = x.lerp_(y, 0.75)
self.assertEqual(np.allclose(self.res_ref, out.numpy()), True)
paddle.enable_static()

for place in self.place:
run(place)

def test_x_broadcast_y(self):
paddle.disable_static()
x = np.arange(1., 21.).astype(self.dtype).reshape([2, 2, 5])
@@ -132,9 +144,9 @@ def test_x_broadcast_y(self):

def test_x_y_broadcast_w(self):
paddle.disable_static()
x = np.arange(1., 11.).astype(self.dtype).reshape([2, 5])
y = np.full(20, 10.).astype(self.dtype).reshape([2, 2, 5])
w = np.full(40, 0.5).astype(self.dtype).reshape([2, 2, 2, 5])
x = np.arange(11., 21.).astype(self.dtype).reshape([2, 5])
y = np.full(20, 7.5).astype(self.dtype).reshape([2, 2, 5])
w = np.full(40, 0.225).astype(self.dtype).reshape([2, 2, 2, 5])
out = paddle.lerp(
paddle.to_tensor(x), paddle.to_tensor(y), paddle.to_tensor(w))
res_ref = x + w * (y - x)
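As a quick sanity check outside the unit-test harness, the broadcast case above can be reproduced directly. This is a sketch assuming a Paddle build that already includes this PR; it only restates the reference formula `x + w * (y - x)` used by the tests.

```python
import numpy as np
import paddle

x = np.arange(11., 21., dtype='float32').reshape([2, 5])
y = np.full([2, 2, 5], 7.5, dtype='float32')
w = np.full([2, 2, 2, 5], 0.225, dtype='float32')

out = paddle.lerp(paddle.to_tensor(x), paddle.to_tensor(y), paddle.to_tensor(w))

# NumPy broadcasts x, y and w to (2, 2, 2, 5), matching res_ref in the test.
res_ref = x + w * (y - x)
assert np.allclose(res_ref, out.numpy())
```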
3 changes: 3 additions & 0 deletions python/paddle/tensor/__init__.py
@@ -190,6 +190,7 @@
from .math import lgamma # noqa: F401
from .math import diagonal # noqa: F401
from .math import lerp # noqa: F401
from .math import lerp_ # noqa: F401
from .math import rad2deg # noqa: F401
from .math import deg2rad # noqa: F401
from .math import diff # noqa: F401
@@ -407,6 +408,8 @@
'solve',
'triangular_solve',
'diff',
'lerp',
'lerp_',
'angle',
]

17 changes: 15 additions & 2 deletions python/paddle/tensor/math.py
@@ -2646,19 +2646,32 @@ def lerp(x, y, weight, name=None):
if in_dygraph_mode():
check_type(weight, 'weight', (float, paddle.Tensor, Variable), 'lerp')
if isinstance(weight, float):
weight = paddle.to_tensor(weight, dtype=x.dtype, place=paddle.CPUPlace())
weight = paddle.to_tensor(weight, dtype=x.dtype)
return _C_ops.lerp(x, y, weight)

check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'lerp')
check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'lerp')
check_variable_and_dtype(weight, 'Weight', ['float32', 'float64'], 'lerp')
check_variable_and_dtype(weight, 'weight', ['float32', 'float64'], 'lerp')

helper = LayerHelper('lerp', **locals())
inputs = {'X': x, 'Y': y, 'Weight': weight}
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='lerp', inputs=inputs, outputs={'Out': out})
return out

@inplace_apis_in_dygraph_only
def lerp_(x, y, weight, name=None):
r"""
Inplace version of ``lerp`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_tensor_lerp`.
"""
out_shape = broadcast_shape(x.shape, y.shape)
if isinstance(weight, (paddle.Tensor, Variable)):
out_shape = broadcast_shape(out_shape, weight.shape)
if out_shape != x.shape:
raise ValueError("The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(out_shape, x.shape))
return lerp(x, y, weight, name)

def rad2deg(x, name=None):
"""
Convert each of the elements of input x from angles in radians to degrees.
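To illustrate the in-place contract enforced by `lerp_` above, here is a short dygraph sketch, again assuming a Paddle build that includes this PR: the result is written back into `x`, so the broadcast shape of the operands must equal `x.shape`, otherwise a `ValueError` is raised.

```python
import paddle

x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
y = paddle.full([4], 10.0)

x.lerp_(y, 0.75)          # in place: x becomes x + 0.75 * (y - x)
print(x.numpy())          # approximately [7.75, 8.0, 8.25, 8.5]

# A y that would broadcast the output beyond x.shape is rejected by the
# shape check in lerp_ above.
try:
    x.lerp_(paddle.full([2, 4], 10.0), 0.5)
except ValueError as err:
    print(err)
```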