Revert "[Phi] trans logsumexp op" #41068

Merged 1 commit on Mar 29, 2022.
93 changes: 85 additions & 8 deletions paddle/fluid/operators/reduce_ops/logsumexp_op.cc
@@ -12,20 +12,91 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/operators/reduce_ops/logsumexp_op.h"
#include <algorithm>
#include <string>
#include <vector>
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/operators/reduce_ops/reduce_op_function.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/unary.h"

namespace paddle {
namespace operators {

class LogsumexpOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;

void InferShape(framework::InferShapeContext* ctx) const override {
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "logsumexp");
OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "logsumexp");
auto x_dims = ctx->GetInputDim("X");
auto x_rank = x_dims.size();
PADDLE_ENFORCE_LE(x_rank, 4,
platform::errors::InvalidArgument(
"The input tensor X's dimensions of logsumexp "
"should be less or equal than 4. But received X's "
"dimensions = %d, X's shape = [%s].",
x_rank, x_dims));
auto axis = ctx->Attrs().Get<std::vector<int>>("axis");
PADDLE_ENFORCE_GT(
axis.size(), 0,
platform::errors::InvalidArgument(
"The size of axis of logsumexp "
"should be greater than 0. But received the size of axis "
"of logsumexp is %d.",
axis.size()));

for (size_t i = 0; i < axis.size(); i++) {
PADDLE_ENFORCE_LT(axis[i], x_rank,
platform::errors::InvalidArgument(
"axis[%d] should be in the "
"range [-D, D), where D is the dimensions of X and "
"D is %d. But received axis[%d] = %d.",
i, x_rank, i, axis[i]));
PADDLE_ENFORCE_GE(axis[i], -x_rank,
platform::errors::InvalidArgument(
"axis[%d] should be in the "
"range [-D, D), where D is the dimensions of X and "
"D is %d. But received axis[%d] = %d.",
i, x_rank, i, axis[i]));
if (axis[i] < 0) {
axis[i] += x_rank;
}
}

bool keepdim = ctx->Attrs().Get<bool>("keepdim");
bool reduce_all = ctx->Attrs().Get<bool>("reduce_all");
auto dims_vector = vectorize(x_dims);
if (reduce_all) {
if (keepdim)
ctx->SetOutputDim("Out",
phi::make_ddim(std::vector<int64_t>(x_rank, 1)));
else
ctx->SetOutputDim("Out", {1});
} else {
auto dims_vector = vectorize(x_dims);
if (keepdim) {
for (size_t i = 0; i < axis.size(); ++i) {
dims_vector[axis[i]] = 1;
}
} else {
const int kDelFlag = -1;
for (size_t i = 0; i < axis.size(); ++i) {
dims_vector[axis[i]] = kDelFlag;
}
dims_vector.erase(
std::remove(dims_vector.begin(), dims_vector.end(), kDelFlag),
dims_vector.end());
}
if (!keepdim && dims_vector.size() == 0) {
dims_vector.push_back(1);
}
auto out_dims = phi::make_ddim(dims_vector);
ctx->SetOutputDim("Out", out_dims);
if (axis.size() > 0 && axis[0] != 0) {
// Only pass LoD when not reducing on the first dim.
ctx->ShareLoD("X", /*->*/ "Out");
}
}
}
};
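
Reviewer-side note: the output-shape rule restored in `InferShape` above can be mirrored in a small standalone sketch. Everything below (`InferLogsumexpShape`, the `main` driver) is hypothetical illustration written for this review, not code from the PR:

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

std::vector<int64_t> InferLogsumexpShape(std::vector<int64_t> dims,
                                         std::vector<int> axis, bool keepdim,
                                         bool reduce_all) {
  const int rank = static_cast<int>(dims.size());
  for (int& a : axis) {
    if (a < 0) a += rank;  // normalize negative axes, as InferShape does
  }
  if (reduce_all) {
    return keepdim ? std::vector<int64_t>(rank, 1) : std::vector<int64_t>{1};
  }
  if (keepdim) {
    for (int a : axis) dims[a] = 1;  // reduced dims collapse to size 1
  } else {
    const int64_t kDelFlag = -1;
    for (int a : axis) dims[a] = kDelFlag;  // mark reduced dims for removal
    dims.erase(std::remove(dims.begin(), dims.end(), kDelFlag), dims.end());
    if (dims.empty()) dims.push_back(1);  // reducing every dim leaves [1]
  }
  return dims;
}

int main() {
  // X with shape [2, 3, 4], axis = [1], keepdim = false -> Out shape [2, 4]
  auto out = InferLogsumexpShape({2, 3, 4}, {1}, false, false);
  for (int64_t d : out) std::printf("%lld ", static_cast<long long>(d));
}
```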

class LogsumexpOpMaker : public framework::OpProtoAndCheckerMaker {
@@ -93,10 +164,16 @@ class LogsumexpGradOpMaker : public framework::SingleGradOpMaker<T> {
} // namespace paddle

namespace ops = paddle::operators;
-DECLARE_INFER_SHAPE_FUNCTOR(logsumexp, LogsumexpInferShapeFunctor,
-                            PD_INFER_META(phi::LogsumexpInferMeta));
-
REGISTER_OPERATOR(logsumexp, ops::LogsumexpOp, ops::LogsumexpOpMaker,
                  ops::LogsumexpGradOpMaker<paddle::framework::OpDesc>,
-                  ops::LogsumexpGradOpMaker<paddle::imperative::OpBase>,
-                  LogsumexpInferShapeFunctor);
+                  ops::LogsumexpGradOpMaker<paddle::imperative::OpBase>);
+REGISTER_OPERATOR(logsumexp_grad, ops::LogsumexpGrapOp);

REGISTER_OP_CPU_KERNEL(
logsumexp, ops::LogsumexpKernel<paddle::platform::CPUDeviceContext, float>,
ops::LogsumexpKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
logsumexp_grad,
ops::LogsumexpGradKernel<paddle::platform::CPUDeviceContext, float>,
ops::LogsumexpGradKernel<paddle::platform::CPUDeviceContext, double>);
@@ -1,4 +1,4 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,12 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/logsumexp_kernel.h"
#include "paddle/fluid/operators/reduce_ops/logsumexp_op.h"

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
namespace ops = paddle::operators;

#include "paddle/phi/kernels/impl/logsumexp_kernel_impl.h"

PD_REGISTER_KERNEL(
logsumexp, CPU, ALL_LAYOUT, phi::LogsumexpKernel, float, double) {}
REGISTER_OP_CUDA_KERNEL(
logsumexp, ops::LogsumexpKernel<paddle::platform::CUDADeviceContext, float>,
ops::LogsumexpKernel<paddle::platform::CUDADeviceContext, double>);
170 changes: 170 additions & 0 deletions paddle/fluid/operators/reduce_ops/logsumexp_op.h
@@ -0,0 +1,170 @@
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <algorithm>
#include <vector>
#include "paddle/fluid/operators/reduce_ops/reduce_op_function.h"

namespace paddle {
namespace operators {

#define HANDLE_DIM(NDIM, RDIM) \
if (ndim == NDIM && rdim == RDIM) { \
paddle::operators::ReduceFunctor<DeviceContext, OutT, NDIM, RDIM, \
LogsumexpFunctor>( \
context.template device_context<DeviceContext>(), *input, output, \
axis, keepdim); \
}

struct LogsumexpFunctor {
template <typename DeviceContext, typename X, typename Y, typename Dim>
void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) {
auto x_dim = x->dimensions();
auto t_dim = x_dim;
for (int i = 0; i < static_cast<int>(dim.size()); i++) {
t_dim[dim[i]] = 1;
}

auto r_dim = x_dim;
for (int i = 0; i < static_cast<int>(r_dim.size()); i++) {
r_dim[i] = 1;
}
for (int i = 0; i < static_cast<int>(dim.size()); i++) {
r_dim[dim[i]] = x_dim[dim[i]];
}

auto y_dim = y->dimensions();
auto x_max = x->maximum(dim);
y->device(place) =
(x_max +
(*x - x_max.reshape(t_dim).broadcast(r_dim)).exp().sum(dim).log())
.reshape(y_dim);
}
};
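
For reference, the Eigen expression above is the standard max-shift evaluation of logsumexp; subtracting the maximum before exponentiating keeps `exp` from overflowing:

$$ y = \max_i x_i + \log \sum_i \exp\bigl(x_i - \max_j x_j\bigr) $$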

struct LogsumexpGradFunctor {
template <typename DeviceContext, typename X, typename Y, typename DX,
typename DY, typename Dim>
void operator()(const DeviceContext& place, X* x, Y* y, DX* dx, DY* dy,
const Dim& dim, int size) {
dx->device(place) = dy->broadcast(dim) * (*x - y->broadcast(dim)).exp();
}
};
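
This one-liner encodes the closed-form derivative: with $y = \log \sum_j e^{x_j}$,

$$ \frac{\partial y}{\partial x_i} = \frac{e^{x_i}}{\sum_j e^{x_j}} = e^{x_i - y}, \qquad dx_i = dy \cdot e^{x_i - y}, $$

which is exactly the broadcast-multiply-exp expression computed above.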

template <typename DeviceContext, typename OutT>
class LogsumexpKernel : public framework::OpKernel<OutT> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* input = context.Input<Tensor>("X");
auto* output = context.Output<Tensor>("Out");
output->mutable_data<OutT>(context.GetPlace());

auto axis = context.Attr<std::vector<int>>("axis");
auto keepdim = context.Attr<bool>("keepdim");
auto reduce_all = context.Attr<bool>("reduce_all");

const auto& input_dim_size = input->dims().size();
// If axis covers every dimension of the input, treat it as a full reduction.
reduce_all |= (static_cast<const int>(axis.size()) == input_dim_size);

if (reduce_all) {
// Flatten and reduce 1-D tensor
auto x = EigenVector<OutT>::Flatten(*input);
auto out = EigenScalar<OutT>::From(*output);
auto& place =
*context.template device_context<DeviceContext>().eigen_device();
auto reduce_dim = Eigen::array<int, 1>({{0}});
LogsumexpFunctor()(place, &x, &out, reduce_dim);
} else {
int ndim = input_dim_size;
int rdim = axis.size();
// Higher-rank cases are commented out for now to speed up compilation.
// HANDLE_DIM(6, 5);
// HANDLE_DIM(6, 4);
// HANDLE_DIM(6, 3);
// HANDLE_DIM(6, 2);
// HANDLE_DIM(6, 1);
// HANDLE_DIM(5, 4);
// HANDLE_DIM(5, 3);
// HANDLE_DIM(5, 2);
// HANDLE_DIM(5, 1);
HANDLE_DIM(4, 3);
HANDLE_DIM(4, 2);
HANDLE_DIM(4, 1);
HANDLE_DIM(3, 2);
HANDLE_DIM(3, 1);
HANDLE_DIM(2, 1);
}
}
};
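
The `reduce_all` path above is equivalent to a flat, numerically stable reduction over all elements. A minimal plain-C++ sketch of the same computation (hypothetical helper, no Eigen or Paddle dependencies):

```cpp
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Stable logsumexp over a flattened tensor: shift by the max before exp.
double LogsumexpAll(const std::vector<double>& x) {
  double m = *std::max_element(x.begin(), x.end());
  double s = 0.0;
  for (double v : x) s += std::exp(v - m);
  return m + std::log(s);
}

int main() {
  // Without the max shift, exp(1000.0) would overflow to inf.
  std::printf("%.6f\n", LogsumexpAll({1000.0, 1000.0}));  // 1000.693147
}
```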

template <typename DeviceContext, typename T>
class LogsumexpGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* input = context.Input<Tensor>("X");
auto* output = context.Input<Tensor>("Out");
auto* output_grad = context.Input<Tensor>(framework::GradVarName("Out"));
auto* input_grad = context.Output<Tensor>(framework::GradVarName("X"));
input_grad->mutable_data<T>(context.GetPlace());

auto axis = context.Attr<std::vector<int>>("axis");
auto reduce_all = context.Attr<bool>("reduce_all");
const auto input_dim_size = context.Input<Tensor>("X")->dims().size();
reduce_all |= (static_cast<const int>(axis.size()) == input_dim_size);

if (reduce_all) {
auto x = EigenVector<T>::Flatten(*input);
auto y = EigenVector<T>::Flatten(*output);
auto dy = EigenVector<T>::Flatten(*output_grad);
auto dx = EigenVector<T>::Flatten(*input_grad);
auto& place =
*context.template device_context<DeviceContext>().eigen_device();
auto broadcast_dim =
Eigen::array<int, 1>({{static_cast<int>(input->numel())}});
LogsumexpGradFunctor()(place, &x, &y, &dx, &dy, broadcast_dim,
broadcast_dim[0]);
} else {
int rank = input->dims().size();
LogsumexpGradFunctor functor;
switch (rank) {
case 1:
ReduceGradFunctor<DeviceContext, T, 1, LogsumexpGradFunctor>(
context.template device_context<DeviceContext>(), *input, *output,
*output_grad, input_grad, functor, axis);
break;
case 2:
ReduceGradFunctor<DeviceContext, T, 2, LogsumexpGradFunctor>(
context.template device_context<DeviceContext>(), *input, *output,
*output_grad, input_grad, functor, axis);
break;
case 3:
ReduceGradFunctor<DeviceContext, T, 3, LogsumexpGradFunctor>(
context.template device_context<DeviceContext>(), *input, *output,
*output_grad, input_grad, functor, axis);
break;
case 4:
ReduceGradFunctor<DeviceContext, T, 4, LogsumexpGradFunctor>(
context.template device_context<DeviceContext>(), *input, *output,
*output_grad, input_grad, functor, axis);
break;
}
}
}
};
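
As a sanity check on the gradient path, the identity $dx_i = dy \cdot e^{x_i - y}$ used by `LogsumexpGradFunctor` can be verified against a finite difference; an illustrative standalone snippet, not part of the PR:

```cpp
#include <cmath>
#include <cstdio>

int main() {
  const double x0 = 0.5, x1 = 1.5;
  const double y = std::log(std::exp(x0) + std::exp(x1));  // logsumexp(x)
  // Finite-difference derivative w.r.t. x0 vs. the analytic exp(x0 - y).
  const double eps = 1e-6;
  const double y_eps = std::log(std::exp(x0 + eps) + std::exp(x1));
  std::printf("fd=%.6f analytic=%.6f\n", (y_eps - y) / eps,
              std::exp(x0 - y));  // the two agree to ~1e-6
}
```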

} // namespace operators
} // namespace paddle
@@ -1,4 +1,4 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,11 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/logsumexp_grad_kernel.h"
// .part used to speed up nvcc compile
#include "paddle/fluid/operators/reduce_ops/logsumexp_op.h"

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/logsumexp_grad_kernel_impl.h"
namespace ops = paddle::operators;

PD_REGISTER_KERNEL(
logsumexp_grad, CPU, ALL_LAYOUT, phi::LogsumexpGradKernel, float, double) {}
REGISTER_OP_CUDA_KERNEL(
logsumexp_grad,
ops::LogsumexpGradKernel<paddle::platform::CUDADeviceContext, float>,
ops::LogsumexpGradKernel<paddle::platform::CUDADeviceContext, double>);
2 changes: 1 addition & 1 deletion paddle/fluid/operators/reduce_ops/logsumexp_op_xpu.cc
@@ -14,7 +14,7 @@

#ifdef PADDLE_WITH_XPU

#include "paddle/fluid/operators/reduce_ops/reduce_op_function.h"
#include "paddle/fluid/operators/reduce_ops/logsumexp_op.h"
#include "paddle/fluid/platform/device/xpu/xpu_header.h"
#include "paddle/fluid/platform/device_context.h"
