use elementwise to optimize gelu backward implementation on GPU #38263
Changes from 3 commits
@@ -12,9 +12,76 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

```cpp
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/amp/fp16_type_traits.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_broadcast.cu.h"
#include "paddle/fluid/operators/gelu_op.h"
#include "paddle/fluid/platform/float16.h"

namespace paddle {
namespace operators {

// Gradient functor for the tanh-approximated GELU. The math runs in MPType
// (float when T is float16) to avoid precision loss in the intermediates.
template <typename T>
struct GeluWithApproximateGradFunctor {
  using MPType = typename details::MPTypeTrait<T>::Type;
  inline HOSTDEVICE T operator()(T arg_x, T arg_dout) {
    MPType x = static_cast<MPType>(arg_x);
    MPType dout = static_cast<MPType>(arg_dout);
    MPType kAlpha = static_cast<MPType>(M_2_SQRTPI * M_SQRT1_2);  // sqrt(2/pi)
    MPType one = static_cast<MPType>(1);
    MPType half = static_cast<MPType>(0.5);
    auto tanh_out =
        tanh(kAlpha * x * (one + static_cast<MPType>(0.044715) * x * x));
    auto ans =
        half * x * ((one - tanh_out * tanh_out) *
                    (kAlpha + static_cast<MPType>(0.1070322243) * x * x)) +
        half * (one + tanh_out);
    return static_cast<T>(ans * dout);
  }
};
```

Review comments on the `ans` expression (translated from Chinese):

> Simplify the formula further.

Done.

> Don't use magic numbers that are not part of the formula; replace them with expressions.

Done.
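For reference, a sketch of the derivation behind the functor (my own working, not part of the PR): writing $k_a = \sqrt{2/\pi}$ and $u = k_a\,x\,(1 + 0.044715\,x^2)$, the approximate GELU is $\tfrac{x}{2}(1 + \tanh u)$, and the product and chain rules give exactly the code's expression. In particular, the magic constant flagged in the review above is $3 \cdot 0.044715 \cdot k_a$:

```latex
% Derivative of the tanh-approximated GELU; k_a = sqrt(2/pi).
\frac{d}{dx}\left[\frac{x}{2}\bigl(1+\tanh u\bigr)\right]
  = \frac{1}{2}\bigl(1+\tanh u\bigr)
  + \frac{x}{2}\bigl(1-\tanh^2 u\bigr)\bigl(k_a + 3\cdot 0.044715\,k_a\,x^2\bigr),
\qquad 3 \cdot 0.044715 \cdot k_a \approx 0.1070322243 .
```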
```cpp
// Gradient functor for the exact (erf-based) GELU.
template <typename T>
struct GeluWithoutApproximateGradFunctor {
  using MPType = typename details::MPTypeTrait<T>::Type;
  inline HOSTDEVICE T operator()(T arg_x, T arg_dout) {
    MPType x = static_cast<MPType>(arg_x);
    MPType dout = static_cast<MPType>(arg_dout);
    MPType kAlpha = static_cast<MPType>(M_2_SQRTPI * M_SQRT1_2);  // sqrt(2/pi)
    MPType one = static_cast<MPType>(1);
    MPType half = static_cast<MPType>(0.5);
    auto ans = half * (one + erf(x * static_cast<MPType>(M_SQRT1_2))) +
               half * kAlpha * x * exp(-half * x * x);
    return static_cast<T>(ans * dout);
  }
};
```
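And the exact form, for comparison (again my own sketch): since $\mathrm{GELU}(x) = x\,\Phi(x)$ with $\Phi$ the standard normal CDF, the gradient is $\Phi(x) + x\,\varphi(x)$; the code's `half * kAlpha` is $\tfrac{1}{2}\sqrt{2/\pi} = 1/\sqrt{2\pi}$, the normalizing constant of the density $\varphi$:

```latex
% Gradient of the exact GELU.
\frac{d}{dx}\bigl[x\,\Phi(x)\bigr] = \Phi(x) + x\,\varphi(x),
\qquad \Phi(x) = \frac{1}{2}\Bigl(1+\operatorname{erf}\bigl(x/\sqrt{2}\bigr)\Bigr),
\qquad \varphi(x) = \frac{1}{\sqrt{2\pi}}\,e^{-x^2/2}.
```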
```cpp
// CUDA specialization of GeluGradKernel: computes dx with a single fused
// binary elementwise kernel over (x, dout), selected by `approximate`.
template <typename T>
class GeluGradKernel<platform::CUDADeviceContext, T>
    : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* x = context.Input<framework::Tensor>("X");
    auto* dout =
        context.Input<framework::Tensor>(framework::GradVarName("Out"));
    auto* dx = context.Output<framework::Tensor>(framework::GradVarName("X"));
    auto approximate = context.Attr<bool>("approximate");
    dx->mutable_data<T>(dout->place());

    std::vector<const framework::Tensor*> ins = {x, dout};
    std::vector<framework::Tensor*> outs = {dx};
    const auto& dev_ctx =
        context.template device_context<platform::CUDADeviceContext>();
    if (approximate) {
      LaunchElementwiseCudaKernel<ElementwiseType::kBinary, T, T>(
          dev_ctx, ins, &outs, 0, GeluWithApproximateGradFunctor<T>());
    } else {
      LaunchElementwiseCudaKernel<ElementwiseType::kBinary, T, T>(
          dev_ctx, ins, &outs, 0, GeluWithoutApproximateGradFunctor<T>());
    }
  }
};
}  // namespace operators
}  // namespace paddle
```
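Conceptually, the launcher applies the functor once per element, along the lines of this simplified grid-stride sketch (illustrative only, not Paddle's actual implementation; the real `LaunchElementwiseCudaKernel` also handles broadcasting and vectorized loads):

```cuda
// Simplified mental model of a binary elementwise launch. Not Paddle's
// actual code, which adds broadcasting and vectorization on top of this.
template <typename T, typename Functor>
__global__ void BinaryElementwiseKernel(const T* x, const T* dout, T* dx,
                                        int64_t n, Functor fn) {
  int64_t stride = static_cast<int64_t>(gridDim.x) * blockDim.x;
  for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride) {
    dx[i] = fn(x[i], dout[i]);  // e.g. GeluWithApproximateGradFunctor<T>
  }
}
```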
```cpp
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
    gelu, ops::GeluKernel<paddle::platform::CUDADeviceContext, float>,
```
Review comment on the includes (translated from Chinese):

> This header is already included in paddle/fluid/operators/amp/fp16_type_traits.h, so it can be removed here.

Done.
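Because the functors are `HOSTDEVICE`, their math is easy to sanity-check on the host. Below is a minimal standalone sketch (my own, not part of the PR; `gelu_exact` and the central-difference comparison are illustrative helpers) that checks the exact-gradient formula numerically:

```cpp
#include <cmath>
#include <cstdio>

// Exact GELU: x * Phi(x), with Phi the standard normal CDF.
double gelu_exact(double x) { return 0.5 * x * (1.0 + erf(x * M_SQRT1_2)); }

// Analytic gradient, mirroring GeluWithoutApproximateGradFunctor with dout = 1.
double gelu_exact_grad(double x) {
  const double kAlpha = M_2_SQRTPI * M_SQRT1_2;  // sqrt(2/pi)
  return 0.5 * (1.0 + erf(x * M_SQRT1_2)) +
         0.5 * kAlpha * x * exp(-0.5 * x * x);
}

int main() {
  const double h = 1e-6;  // central-difference step
  for (double x : {-2.0, -0.5, 0.0, 0.5, 2.0}) {
    double fd = (gelu_exact(x + h) - gelu_exact(x - h)) / (2.0 * h);
    std::printf("x=%5.2f analytic=%.8f central-diff=%.8f\n", x,
                gelu_exact_grad(x), fd);
  }
  return 0;
}
```

The two columns should agree to roughly the square of the step size, which is a quick way to confirm the gradient expression before trusting the fused CUDA path.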