Skip to content

Commit

Permalink
Add int16 support for several ops (#39636)
Browse files · Browse the repository at this point in the history
* add more op int16 support

* fix xpu ci
  • Loading branch information
sneaxiy authored Feb 20, 2022
1 parent 2fe0426 commit 267275d
Show file tree
Hide file tree
Showing 31 changed files with 89 additions and 54 deletions.
1 change: 1 addition & 0 deletions paddle/fluid/operators/controlflow/compare_op.cu
Original file line number · Diff line number · Diff line change
Expand Up @@ -48,6 +48,7 @@ class CompareOpKernel<platform::CUDADeviceContext, Functor, InverseFunctor>
REGISTER_OP_CUDA_KERNEL( \
op_type, \
ops::CompareOpKernel<plat::CUDADeviceContext, ops::func<bool>, void>, \
ops::CompareOpKernel<plat::CUDADeviceContext, ops::func<int16_t>, void>, \
ops::CompareOpKernel<plat::CUDADeviceContext, ops::func<int>, void>, \
ops::CompareOpKernel<plat::CUDADeviceContext, ops::func<int64_t>, void>, \
ops::CompareOpKernel<plat::CUDADeviceContext, ops::func<float>, void>, \
Expand Down
3 changes: 3 additions & 0 deletions paddle/fluid/operators/controlflow/compare_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -95,6 +95,9 @@ class CompareOpKernel
::paddle::operators::CompareOpKernel< \
::paddle::platform::dev##DeviceContext, \
functor<int>, inverse_functor<int>>, \
::paddle::operators::CompareOpKernel< \
::paddle::platform::dev##DeviceContext, \
functor<int16_t>, inverse_functor<int16_t>>, \
::paddle::operators::CompareOpKernel< \
::paddle::platform::dev##DeviceContext, \
functor<int64_t>, inverse_functor<int64_t>>, \
Expand Down
1 change: 1 addition & 0 deletions paddle/fluid/operators/cumsum_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -93,6 +93,7 @@ REGISTER_OPERATOR(cumsum, ops::CumOp, ops::CumsumOpMaker,
ops::CumsumGradMaker<paddle::imperative::OpBase>);
REGISTER_OP_CPU_KERNEL(cumsum, ops::CumKernel<CPU, ops::CumsumFunctor<float>>,
ops::CumKernel<CPU, ops::CumsumFunctor<double>>,
ops::CumKernel<CPU, ops::CumsumFunctor<int16_t>>,
ops::CumKernel<CPU, ops::CumsumFunctor<int>>,
ops::CumKernel<CPU, ops::CumsumFunctor<int64_t>>);

Expand Down
1 change: 1 addition & 0 deletions paddle/fluid/operators/cumsum_op.cu
Original file line number Diff line number Diff line change
Expand Up @@ -320,5 +320,6 @@ namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
cumsum, ops::CumCUDAKernel<paddle::platform::CUDADeviceContext, float>,
ops::CumCUDAKernel<paddle::platform::CUDADeviceContext, double>,
ops::CumCUDAKernel<paddle::platform::CUDADeviceContext, int16_t>,
ops::CumCUDAKernel<paddle::platform::CUDADeviceContext, int>,
ops::CumCUDAKernel<paddle::platform::CUDADeviceContext, int64_t>);
4 changes: 4 additions & 0 deletions paddle/fluid/operators/elementwise/elementwise_sub_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -96,6 +96,7 @@ REGISTER_OP_CPU_KERNEL(
elementwise_sub,
ops::ElementwiseSubKernel<paddle::platform::CPUDeviceContext, float>,
ops::ElementwiseSubKernel<paddle::platform::CPUDeviceContext, double>,
ops::ElementwiseSubKernel<paddle::platform::CPUDeviceContext, int16_t>,
ops::ElementwiseSubKernel<paddle::platform::CPUDeviceContext, int>,
ops::ElementwiseSubKernel<paddle::platform::CPUDeviceContext, int64_t>,
ops::ElementwiseSubKernel<paddle::platform::CPUDeviceContext,
Expand All @@ -106,6 +107,7 @@ REGISTER_OP_CPU_KERNEL(
elementwise_sub_grad,
ops::ElementwiseSubGradKernel<paddle::platform::CPUDeviceContext, float>,
ops::ElementwiseSubGradKernel<paddle::platform::CPUDeviceContext, double>,
ops::ElementwiseSubGradKernel<paddle::platform::CPUDeviceContext, int16_t>,
ops::ElementwiseSubGradKernel<paddle::platform::CPUDeviceContext, int>,
ops::ElementwiseSubGradKernel<paddle::platform::CPUDeviceContext, int64_t>,
ops::ElementwiseSubGradKernel<paddle::platform::CPUDeviceContext,
Expand All @@ -118,6 +120,8 @@ REGISTER_OP_CPU_KERNEL(
float>,
ops::ElementwiseSubDoubleGradKernel<paddle::platform::CPUDeviceContext,
double>,
ops::ElementwiseSubDoubleGradKernel<paddle::platform::CPUDeviceContext,
int16_t>,
ops::ElementwiseSubDoubleGradKernel<paddle::platform::CPUDeviceContext,
int>,
ops::ElementwiseSubDoubleGradKernel<paddle::platform::CPUDeviceContext,
Expand Down
1 change: 1 addition & 0 deletions paddle/fluid/operators/fill_any_like_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -94,6 +94,7 @@ REGISTER_OPERATOR(

REGISTER_OP_CPU_KERNEL(
fill_any_like,
ops::FillAnyLikeKernel<paddle::platform::CPUDeviceContext, int16_t>,
ops::FillAnyLikeKernel<paddle::platform::CPUDeviceContext, int>,
ops::FillAnyLikeKernel<paddle::platform::CPUDeviceContext, int64_t>,
ops::FillAnyLikeKernel<paddle::platform::CPUDeviceContext, float>,
Expand Down
1 change: 1 addition & 0 deletions paddle/fluid/operators/fill_any_like_op.cu
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ limitations under the License. */
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
fill_any_like,
ops::FillAnyLikeKernel<paddle::platform::CUDADeviceContext, int16_t>,
ops::FillAnyLikeKernel<paddle::platform::CUDADeviceContext, int32_t>,
ops::FillAnyLikeKernel<paddle::platform::CUDADeviceContext, int64_t>,
ops::FillAnyLikeKernel<paddle::platform::CUDADeviceContext, float>,
Expand Down
4 changes: 3 additions & 1 deletion paddle/fluid/operators/gather_nd_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -183,7 +183,9 @@ REGISTER_OPERATOR(gather_nd_grad, ops::GatherNdGradOp,
REGISTER_OP_CPU_KERNEL(gather_nd, ops::GatherNdOpKernel<float>,
ops::GatherNdOpKernel<double>,
ops::GatherNdOpKernel<int64_t>,
ops::GatherNdOpKernel<int>, ops::GatherNdOpKernel<bool>,
ops::GatherNdOpKernel<int>,
ops::GatherNdOpKernel<int16_t>,
ops::GatherNdOpKernel<bool>,
ops::GatherNdOpKernel<uint8_t>);

REGISTER_OP_CPU_KERNEL(gather_nd_grad, ops::GatherNdGradOpKernel<float>,
Expand Down
1 change: 1 addition & 0 deletions paddle/fluid/operators/gather_nd_op.cu
Original file line number Diff line number Diff line change
Expand Up @@ -103,6 +103,7 @@ REGISTER_OP_CUDA_KERNEL(gather_nd, ops::GatherNdOpCUDAKernel<CUDA, float>,
ops::GatherNdOpCUDAKernel<CUDA, double>,
ops::GatherNdOpCUDAKernel<CUDA, int64_t>,
ops::GatherNdOpCUDAKernel<CUDA, int>,
ops::GatherNdOpCUDAKernel<CUDA, int16_t>,
ops::GatherNdOpCUDAKernel<CUDA, bool>,
ops::GatherNdOpCUDAKernel<CUDA, plat::float16>);

Expand Down
2 changes: 2 additions & 0 deletions paddle/fluid/operators/reduce_ops/reduce_sum_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -116,6 +116,8 @@ REGISTER_OP_CPU_KERNEL(
ops::SumFunctor>,
ops::ReduceKernel<paddle::platform::CPUDeviceContext,
paddle::platform::float16, ops::SumFunctor>,
ops::ReduceKernel<paddle::platform::CPUDeviceContext, int16_t,
ops::SumFunctor>,
ops::ReduceKernel<paddle::platform::CPUDeviceContext, int, ops::SumFunctor>,
ops::ReduceKernel<paddle::platform::CPUDeviceContext, int64_t,
ops::SumFunctor>,
Expand Down
1 change: 1 addition & 0 deletions paddle/fluid/operators/reduce_ops/reduce_sum_op.cu
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ REGISTER_OP_CUDA_KERNEL(
ops::ReduceCudaKernel<double, kps::AddFunctor, kps::IdentityFunctor>,
ops::ReduceCudaKernel<paddle::platform::float16, kps::AddFunctor,
kps::IdentityFunctor>,
ops::ReduceCudaKernel<int16_t, kps::AddFunctor, kps::IdentityFunctor>,
ops::ReduceCudaKernel<int, kps::AddFunctor, kps::IdentityFunctor>,
ops::ReduceCudaKernel<int64_t, kps::AddFunctor, kps::IdentityFunctor>,
ops::ReduceCudaKernel<paddle::platform::complex<float>, kps::AddFunctor,
Expand Down
24 changes: 13 additions & 11 deletions paddle/fluid/operators/reshape_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -639,10 +639,12 @@ REGISTER_OPERATOR(reshape_grad, ops::ReshapeGradOp,
ops::ReshapeGradInplaceInferer);

REGISTER_OP_CPU_KERNEL_FUNCTOR(reshape, float, ops::ReshapeKernel, double,
ops::ReshapeKernel, int, ops::ReshapeKernel,
int64_t, ops::ReshapeKernel);
ops::ReshapeKernel, int16_t, ops::ReshapeKernel,
int, ops::ReshapeKernel, int64_t,
ops::ReshapeKernel);
REGISTER_OP_CPU_KERNEL_FUNCTOR(reshape_grad, float, ops::ReshapeGradKernel,
double, ops::ReshapeGradKernel, int,
double, ops::ReshapeGradKernel, int16_t,
ops::ReshapeGradKernel, int,
ops::ReshapeGradKernel, int64_t,
ops::ReshapeGradKernel);
REGISTER_OPERATOR(reshape2, ops::Reshape2Op, ops::Reshape2OpMaker,
Expand All @@ -659,15 +661,15 @@ REGISTER_OPERATOR(reshape2_grad_grad, ops::Reshape2DoubleGradOp,

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape, float, ops::ReshapeKernel, double,
ops::ReshapeKernel, int, ops::ReshapeKernel,
uint8_t, ops::ReshapeKernel, int64_t,
ops::ReshapeKernel, plat::float16,
ops::ReshapeKernel, plat::bfloat16,
ops::ReshapeKernel);
ops::ReshapeKernel, int16_t, ops::ReshapeKernel,
int, ops::ReshapeKernel, uint8_t,
ops::ReshapeKernel, int64_t, ops::ReshapeKernel,
plat::float16, ops::ReshapeKernel,
plat::bfloat16, ops::ReshapeKernel);
REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape_grad, float, ops::ReshapeGradKernel,
double, ops::ReshapeGradKernel, int,
ops::ReshapeGradKernel, int64_t,
ops::ReshapeGradKernel, uint8_t,
double, ops::ReshapeGradKernel, int16_t,
ops::ReshapeKernel, int, ops::ReshapeGradKernel,
int64_t, ops::ReshapeGradKernel, uint8_t,
ops::ReshapeGradKernel, plat::float16,
ops::ReshapeGradKernel, plat::bfloat16,
ops::ReshapeGradKernel);
Expand Down
4 changes: 4 additions & 0 deletions paddle/fluid/operators/unsqueeze_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -362,6 +362,7 @@ REGISTER_OP_CPU_KERNEL(
ops::UnsqueezeKernel<paddle::platform::CPUDeviceContext, double>,
ops::UnsqueezeKernel<paddle::platform::CPUDeviceContext, bool>,
ops::UnsqueezeKernel<paddle::platform::CPUDeviceContext, int>,
ops::UnsqueezeKernel<paddle::platform::CPUDeviceContext, int16_t>,
ops::UnsqueezeKernel<paddle::platform::CPUDeviceContext, uint8_t>,
ops::UnsqueezeKernel<paddle::platform::CPUDeviceContext, int8_t>,
ops::UnsqueezeKernel<paddle::platform::CPUDeviceContext, int64_t>,
Expand All @@ -377,6 +378,7 @@ REGISTER_OP_CPU_KERNEL(
ops::UnsqueezeGradKernel<paddle::platform::CPUDeviceContext, double>,
ops::UnsqueezeGradKernel<paddle::platform::CPUDeviceContext, bool>,
ops::UnsqueezeGradKernel<paddle::platform::CPUDeviceContext, int>,
ops::UnsqueezeGradKernel<paddle::platform::CPUDeviceContext, int16_t>,
ops::UnsqueezeGradKernel<paddle::platform::CPUDeviceContext, uint8_t>,
ops::UnsqueezeGradKernel<paddle::platform::CPUDeviceContext, int8_t>,
ops::UnsqueezeGradKernel<paddle::platform::CPUDeviceContext, int64_t>,
Expand All @@ -391,6 +393,7 @@ REGISTER_OP_CPU_KERNEL(
ops::UnsqueezeKernel<paddle::platform::CPUDeviceContext, double>,
ops::UnsqueezeKernel<paddle::platform::CPUDeviceContext, bool>,
ops::UnsqueezeKernel<paddle::platform::CPUDeviceContext, int>,
ops::UnsqueezeKernel<paddle::platform::CPUDeviceContext, int16_t>,
ops::UnsqueezeKernel<paddle::platform::CPUDeviceContext, uint8_t>,
ops::UnsqueezeKernel<paddle::platform::CPUDeviceContext, int8_t>,
ops::UnsqueezeKernel<paddle::platform::CPUDeviceContext, int64_t>,
Expand All @@ -406,6 +409,7 @@ REGISTER_OP_CPU_KERNEL(
ops::Unsqueeze2GradKernel<paddle::platform::CPUDeviceContext, double>,
ops::Unsqueeze2GradKernel<paddle::platform::CPUDeviceContext, bool>,
ops::Unsqueeze2GradKernel<paddle::platform::CPUDeviceContext, int>,
ops::Unsqueeze2GradKernel<paddle::platform::CPUDeviceContext, int16_t>,
ops::Unsqueeze2GradKernel<paddle::platform::CPUDeviceContext, uint8_t>,
ops::Unsqueeze2GradKernel<paddle::platform::CPUDeviceContext, int8_t>,
ops::Unsqueeze2GradKernel<paddle::platform::CPUDeviceContext, int64_t>,
Expand Down
4 changes: 4 additions & 0 deletions paddle/fluid/operators/unsqueeze_op.cu.cc
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ REGISTER_OP_CUDA_KERNEL(
ops::UnsqueezeKernel<paddle::platform::CUDADeviceContext, plat::bfloat16>,
ops::UnsqueezeKernel<paddle::platform::CUDADeviceContext, bool>,
ops::UnsqueezeKernel<paddle::platform::CUDADeviceContext, int>,
ops::UnsqueezeKernel<paddle::platform::CUDADeviceContext, int16_t>,
ops::UnsqueezeKernel<paddle::platform::CUDADeviceContext, uint8_t>,
ops::UnsqueezeKernel<paddle::platform::CUDADeviceContext, int8_t>,
ops::UnsqueezeKernel<paddle::platform::CUDADeviceContext, int64_t>,
Expand All @@ -41,6 +42,7 @@ REGISTER_OP_CUDA_KERNEL(
plat::bfloat16>,
ops::UnsqueezeGradKernel<paddle::platform::CUDADeviceContext, bool>,
ops::UnsqueezeGradKernel<paddle::platform::CUDADeviceContext, int>,
ops::UnsqueezeGradKernel<paddle::platform::CUDADeviceContext, int16_t>,
ops::UnsqueezeGradKernel<paddle::platform::CUDADeviceContext, int8_t>,
ops::UnsqueezeGradKernel<paddle::platform::CUDADeviceContext, uint8_t>,
ops::UnsqueezeGradKernel<paddle::platform::CUDADeviceContext, int64_t>,
Expand All @@ -56,6 +58,7 @@ REGISTER_OP_CUDA_KERNEL(
ops::UnsqueezeKernel<paddle::platform::CUDADeviceContext, plat::bfloat16>,
ops::UnsqueezeKernel<paddle::platform::CUDADeviceContext, bool>,
ops::UnsqueezeKernel<paddle::platform::CUDADeviceContext, int>,
ops::UnsqueezeKernel<paddle::platform::CUDADeviceContext, int16_t>,
ops::UnsqueezeKernel<paddle::platform::CUDADeviceContext, uint8_t>,
ops::UnsqueezeKernel<paddle::platform::CUDADeviceContext, int8_t>,
ops::UnsqueezeKernel<paddle::platform::CUDADeviceContext, int64_t>,
Expand All @@ -73,6 +76,7 @@ REGISTER_OP_CUDA_KERNEL(
plat::bfloat16>,
ops::Unsqueeze2GradKernel<paddle::platform::CUDADeviceContext, bool>,
ops::Unsqueeze2GradKernel<paddle::platform::CUDADeviceContext, int>,
ops::Unsqueeze2GradKernel<paddle::platform::CUDADeviceContext, int16_t>,
ops::Unsqueeze2GradKernel<paddle::platform::CUDADeviceContext, uint8_t>,
ops::Unsqueeze2GradKernel<paddle::platform::CUDADeviceContext, int8_t>,
ops::Unsqueeze2GradKernel<paddle::platform::CUDADeviceContext, int64_t>,
Expand Down
1 change: 1 addition & 0 deletions paddle/fluid/operators/where_index_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,7 @@ REGISTER_OP_WITHOUT_GRADIENT(where_index, ops::WhereIndexOp,
ops::WhereIndexOpMaker);
REGISTER_OP_CPU_KERNEL(where_index, ops::CPUWhereIndexKernel<int64_t>,
ops::CPUWhereIndexKernel<int>,
ops::CPUWhereIndexKernel<int16_t>,
ops::CPUWhereIndexKernel<bool>,
ops::CPUWhereIndexKernel<float>,
ops::CPUWhereIndexKernel<double>);
1 change: 1 addition & 0 deletions paddle/fluid/operators/where_index_op.cu
Original file line number Diff line number Diff line change
Expand Up @@ -158,6 +158,7 @@ class CUDAWhereIndexKernel : public framework::OpKernel<T> {
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(where_index, ops::CUDAWhereIndexKernel<int64_t>,
ops::CUDAWhereIndexKernel<int>,
ops::CUDAWhereIndexKernel<int16_t>,
ops::CUDAWhereIndexKernel<bool>,
ops::CUDAWhereIndexKernel<float>,
ops::CUDAWhereIndexKernel<double>);
5 changes: 5 additions & 0 deletions paddle/pten/kernels/cpu/elementwise_grad_kernel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -132,6 +132,7 @@ PT_REGISTER_KERNEL(add_grad,
pten::AddGradKernel,
float,
double,
int16_t,
int,
int64_t,
pten::dtype::complex<float>,
Expand All @@ -143,6 +144,7 @@ PT_REGISTER_KERNEL(add_double_grad,
pten::AddDoubleGradKernel,
float,
double,
int16_t,
int,
int64_t,
pten::dtype::complex<float>,
Expand All @@ -154,6 +156,7 @@ PT_REGISTER_KERNEL(add_triple_grad,
pten::AddTripleGradKernel,
float,
double,
int16_t,
int,
int64_t,
pten::dtype::complex<float>,
Expand All @@ -165,6 +168,7 @@ PT_REGISTER_KERNEL(subtract_grad,
pten::SubtractGradKernel,
float,
double,
int16_t,
int,
int64_t,
pten::dtype::complex<float>,
Expand All @@ -176,6 +180,7 @@ PT_REGISTER_KERNEL(subtract_double_grad,
pten::SubtractDoubleGradKernel,
float,
double,
int16_t,
int,
int64_t,
pten::dtype::complex<float>,
Expand Down
1 change: 1 addition & 0 deletions paddle/pten/kernels/cpu/full_kernel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -95,6 +95,7 @@ PT_REGISTER_KERNEL(full_like,
pten::FullLikeKernel,
float,
double,
int16_t,
int,
int64_t,
bool,
Expand Down
3 changes: 3 additions & 0 deletions paddle/pten/kernels/cpu/math_kernel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -124,6 +124,7 @@ PT_REGISTER_KERNEL(add_raw,
pten::AddRawKernel,
float,
double,
int16_t,
int,
int64_t,
complex64,
Expand All @@ -134,6 +135,7 @@ PT_REGISTER_KERNEL(subtract_raw,
pten::SubtractRawKernel,
float,
double,
int16_t,
int,
int64_t,
complex64,
Expand Down Expand Up @@ -167,6 +169,7 @@ PT_REGISTER_KERNEL(sum_raw,
float,
double,
pten::dtype::float16,
int16_t,
int,
int64_t,
complex64,
Expand Down
6 changes: 6 additions & 0 deletions paddle/pten/kernels/flatten_kernel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,7 @@ PT_REGISTER_KERNEL(flatten,
double,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {}

Expand All @@ -67,6 +68,7 @@ PT_REGISTER_KERNEL(flatten_with_xshape,
double,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {}

Expand All @@ -80,6 +82,7 @@ PT_REGISTER_KERNEL(flatten,
double,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {}

Expand All @@ -92,6 +95,7 @@ PT_REGISTER_KERNEL(flatten_with_xshape,
double,
uint8_t,
int8_t,
int16_t,
int,
int64_t) {}
#endif
Expand All @@ -104,6 +108,7 @@ PT_REGISTER_KERNEL(flatten,
float,
pten::dtype::float16,
int8_t,
int16_t,
int,
int64_t) {}

Expand All @@ -114,6 +119,7 @@ PT_REGISTER_KERNEL(flatten_with_xshape,
float,
pten::dtype::float16,
int8_t,
int16_t,
int,
int64_t) {}
#endif
1 change: 1 addition & 0 deletions paddle/pten/kernels/gpu/full_kernel.cu
Original file line number Diff line number Diff line change
Expand Up @@ -119,6 +119,7 @@ PT_REGISTER_KERNEL(full_like,
pten::FullLikeKernel,
float,
double,
int16_t,
int,
int64_t,
bool,
Expand Down
3 changes: 3 additions & 0 deletions paddle/pten/kernels/gpu/math_kernel.cu
Original file line number Diff line number Diff line change
Expand Up @@ -101,6 +101,7 @@ PT_REGISTER_KERNEL(add_raw,
pten::AddRawKernel,
float,
double,
int16_t,
int,
int64_t,
float16,
Expand All @@ -112,6 +113,7 @@ PT_REGISTER_KERNEL(subtract_raw,
pten::SubtractRawKernel,
float,
double,
int16_t,
int,
int64_t,
float16,
Expand Down Expand Up @@ -148,6 +150,7 @@ PT_REGISTER_KERNEL(sum_raw,
float,
double,
float16,
int16_t,
int,
int64_t,
complex64,
Expand Down
Loading

0 comments on commit 267275d

Please sign in to comment.