// ForeachBinaryOpScalar.cu (forked from pytorch/pytorch)
#include <ATen/Dispatch.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/cuda/ForeachFunctors.cuh>
#include <ATen/native/BinaryOps.h>

namespace at { namespace native {
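
// Out-of-place path: allocates one result tensor per input, then applies
// Op(input, scalar) across the entire list with a single multi_tensor_apply
// launch. tensor_lists[0] holds the inputs and tensor_lists[1] the outputs
// (res_arg_index == 1 below).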
template<template<class> class Op>
std::vector<Tensor> foreach_binary_op(TensorList tensors, const Scalar& scalar) {
    std::vector<std::vector<at::Tensor>> tensor_lists;
    std::vector<at::Tensor> vec_res;
    vec_res.reserve(tensors.size());
    for (const auto& t : tensors) {
        vec_res.emplace_back(at::native::empty_like(t));
    }

    tensor_lists.emplace_back(tensors.vec());
    tensor_lists.emplace_back(std::move(vec_res));

    AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kBool, kBFloat16, kHalf, tensors[0].scalar_type(), "foreach_binary_op_scalar_cuda", [&]() {
        using opmath_t = get_opmath_t<scalar_t>::opmath_t;
        multi_tensor_apply<2>(tensor_lists,
                              BinaryOpScalarFunctor<scalar_t,
                                                    /* depth */ 2,
                                                    /* r_args_depth */ 1,
                                                    /* res_arg_index */ 1>(),
                              Op<opmath_t>(),
                              scalar.to<opmath_t>());
    });

    return tensor_lists[1];
}
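
// In-place path: a single tensor list (depth 1) that is both read and written
// (res_arg_index == 0), so no intermediate results are allocated.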
template<template<class> class Op>
void foreach_binary_op_(TensorList tensors, const Scalar& scalar) {
    std::vector<std::vector<at::Tensor>> tensor_lists;
    tensor_lists.emplace_back(tensors.vec());

    AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kBool, kBFloat16, kHalf, tensors[0].scalar_type(), "foreach_binary_op_scalar_cuda_", [&]() {
        using opmath_t = get_opmath_t<scalar_t>::opmath_t;
        multi_tensor_apply<1>(tensor_lists,
                              BinaryOpScalarFunctor<scalar_t,
                                                    /* depth */ 1,
                                                    /* r_args_depth */ 1,
                                                    /* res_arg_index */ 0>(),
                              Op<opmath_t>(),
                              scalar.to<opmath_t>());
    });
}
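
// FOREACH_BINARY_OP_SCALAR stamps out the in-place and out-of-place CUDA
// entry points for one binary op. Both validate the foreach API restrictions
// first and fall back to the slow per-tensor path whenever the fast route
// cannot be taken (DIVISION_OP flags ops whose integer inputs promote to float).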
#define FOREACH_BINARY_OP_SCALAR(NAME, OP, DIVISION_OP) \
void foreach_tensor_##NAME##_scalar_kernel_cuda_(TensorList tensors, const Scalar& scalar) { \
    check_foreach_api_restrictions(tensors); \
    if (!can_use_fast_route(tensors, scalar, DIVISION_OP)) { \
        return at::native::foreach_tensor_##NAME##_scalar_kernel_slow_(tensors, scalar); \
    } \
 \
    foreach_binary_op_<OP>(tensors, scalar); \
} \
 \
std::vector<Tensor> foreach_tensor_##NAME##_scalar_kernel_cuda(TensorList tensors, const Scalar& scalar) { \
    check_foreach_api_restrictions(tensors); \
    if (!can_use_fast_route(tensors, scalar, DIVISION_OP)) { \
        return at::native::foreach_tensor_##NAME##_scalar_kernel_slow(tensors, scalar); \
    } \
 \
    return foreach_binary_op<OP>(tensors, scalar); \
}
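
// For example, FOREACH_BINARY_OP_SCALAR(add, std::plus, false) below defines
// foreach_tensor_add_scalar_kernel_cuda_ (in-place) and
// foreach_tensor_add_scalar_kernel_cuda (out-of-place), both applying
// std::plus<opmath_t> elementwise.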
FOREACH_BINARY_OP_SCALAR(add, std::plus, false);
FOREACH_BINARY_OP_SCALAR(mul, std::multiplies, false);
// In the case of division, integer inputs will result in float outputs.
// Currently, multi_tensor_apply can only return results of the same type as its
// inputs, so such calls must take the slow route (hence DIVISION_OP == true).
FOREACH_BINARY_OP_SCALAR(div, std::divides, true);
// In the case of subtraction, we don't allow the scalar to be a boolean, following the torch.sub logic.
void foreach_tensor_sub_scalar_kernel_cuda_(TensorList tensors, const Scalar& scalar) {
    check_foreach_api_restrictions(tensors);
    at::native::sub_check(tensors[0], scalar);

    if (!can_use_fast_route(tensors, scalar)) {
        return at::native::foreach_tensor_sub_scalar_kernel_slow_(tensors, scalar);
    }

    foreach_binary_op_<std::minus>(tensors, scalar);
}
std::vector<Tensor> foreach_tensor_sub_scalar_kernel_cuda(TensorList tensors, const Scalar& scalar) {
    check_foreach_api_restrictions(tensors);
    at::native::sub_check(tensors[0], scalar);

    if (!can_use_fast_route(tensors, scalar)) {
        return at::native::foreach_tensor_sub_scalar_kernel_slow(tensors, scalar);
    }

    return foreach_binary_op<std::minus>(tensors, scalar);
}
}} // namespace at::native
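
// Usage sketch (illustrative only; assumes the public ATen foreach API, which
// dispatches to the kernels above for CUDA tensors):
//
//   std::vector<at::Tensor> ts = {at::ones({8}, at::kCUDA),
//                                 at::ones({8}, at::kCUDA)};
//   auto out = at::_foreach_add(ts, 2);  // out-of-place: one fused kernel launch
//   at::_foreach_mul_(ts, 3);            // in-place variant, no extra allocation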