UpSampleLinear1d.cu
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/LegacyTHFunctions.h>

namespace at {
namespace native {

// Forward pass (out variant): writes the linearly upsampled result into
// `output`, delegating to the legacy THNN/THCUNN CUDA kernel.
Tensor& upsample_linear1d_out_cuda(
    Tensor& output,
    const Tensor& input,
    IntArrayRef output_size,
    bool align_corners) {
  return at::legacy::th::_thnn_upsample_linear1d_forward_out(
      output, input, output_size, align_corners);
}

// Forward pass: returns a new tensor whose length dimension is resized to
// `output_size` using linear interpolation.
Tensor upsample_linear1d_cuda(
    const Tensor& input,
    IntArrayRef output_size,
    bool align_corners) {
  return at::legacy::th::_thnn_upsample_linear1d_forward(
      input, output_size, align_corners);
}

// Backward pass (out variant): writes the gradient w.r.t. the input into
// `grad_input`, given the gradient w.r.t. the output.
Tensor& upsample_linear1d_backward_out_cuda(
    Tensor& grad_input,
    const Tensor& grad_output,
    IntArrayRef output_size,
    IntArrayRef input_size,
    bool align_corners) {
  return at::legacy::th::_thnn_upsample_linear1d_backward_out(
      grad_input, grad_output, output_size, input_size, align_corners);
}

// Backward pass: returns the gradient w.r.t. an input of shape `input_size`.
Tensor upsample_linear1d_backward_cuda(
    const Tensor& grad_output,
    IntArrayRef output_size,
    IntArrayRef input_size,
    bool align_corners) {
  return at::legacy::th::_thnn_upsample_linear1d_backward(
      grad_output, output_size, input_size, align_corners);
}

} // namespace native
} // namespace at
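
// --- Usage sketch (not part of the original file) ---
// A minimal example of exercising the forward path above through the ATen
// dispatcher. It assumes a CUDA-enabled build of this PyTorch revision and
// that at::upsample_linear1d routes CUDA tensors to upsample_linear1d_cuda;
// the example function itself is illustrative only.
#include <iostream>

void upsample_linear1d_example() {
  // 1 batch, 1 channel, 4 samples along the length dimension.
  at::Tensor input = at::arange(4, at::dtype(at::kFloat).device(at::kCUDA))
                         .reshape({1, 1, 4});

  // Upsample the length dimension from 4 to 8 elements; with
  // align_corners = true the first and last samples map exactly onto the
  // first and last output positions.
  at::Tensor output =
      at::upsample_linear1d(input, {8}, /*align_corners=*/true);

  std::cout << output << std::endl;  // prints the interpolated values
}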