support tile op backward refuse forward (PaddlePaddle#45942)
Charles-hit committed Sep 19, 2022
1 parent 618dbcd commit 04e330c
Showing 2 changed files with 79 additions and 5 deletions.
5 changes: 1 addition & 4 deletions paddle/phi/api/yaml/legacy_backward.yaml
@@ -2500,10 +2500,7 @@
   forward : tile_grad (Tensor x, Tensor grad_out, IntArray repeat_times) -> Tensor(grad_x)
   args : (Tensor grad_x_grad, IntArray repeat_times)
   output : Tensor(grad_out_grad)
-  infer_meta :
-    func : TileInferMeta
-  kernel :
-    func : tile
+  invoke : tile(grad_x_grad, repeat_times)
 
 - backward_op : tile_grad
   forward : tile (Tensor x, IntArray repeat_times) -> Tensor(out)
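Why the invoke works: tile_grad sums grad_out over the tiled copies back onto x, that reduction is linear, and its adjoint is tile itself, so the backward of tile_grad is just the forward tile applied to grad_x_grad. A minimal NumPy sketch of the adjoint identity <tile_grad(g), v> == <g, tile(v)>; the tile_grad helper below is illustrative for the 2-D case, not Paddle's kernel:

import numpy as np

repeat_times = (2, 1)
x_shape = (1, 2)

def tile_grad(grad_out, x_shape, repeat_times):
    # Sum each tiled copy of grad_out back onto the input cell it came from.
    r0, r1 = repeat_times
    h, w = x_shape
    return grad_out.reshape(r0, h, r1, w).sum(axis=(0, 2))

out_shape = (x_shape[0] * repeat_times[0], x_shape[1] * repeat_times[1])
g = np.random.rand(*out_shape)
v = np.random.rand(*x_shape)

# <tile_grad(g), v> == <g, tile(v)>: tile is the adjoint of tile_grad,
# which is why grad_out_grad = tile(grad_x_grad, repeat_times).
assert np.isclose((tile_grad(g, x_shape, repeat_times) * v).sum(),
                  (g * np.tile(v, repeat_times)).sum())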
79 changes: 78 additions & 1 deletion python/paddle/fluid/tests/unittests/test_tile_op.py
@@ -19,7 +19,10 @@
 from op_test import OpTest
 import paddle
 import paddle.fluid as fluid
-from paddle.fluid import compiler, Program, program_guard
+from paddle.fluid import compiler, Program, program_guard, core
+import gradient_checker
+from decorator_helper import prog_scope
+import paddle.fluid.layers as layers
 
 
 #Situation 1: repeat_times is a list (without tensor)
@@ -263,6 +266,80 @@ def test_api(self):
         assert np.array_equal(out_3.numpy(), np.tile(np_x, (2, 3)))
 
 
+class TestTileDoubleGradCheck(unittest.TestCase):
+
+    def tile_wrapper(self, x):
+        return paddle.tile(x[0], [2, 1])
+
+    @prog_scope()
+    def func(self, place):
+        # The shape of the input variable must be fully specified, not include -1.
+        eps = 0.005
+        dtype = np.float32
+
+        data = layers.data('data', [1, 2], False, dtype)
+        data.persistable = True
+        out = paddle.tile(data, [2, 1])
+        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)
+
+        gradient_checker.double_grad_check([data],
+                                           out,
+                                           x_init=[data_arr],
+                                           place=place,
+                                           eps=eps)
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
+        gradient_checker.double_grad_check_for_dygraph(self.tile_wrapper,
+                                                       [data],
+                                                       out,
+                                                       x_init=[data_arr],
+                                                       place=place)
+
+    def test_grad(self):
+        paddle.enable_static()
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)
+
+
+class TestTileTripleGradCheck(unittest.TestCase):
+
+    def tile_wrapper(self, x):
+        return paddle.tile(x[0], [2, 1])
+
+    @prog_scope()
+    def func(self, place):
+        # The shape of the input variable must be fully specified, not include -1.
+        eps = 0.005
+        dtype = np.float32
+
+        data = layers.data('data', [1, 2], False, dtype)
+        data.persistable = True
+        out = paddle.tile(data, [2, 1])
+        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)
+
+        gradient_checker.triple_grad_check([data],
+                                           out,
+                                           x_init=[data_arr],
+                                           place=place,
+                                           eps=eps)
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
+        gradient_checker.triple_grad_check_for_dygraph(self.tile_wrapper,
+                                                       [data],
+                                                       out,
+                                                       x_init=[data_arr],
+                                                       place=place)
+
+    def test_grad(self):
+        paddle.enable_static()
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)
+
+
 if __name__ == "__main__":
     paddle.enable_static()
     unittest.main()
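With the double-grad path in place, second-order autodiff through paddle.tile also composes in dygraph. A hedged usage sketch, assuming a build that contains this commit (shapes and repeat_times chosen only for illustration):

import paddle

x = paddle.rand([1, 2])
x.stop_gradient = False

y = paddle.tile(x, [2, 1])
loss = (y * y).sum()

# First-order gradient, kept in the graph so it can be differentiated again.
(gx,) = paddle.grad(loss, x, create_graph=True)

# Differentiating gx runs backward through tile_grad, i.e. the
# tile_double_grad path that now invokes tile directly.
(ggx,) = paddle.grad(gx.sum(), x)
print(ggx)  # each entry should be 2 * prod(repeat_times) = 4.0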
