rm eager guard tests part3_1 (#49059)
yjjiang11 authored Dec 15, 2022
1 parent d0fefa2 commit d808f16
Showing 6 changed files with 16 additions and 54 deletions.
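All six files receive the same mechanical edit: the from paddle.fluid.framework import _test_eager_guard import is dropped, the test_eager dispatcher methods that re-ran sibling tests under the guard are deleted, and guarded test bodies are dedented one level so they run directly, presumably because eager dygraph is now the default mode and the guard had become a no-op. In test_clip_op.py, func_clip_dygraph is also renamed back to test_clip_dygraph. A runnable sketch of the resulting test shape follows the first file's diff below.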
python/paddle/fluid/tests/unittests/test_cast_op.py (9 additions, 11 deletions)
@@ -24,7 +24,6 @@
 import paddle.fluid.core as core
 import paddle.fluid.layers as layers
 from paddle.fluid import Program, program_guard
-from paddle.fluid.framework import _test_eager_guard


 class TestCastOpFp32ToFp64(OpTest):
@@ -122,16 +121,15 @@ def test_errors(self):
 class TestCastOpEager(unittest.TestCase):
     def test_eager(self):
         with paddle.fluid.dygraph.base.guard():
-            with _test_eager_guard():
-                x = paddle.ones([2, 2], dtype="float16")
-                x.stop_gradient = False
-                out = paddle.cast(x, "float32")
-                np.testing.assert_array_equal(
-                    out, np.ones([2, 2]).astype('float32')
-                )
-                out.backward()
-                np.testing.assert_array_equal(x.gradient(), x.numpy())
-                self.assertTrue(x.gradient().dtype == np.float16)
+            x = paddle.ones([2, 2], dtype="float16")
+            x.stop_gradient = False
+            out = paddle.cast(x, "float32")
+            np.testing.assert_array_equal(
+                out, np.ones([2, 2]).astype('float32')
+            )
+            out.backward()
+            np.testing.assert_array_equal(x.gradient(), x.numpy())
+            self.assertTrue(x.gradient().dtype == np.float16)


 class TestCastDoubleGradCheck(unittest.TestCase):
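For reference, the de-guarded cast test extracted as a standalone module; a minimal sketch assuming a Paddle build where eager dygraph is the default (class and method names match the hunk above):

import unittest

import numpy as np
import paddle


class TestCastOpEager(unittest.TestCase):
    def test_eager(self):
        with paddle.fluid.dygraph.base.guard():
            # Body formerly nested under `with _test_eager_guard():`.
            x = paddle.ones([2, 2], dtype="float16")
            x.stop_gradient = False
            out = paddle.cast(x, "float32")
            np.testing.assert_array_equal(
                out, np.ones([2, 2]).astype('float32')
            )
            out.backward()
            # The gradient of cast comes back in the input dtype.
            np.testing.assert_array_equal(x.gradient(), x.numpy())
            self.assertTrue(x.gradient().dtype == np.float16)


if __name__ == '__main__':
    unittest.main()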
python/paddle/fluid/tests/unittests/test_clip_op.py (7 additions, 14 deletions)
@@ -20,7 +20,6 @@
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
-from paddle.fluid.framework import _test_eager_guard


 class TestClipOp(OpTest):
@@ -231,7 +230,7 @@ def test_clip(self):
         )
         paddle.disable_static()

-    def func_clip_dygraph(self):
+    def test_clip_dygraph(self):
         paddle.disable_static()
         place = (
             fluid.CUDAPlace(0)
@@ -279,20 +278,14 @@ def func_clip_dygraph(self):
             out_6.numpy(), data.clip(0.2, 0.8), rtol=1e-05
         )

-    def test_clip_dygraph(self):
-        with _test_eager_guard():
-            self.func_clip_dygraph()
-        self.func_clip_dygraph()
-
     def test_clip_dygraph_default_max(self):
         paddle.disable_static()
-        with _test_eager_guard():
-            x_int32 = paddle.to_tensor([1, 2, 3], dtype="int32")
-            x_int64 = paddle.to_tensor([1, 2, 3], dtype="int64")
-            x_f32 = paddle.to_tensor([1, 2, 3], dtype="float32")
-            egr_out1 = paddle.clip(x_int32, min=1)
-            egr_out2 = paddle.clip(x_int64, min=1)
-            egr_out3 = paddle.clip(x_f32, min=1)
+        x_int32 = paddle.to_tensor([1, 2, 3], dtype="int32")
+        x_int64 = paddle.to_tensor([1, 2, 3], dtype="int64")
+        x_f32 = paddle.to_tensor([1, 2, 3], dtype="float32")
+        egr_out1 = paddle.clip(x_int32, min=1)
+        egr_out2 = paddle.clip(x_int64, min=1)
+        egr_out3 = paddle.clip(x_f32, min=1)
         x_int32 = paddle.to_tensor([1, 2, 3], dtype="int32")
         x_int64 = paddle.to_tensor([1, 2, 3], dtype="int64")
         x_f32 = paddle.to_tensor([1, 2, 3], dtype="float32")
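The func_clip_dygraph to test_clip_dygraph rename matters because unittest only auto-collects methods whose names start with test_; with the eager dispatcher gone, nothing would invoke a func_-prefixed body. A small illustration with a hypothetical TestCase:

import unittest


class DiscoveryDemo(unittest.TestCase):
    def test_collected(self):
        # Picked up automatically by the unittest runner.
        self.assertTrue(True)

    def func_not_collected(self):
        # Ignored by discovery; only runs if called explicitly.
        raise AssertionError("never reached by the runner")


if __name__ == '__main__':
    unittest.main()  # runs test_collected only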
python/paddle/fluid/tests/unittests/test_complex_abs.py (0 additions, 5 deletions)
@@ -19,7 +19,6 @@

 import paddle
 import paddle.fluid.dygraph as dg
-from paddle.fluid.framework import _test_eager_guard


 class TestComplexAbsOp(OpTest):
@@ -109,10 +108,6 @@ def test_all_positive(self):
         y = paddle.abs(paddle.to_tensor(x))
         np.testing.assert_allclose(np.abs(x), y.numpy(), rtol=1e-05)

-    def test_eager(self):
-        with _test_eager_guard():
-            self.test_all_positive()
-

 class TestRealAbsOp(OpTest):
     def setUp(self):
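The retained all-positive check, restated as a standalone script; a sketch adapted from the context lines above (the input shape is an illustrative assumption):

import numpy as np
import paddle

# Strictly positive input, so abs() is the identity here.
x = 1 + np.random.random([2, 3]).astype('float64')
y = paddle.abs(paddle.to_tensor(x))
np.testing.assert_allclose(np.abs(x), y.numpy(), rtol=1e-05)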
python/paddle/fluid/tests/unittests/test_complex_cast.py (0 additions, 7 deletions)
@@ -17,7 +17,6 @@
 import numpy as np

 import paddle
-from paddle.fluid.framework import _test_eager_guard


 class TestComplexCastOp(unittest.TestCase):
@@ -80,12 +79,6 @@ def test_complex64_complex128(self):
             c_128.cast('complex128').numpy(), c_64.numpy(), rtol=1e-05
         )

-    def test_eager(self):
-        with _test_eager_guard():
-            self.test_complex64_complex128()
-            self.test_real_to_complex()
-            self.test_complex_to_real()
-

 if __name__ == '__main__':
     unittest.main()
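The complex64/complex128 round-trip that test_complex64_complex128 exercises, as a standalone sketch (shape and data are illustrative assumptions; Tensor.cast takes a dtype string, as in the context line above):

import numpy as np
import paddle

c_64 = paddle.to_tensor(
    (np.random.random([2, 3]) + 1j * np.random.random([2, 3])).astype('complex64')
)
# Up-cast and compare: values should survive the complex64 -> complex128 trip.
c_128 = c_64.cast('complex128')
np.testing.assert_allclose(c_128.numpy(), c_64.numpy(), rtol=1e-05)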
python/paddle/fluid/tests/unittests/test_complex_elementwise_layers.py (0 additions, 7 deletions)
@@ -20,7 +20,6 @@
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.dygraph as dg
-from paddle.fluid.framework import _test_eager_guard

 paddle_apis = {
     "add": paddle.add,
@@ -112,12 +111,6 @@ def test_real_x_complex_y(self):
         self.compare_by_basic_api(x, y)
         self.compare_op_by_basic_api(x, y)

-    def test_eager(self):
-        with _test_eager_guard():
-            self.test_real_x_complex_y()
-            self.test_complex_x_real_y()
-            self.test_complex_xy()
-

 if __name__ == '__main__':
     unittest.main()
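A standalone sketch of the comparison the retained compare_by_basic_api tests perform, using the "add" entry of the paddle_apis table above (shapes and dtypes are illustrative assumptions):

import numpy as np
import paddle

shape = (2, 3)
x = (np.random.random(shape) + 1j * np.random.random(shape)).astype('complex64')
y = (np.random.random(shape) + 1j * np.random.random(shape)).astype('complex64')

# Compare the paddle API result against the numpy reference.
result = paddle.add(paddle.to_tensor(x), paddle.to_tensor(y))
np.testing.assert_allclose(result.numpy(), x + y, rtol=1e-05)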
python/paddle/fluid/tests/unittests/test_complex_getitem.py (0 additions, 10 deletions)
@@ -18,7 +18,6 @@

 import paddle.fluid as fluid
 import paddle.fluid.dygraph as dg
-from paddle.fluid.framework import _test_eager_guard


 class TestComplexGetitemLayer(unittest.TestCase):
@@ -95,15 +94,6 @@ def test_case6(self):

         np.testing.assert_allclose(x_var_slice.numpy(), x_np_slice)

-    def test_eager(self):
-        with _test_eager_guard():
-            self.test_case1()
-            self.test_case2()
-            self.test_case3()
-            self.test_case4()
-            self.test_case5()
-            self.test_case6()
-

 if __name__ == '__main__':
     unittest.main()
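And a standalone sketch of the slicing check these test_caseN methods keep (the input shape and the [0] slice are illustrative assumptions):

import numpy as np
import paddle.fluid.dygraph as dg

x_np = np.random.randn(2, 3, 4) + 1j * np.random.randn(2, 3, 4)
x_np_slice = x_np[0]

with dg.guard():
    x_var = dg.to_variable(x_np)
    x_var_slice = x_var[0]

# Slicing a complex tensor should match slicing the numpy source.
np.testing.assert_allclose(x_var_slice.numpy(), x_np_slice)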
