rm unittests eager guard tests part5 dataloader2dygraph_mnist (#48816)
yjjiang11 authored Dec 12, 2022
1 parent f53e5a0 commit 737fbdb
Showing 10 changed files with 8 additions and 91 deletions.
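
The files all make the same kind of change: the _test_eager_guard import goes away, and checks that used to run twice (once under the guard and once in the default mode) now run only once, because eager mode is the default dygraph mode. A minimal sketch of the typical before/after shape, with illustrative class names and placeholder bodies, assuming a Paddle version in which _test_eager_guard still exists:

import unittest

from paddle.fluid.framework import _test_eager_guard  # import removed by this commit


class OldStyleTest(unittest.TestCase):
    # Before: the real check lives in a func_test_* helper and the test_*
    # wrapper runs it twice, once under the eager guard and once without it.
    def func_test_main(self):
        pass  # actual assertions go here

    def test_main(self):
        with _test_eager_guard():
            self.func_test_main()
        self.func_test_main()


class NewStyleTest(unittest.TestCase):
    # After: eager mode is the default, so the check becomes a plain test_*
    # method and both the wrapper and the guard import are deleted.
    def test_main(self):
        pass  # actual assertions go here


if __name__ == '__main__':
    unittest.main()
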
22 changes: 3 additions & 19 deletions python/paddle/fluid/tests/unittests/test_dataloader_dataset.py
@@ -17,12 +17,11 @@

import paddle
import paddle.vision.transforms as transforms
from paddle.fluid.framework import _test_eager_guard
from paddle.io import Dataset


class TestDatasetAbstract(unittest.TestCase):
def func_test_main(self):
def test_main(self):
dataset = Dataset()
try:
d = dataset[0]
@@ -36,11 +35,6 @@ def func_test_main(self):
except NotImplementedError:
pass

def test_main(self):
with _test_eager_guard():
self.func_test_main()
self.func_test_main()


class TestDatasetWithDiffOutputPlace(unittest.TestCase):
def get_dataloader(self, num_workers):
@@ -68,7 +62,7 @@ def run_check_on_cpu(self):
self.assertTrue(label.place.is_cpu_place())
break

def func_test_single_process(self):
def test_single_process(self):
self.run_check_on_cpu()
if paddle.is_compiled_with_cuda():
# Get (image, label) tuple from MNIST dataset
@@ -80,12 +74,7 @@ def func_test_single_process(self):
self.assertTrue(label.place.is_cuda_pinned_place())
break

def test_single_process(self):
with _test_eager_guard():
self.func_test_single_process()
self.func_test_single_process()

def func_test_multi_process(self):
def test_multi_process(self):
# DataLoader with multi-process mode is not supported on MacOs and Windows currently
if sys.platform != 'darwin' and sys.platform != 'win32':
self.run_check_on_cpu()
@@ -99,11 +88,6 @@ def func_test_multi_process(self):
self.assertTrue(label.place.is_cuda_pinned_place())
break

def test_multi_process(self):
with _test_eager_guard():
self.func_test_multi_process()
self.func_test_multi_process()


if __name__ == '__main__':
unittest.main()
9 changes: 0 additions & 9 deletions python/paddle/fluid/tests/unittests/test_deform_conv2d.py
@@ -19,7 +19,6 @@

import paddle
import paddle.nn.initializer as I
from paddle.fluid.framework import _test_eager_guard


class TestDeformConv2D(TestCase):
@@ -233,10 +232,6 @@ def test_identity(self):
self.place = paddle.CUDAPlace(0)
self._test_identity()

def test_identity_with_eager_guard(self):
with _test_eager_guard():
self.test_identity()


class TestDeformConv2DFunctional(TestCase):
batch_size = 4
@@ -544,10 +539,6 @@ def test_identity(self):
self.place = paddle.CUDAPlace(0)
self._test_identity()

def test_identity_with_eager_guard(self):
with _test_eager_guard():
self.test_identity()


# testcases for DeformConv2D
class TestDeformConv2DWithPadding(TestDeformConv2D):
@@ -18,7 +18,6 @@
from op_test import OpTest

import paddle
from paddle.fluid.framework import _test_eager_guard

paddle.enable_static()

@@ -442,10 +441,6 @@ def test_invalid_filter():

self.assertRaises(ValueError, test_invalid_filter)

def test_error_with_eager_guard(self):
with _test_eager_guard():
self.test_error()


class TestDeformConv2DAPI(unittest.TestCase):
def test_api(self):
@@ -484,10 +479,6 @@ def test_deform_conv2d_v2():

test_deform_conv2d_v2()

def test_api_with_eager_guard(self):
with _test_eager_guard():
self.test_api()


if __name__ == '__main__':
unittest.main()
5 changes: 0 additions & 5 deletions python/paddle/fluid/tests/unittests/test_determinant_op.py
@@ -18,7 +18,6 @@
from op_test import OpTest

import paddle
from paddle.fluid.framework import _test_eager_guard

paddle.enable_static()

@@ -87,10 +86,6 @@ def test_api_dygraph(self):
np.testing.assert_allclose(out.numpy(), out_ref, rtol=0.001)
paddle.enable_static()

def test_eager(self):
with _test_eager_guard():
self.test_api_dygraph()


class TestSlogDeterminantOp(OpTest):
def setUp(self):
5 changes: 0 additions & 5 deletions python/paddle/fluid/tests/unittests/test_diag_v2.py
@@ -20,7 +20,6 @@
import paddle
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
from paddle.fluid.framework import _test_eager_guard


class TestDiagV2Op(OpTest):
@@ -281,8 +280,6 @@ def run_static(self, use_gpu=False):
def test_cpu(self):
paddle.disable_static(place=paddle.fluid.CPUPlace())
self.run_imperative()
with _test_eager_guard():
self.run_imperative()

paddle.enable_static()

@@ -295,8 +292,6 @@ def test_gpu(self):

paddle.disable_static(place=paddle.fluid.CUDAPlace(0))
self.run_imperative()
with _test_eager_guard():
self.run_imperative()
paddle.enable_static()

with fluid.program_guard(fluid.Program()):
16 changes: 5 additions & 11 deletions python/paddle/fluid/tests/unittests/test_diagonal_op.py
@@ -18,7 +18,6 @@
from op_test import OpTest

import paddle
from paddle.fluid.framework import _test_eager_guard

paddle.enable_static()

@@ -157,12 +156,11 @@ def test_api_dygraph(self):

def test_api_eager(self):
paddle.disable_static(self.place)
with _test_eager_guard():
x_tensor = paddle.to_tensor(self.x)
out = paddle.diagonal(x_tensor)
out2 = paddle.diagonal(x_tensor, offset=0, axis1=2, axis2=1)
out3 = paddle.diagonal(x_tensor, offset=1, axis1=0, axis2=1)
out4 = paddle.diagonal(x_tensor, offset=0, axis1=1, axis2=2)
x_tensor = paddle.to_tensor(self.x)
out = paddle.diagonal(x_tensor)
out2 = paddle.diagonal(x_tensor, offset=0, axis1=2, axis2=1)
out3 = paddle.diagonal(x_tensor, offset=1, axis1=0, axis2=1)
out4 = paddle.diagonal(x_tensor, offset=0, axis1=1, axis2=2)
out_ref = np.diagonal(self.x)
np.testing.assert_allclose(out.numpy(), out_ref, rtol=1e-08)
out2_ref = np.diagonal(self.x, offset=0, axis1=2, axis2=1)
@@ -174,10 +172,6 @@ def test_api_eager(self):

paddle.enable_static()

def test_api_eager_dygraph(self):
with _test_eager_guard():
self.test_api_dygraph()


if __name__ == '__main__':
unittest.main()
7 changes: 0 additions & 7 deletions python/paddle/fluid/tests/unittests/test_diff_op.py
@@ -19,7 +19,6 @@
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.framework import _test_eager_guard


class TestDiffOp(unittest.TestCase):
@@ -75,9 +74,6 @@ def func_dygraph(self):
self.assertTrue((out.numpy() == self.output).all(), True)

def test_dygraph(self):
with _test_eager_guard():
self.setUp()
self.func_dygraph()
self.setUp()
self.func_dygraph()

@@ -145,9 +141,6 @@ def func_grad(self):
raise RuntimeError("Check Diff Gradient Failed")

def test_grad(self):
with _test_eager_guard():
self.setUp()
self.func_grad()
self.setUp()
self.func_grad()

12 changes: 0 additions & 12 deletions python/paddle/fluid/tests/unittests/test_digamma_op.py
@@ -21,7 +21,6 @@
import paddle
import paddle.fluid as fluid
import paddle.static as static
from paddle.fluid.framework import _test_eager_guard


class TestDigammaOp(OpTest):
@@ -95,10 +94,6 @@ def test_in_dynamic_mode(self):
res = paddle.digamma(input_t).numpy()
np.testing.assert_allclose(res, sc_res, rtol=1e-05)

def test_in_eager_dynamic_mode(self):
with _test_eager_guard():
self.test_in_dynamic_mode()

def test_name_argument(self):
with static.program_guard(static.Program()):
x = static.data(name="x", shape=self._shape, dtype=self.dtypes[0])
@@ -119,13 +114,6 @@ def test_dtype_error(self):
input_t = paddle.to_tensor(input)
res = paddle.digamma(input_t)

with self.assertRaises(RuntimeError):
with fluid.dygraph.guard():
with _test_eager_guard():
input = np.random.random(self._shape).astype("int32")
input_t = paddle.to_tensor(input)
res = paddle.digamma(input_t)


if __name__ == "__main__":
unittest.main()
11 changes: 0 additions & 11 deletions python/paddle/fluid/tests/unittests/test_dist_base.py
@@ -30,7 +30,6 @@
import paddle.fluid.dygraph as dygraph
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
from paddle.fluid import compiler
from paddle.fluid.framework import _test_eager_guard
from paddle.fluid.incubate.fleet.collective import DistributedStrategy, fleet

RUN_STEP = 5
@@ -1718,16 +1717,6 @@ def check_with_place(
log_name="",
):
if self._dygraph and (self._gloo_mode or self._nccl2_mode):
need_envs.update({"FLAGS_enable_eager_mode": "1"})
with _test_eager_guard():
self.check_with_place_func(
model_file=model_file,
delta=delta,
check_error_log=check_error_log,
need_envs=need_envs,
log_name=log_name,
)
need_envs.update({"FLAGS_enable_eager_mode": "0"})
self.check_with_place_func(
model_file=model_file,
delta=delta,
@@ -18,7 +18,6 @@

import paddle
import paddle.fluid as fluid
from paddle.fluid.framework import _test_eager_guard
from paddle.nn import Linear


@@ -136,8 +135,6 @@ def func_mnist_fp16(self):
print(loss.numpy())

def test_mnist_fp16(self):
with _test_eager_guard():
self.func_mnist_fp16()
self.func_mnist_fp16()

