diff --git a/test/amp/test_amp_api.py b/test/amp/test_amp_api.py
index 7c2a1f870696e..607117c84aa04 100644
--- a/test/amp/test_amp_api.py
+++ b/test/amp/test_amp_api.py
@@ -20,9 +20,15 @@
 import paddle
 import paddle.nn.functional as F
 from paddle import nn
+from paddle.fluid import core
 from paddle.static import amp
 
 
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or paddle.device.cuda.get_device_capability()[0] < 7.0,
+    "run test when gpu's compute capability is at least 7.0.",
+)
 class TestAutoCast(AmpTestBase):
     def setUp(self):
         self._conv = paddle.nn.Conv2D(
@@ -56,6 +62,11 @@ def forward(self, x):
         return out3
 
 
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or paddle.device.cuda.get_device_capability()[0] < 7.0,
+    "run test when gpu's compute capability is at least 7.0.",
+)
 class TestStaticDecorate(AmpTestBase):
     def check_results(
         self, use_amp, dtype, level, use_promote, expected_op_calls
@@ -127,6 +138,11 @@ def test_static_amp_OD(self):
         paddle.disable_static()
 
 
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or paddle.device.cuda.get_device_capability()[0] < 7.0,
+    "run test when gpu's compute capability is at least 7.0.",
+)
 class TestGradScaler(AmpTestBase):
     def test_amp_grad_scaler(self):
         model = paddle.nn.Conv2D(3, 2, 3)
@@ -154,6 +170,11 @@ def test_amp_grad_scaler(self):
         self.assertTrue('check_finite_and_unscale' not in op_list)
 
 
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or paddle.device.cuda.get_device_capability()[0] < 7.0,
+    "run test when gpu's compute capability is at least 7.0.",
+)
 class TestFp16Guard(AmpTestBase):
     def test_fp16_gurad(self):
         paddle.enable_static()
diff --git a/test/amp/test_amp_decorate.py b/test/amp/test_amp_decorate.py
index 1a77146cf1de7..58989571aad8b 100644
--- a/test/amp/test_amp_decorate.py
+++ b/test/amp/test_amp_decorate.py
@@ -16,6 +16,7 @@
 
 import paddle
 import paddle.nn.functional as F
+from paddle.fluid import core
 
 
 class ConvBNLayer(paddle.nn.Layer):
@@ -77,6 +78,11 @@ def forward(self, inputs):
         return x
 
 
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or paddle.device.cuda.get_device_capability()[0] < 7.0,
+    "run test when gpu's compute capability is at least 7.0.",
+)
 class TestAMPDecorate(unittest.TestCase):
     def check_results(self, fp32_layers=[], fp16_layers=[]):
         for idx in range(len(fp32_layers)):
diff --git a/test/amp/test_amp_list.py b/test/amp/test_amp_list.py
index e61aa8281eccc..1eb5b1fb7b5b3 100644
--- a/test/amp/test_amp_list.py
+++ b/test/amp/test_amp_list.py
@@ -19,6 +19,11 @@
 from paddle.static.amp import AutoMixedPrecisionLists, fp16_lists
 
 
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or paddle.device.cuda.get_device_capability()[0] < 7.0,
+    "run test when gpu's compute capability is at least 7.0.",
+)
 class TestAMPList(unittest.TestCase):
     def setUp(self):
         self.default_black_list = [
diff --git a/test/amp/test_amp_master_grad.py b/test/amp/test_amp_master_grad.py
index d94923f33f6bf..3eaf6546009d0 100644
--- a/test/amp/test_amp_master_grad.py
+++ b/test/amp/test_amp_master_grad.py
@@ -35,6 +35,11 @@ def forward(self, x):
     or not core.is_float16_supported(core.CUDAPlace(0)),
     "core is not complied with CUDA and not support the float16",
 )
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or paddle.device.cuda.get_device_capability()[0] < 7.0,
+    "run test when gpu's compute capability is at least 7.0.",
+)
 class TestMasterGrad(unittest.TestCase):
     def check_results(
         self, fp32_grads, op_list, total_steps, accumulate_batchs_num
diff --git a/test/amp/test_amp_promote.py b/test/amp/test_amp_promote.py
index 9f8395df4137f..95017df905fc9 100644
--- a/test/amp/test_amp_promote.py
+++ b/test/amp/test_amp_promote.py
@@ -18,9 +18,15 @@
 from amp_base_models import AmpTestBase, build_conv_model
 
 import paddle
+from paddle.fluid import core
 from paddle.static import amp
 
 
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or paddle.device.cuda.get_device_capability()[0] < 7.0,
+    "run test when gpu's compute capability is at least 7.0.",
+)
 class TestStaticAmpPromoteStats(AmpTestBase):
     def check_promote_results(
         self, use_amp, dtype, level, use_promote, expected_op_calls, debug_info
@@ -103,6 +109,11 @@ def test_static_amp_o2(self):
         )
 
 
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or paddle.device.cuda.get_device_capability()[0] < 7.0,
+    "run test when gpu's compute capability is at least 7.0.",
+)
 class TestEagerAmpPromoteStats(AmpTestBase):
     def check_promote_results(
         self, dtype, level, use_promote, expected_op_calls, debug_info
@@ -172,6 +183,11 @@ def test_o2_promote_off(self):
         )
 
 
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or paddle.device.cuda.get_device_capability()[0] < 7.0,
+    "run test when gpu's compute capability is at least 7.0.",
+)
 class TestEagerAmpPromoteSimple(AmpTestBase):
     def setUp(self):
         self._conv = paddle.nn.Conv2D(
diff --git a/test/legacy_test/test_optimizer.py b/test/legacy_test/test_optimizer.py
index 00babb8ad9d92..7a93fe8f33a01 100644
--- a/test/legacy_test/test_optimizer.py
+++ b/test/legacy_test/test_optimizer.py
@@ -1254,6 +1254,11 @@ def test_float32(self):
         self.check_with_dtype('float32')
 
 
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or paddle.device.cuda.get_device_capability()[0] < 7.0,
+    "run test when gpu's compute capability is at least 7.0.",
+)
 class TestMasterWeightSaveForFP16(unittest.TestCase):
     '''
    For Amp-O2, some optimizer(Momentum, Adam ...) will create master weights for parameters to improve the accuracy.
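The identical unittest.skipIf guard is added in front of every test class touched by this patch. If that duplication ever becomes a maintenance concern, the guard could be factored into a single shared decorator, for instance in a common test utility module. The sketch below is only an illustration of that idea under those assumptions; require_cuda_compute_capability is a hypothetical helper name and is not part of this patch.

# Hypothetical helper (not part of the patch above): wraps the repeated
# compute-capability check in one reusable decorator.
import unittest

import paddle
from paddle.fluid import core


def require_cuda_compute_capability(major=7.0):
    """Skip the decorated test unless Paddle is compiled with CUDA and the
    current GPU's major compute capability is at least `major`."""
    should_skip = (
        not core.is_compiled_with_cuda()
        # Only queried when CUDA support is present, mirroring the
        # short-circuit behavior of the inline guards in the patch.
        or paddle.device.cuda.get_device_capability()[0] < major
    )
    return unittest.skipIf(
        should_skip,
        "run test when gpu's compute capability is at least %s." % major,
    )


# Usage sketch:
# @require_cuda_compute_capability(7.0)
# class TestAutoCast(AmpTestBase):
#     ...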