diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py index 2e467f509fbf..aae82086eb65 100644 --- a/tests/python/unittest/test_operator.py +++ b/tests/python/unittest/test_operator.py @@ -38,6 +38,7 @@ def check_rnn_consistency(cell1, cell2, T, N, I, H, grad_req, rtol=1e-2, atol=1e-4): if default_context().device_type == 'cpu': # NOTE(zixuanweeei): Currently, we don't add `add` requests support on fused mkl-dnn rnn operator. + # This issue is tracked at https://github.com/apache/incubator-mxnet/issues/16578 if isinstance(grad_req, dict) and 'add' in grad_req.values(): print("Skip the test when requiring `add` operation against gradients on CPU context.") return @@ -257,6 +258,9 @@ def test_rnntanh_bidirectional(): @with_seed() @assert_raises_cudnn_not_satisfied(min_version='5.1.10') def test_rnnrelu_sym(): + if default_context().device_type == 'gpu': + print("Skip test `test_rnnrelu_sym` on GPU. This is tracked at https://github.com/apache/incubator-mxnet/issues/16548") + return Ts = [1, 5] Ns = [1, 32] Is = [32, 128, 512]