skip failed cases due to #809 (#810)
Skipped the cases due to the PyTorch uplift; see #809.

---------

Co-authored-by: Huaiyu, Zheng <huaiyu.zheng@intel.com>
Co-authored-by: Feng Yuan <feng1.yuan@intel.com>
3 people authored and hjhee committed Aug 30, 2024
1 parent 1f2e811 commit 91adfa8
Showing 1 changed file with 12 additions and 0 deletions.
test/xpu/skip_list_common.py: 12 additions, 0 deletions
@@ -1652,6 +1652,8 @@
         "test_matmul_check_entries_tunableop_xpu_float16",
         "test_minimum_tuning_iteration_tunableop_xpu_float16",
         "test_validator_tunableop_rocm_xpu_float32",
+        "test_addmm_relu_tunableop_rocm_xpu_float32",
+        "test_addmm_relu_tunableop_rocm_xpu_float64",
     ),
 
     "test_ops_fwd_gradients_xpu.py": (
@@ -1931,6 +1933,10 @@
         # NotImplementedError: Could not run 'aten::_to_dense' with arguments from the 'SparseXPU' backend.
         "test_fn_fwgrad_bwgrad_to_sparse_xpu_float64",
         "test_forward_mode_AD_to_sparse_xpu_float64",
+
+        # issue: https://github.com/intel/torch-xpu-ops/issues/809
+        "test_fn_fwgrad_bwgrad_nn_functional_conv3d_xpu_complex128",
+        "test_fn_fwgrad_bwgrad_nn_functional_conv3d_xpu_float64",
     ),
 
     "test_matmul_cuda_xpu.py": (
@@ -2442,6 +2448,10 @@
         ### Error #7 in TestBwdGradientsXPU , totally 2 , NotImplementedError: Could not run 'aten::_sparse_coo_tensor_with_dims_and_tensors' with arguments from the 'SparseXPU' backend. This could be because the operator doesn't exist for this backend, or was omitted during the selective/custom build process (if using custom build). If you are a Facebook employee using PyTorch on mobile, please visit https://fburl.com/ptmfixes for possible resolutions. 'aten::_sparse_coo_tensor_with_dims_and_tensors' is only available for these backends: [XPU, Meta, SparseCPU, SparseMeta, BackendSelect, Python, FuncTorchDynamicLayerBackMode, Functionalize, Named, Conjugate, Negative, ZeroTensor, ADInplaceOrView, AutogradOther, AutogradCPU, AutogradCUDA, AutogradHIP, AutogradXLA, AutogradMPS, AutogradIPU, AutogradXPU, AutogradHPU, AutogradVE, AutogradLazy, AutogradMTIA, AutogradPrivateUse1, AutogradPrivateUse2, AutogradPrivateUse3, AutogradMeta, AutogradNestedTensor, Tracer, AutocastCPU, AutocastXPU, AutocastCUDA, FuncTorchBatched, BatchedNestedTensor, FuncTorchVmapMode, Batched, VmapMode, FuncTorchGradWrapper, PythonTLSSnapshot, FuncTorchDynamicLayerFrontMode, PreDispatch, PythonDispatcher].
         "test_fn_grad_to_sparse_xpu_float64",
         "test_fn_gradgrad_to_sparse_xpu_float64",
+
+        # issue: https://github.com/intel/torch-xpu-ops/issues/809
+        "test_fn_gradgrad_nn_functional_conv3d_xpu_complex128",
+        "test_fn_gradgrad_nn_functional_conv3d_xpu_float64",
     ),
 
     "test_torch_xpu.py": (
@@ -2693,6 +2703,8 @@
         # accuracy issue, TODO
         "test_Conv2d_naive_groups_xpu_float16",
         "test_Conv2d_groups_nobias",
+        # issue: https://github.com/intel/torch-xpu-ops/issues/809
+        "test_thnn_conv_strided_padded_dilated",
     ),
 
     "test_dynamic_shapes_xpu.py": None,
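For context, skip_list_common.py maps each test file to a tuple of test names to skip (or None when nothing is skipped for that file). Below is a minimal sketch of how such a mapping could be consumed; the actual runner logic in torch-xpu-ops may differ, and should_skip is a hypothetical helper added here only for illustration.

# Minimal sketch, assuming the skip list maps test-file names to tuples of
# test-name prefixes, as in test/xpu/skip_list_common.py above.
skip_dict = {
    "test_ops_fwd_gradients_xpu.py": (
        # issue: https://github.com/intel/torch-xpu-ops/issues/809
        "test_fn_fwgrad_bwgrad_nn_functional_conv3d_xpu_complex128",
        "test_fn_fwgrad_bwgrad_nn_functional_conv3d_xpu_float64",
    ),
    "test_dynamic_shapes_xpu.py": None,  # None: no cases skipped for this file
}

def should_skip(test_file: str, test_name: str) -> bool:
    """Return True if test_name matches an entry listed for test_file."""
    skips = skip_dict.get(test_file) or ()
    return any(test_name.startswith(entry) for entry in skips)

# Example: the conv3d forward-over-reverse gradient case is skipped.
print(should_skip("test_ops_fwd_gradients_xpu.py",
                  "test_fn_fwgrad_bwgrad_nn_functional_conv3d_xpu_float64"))  # True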
