Adjust threshold for nn.ConvTranspose3d unit test (#1372)
Adjust threshold for nn.ConvTranspose3d unit test.

---------

Co-authored-by: Yutao Xu <[email protected]>
daisyden and xytintel authored Feb 18, 2025
1 parent 9712a05 commit a14d1ea
Showing 2 changed files with 39 additions and 34 deletions.
68 changes: 34 additions & 34 deletions test/xpu/skip_list_common.py
@@ -1740,40 +1740,40 @@
"test_fn_fwgrad_bwgrad_nn_functional_conv3d_xpu_complex128",
"test_fn_fwgrad_bwgrad_nn_functional_conv3d_xpu_float64",
),
"test_matmul_cuda_xpu.py": (
# AssertionError: "Bias is not supported when out_dtype is set to Float32" does not match "Could not run 'aten::_scaled_mm' with arguments from the 'CPU' backend.
"test_float32_output_errors_with_bias_xpu",
# RuntimeError: "eye" not implemented for 'Float8_e4m3fn'
"test_float8_basics_xpu",
# AssertionError: "For row-wise scaling, scale_a must be size 1024 but got 1 and scale_b must be size 2048 but got 2" does not match "Could not run 'aten::_scaled_mm' with arguments from the 'CPU' backend.
"test_float8_error_messages_xpu",
# NotImplementedError: Could not run 'aten::_scaled_mm' with arguments from the 'CPU' backend.
"test_float8_bias_relu_edgecase_xpu",
"test_float8_bias_xpu",
"test_float8_rowwise_scaling_sanity_use_fast_accum_False_xpu",
"test_float8_rowwise_scaling_sanity_use_fast_accum_True_xpu",
"test_float8_scale_fast_accum_xpu",
"test_float8_scale_xpu",
"test_non_divisible_leading_dim_bias_False_xpu",
"test_non_divisible_leading_dim_bias_True_xpu",
"test_scaled_mm_change_stride_bfloat16_xpu",
"test_scaled_mm_change_stride_float16_xpu",
"test_scaled_mm_change_stride_float32_xpu",
"test_scaled_mm_vs_emulated_bfloat16_xpu",
"test_scaled_mm_vs_emulated_float16_xpu",
"test_scaled_mm_vs_emulated_float32_xpu",
"test_scaled_mm_vs_emulated_row_wise_bfloat16_xpu",
# AssertionError: Torch not compiled with CUDA enabled
"test_zero_dim_tensorwise_which_dim_zero",
# New added case in 2.7
"test_cublas_addmm_reduced_precision_fp16_accumulate_size_10000_xpu_bfloat16",
"test_cublas_addmm_reduced_precision_fp16_accumulate_size_10000_xpu_float16",
"test_cublas_addmm_reduced_precision_fp16_accumulate_size_1000_xpu_bfloat16",
"test_cublas_addmm_reduced_precision_fp16_accumulate_size_1000_xpu_float16",
"test_cublas_addmm_reduced_precision_fp16_accumulate_size_100_xpu_bfloat16",
"test_cublas_addmm_reduced_precision_fp16_accumulate_size_100_xpu_float16",
"test_cublas_and_lt_reduced_precision_fp16_accumulate_xpu",
),
# "test_matmul_cuda_xpu.py": (
# # AssertionError: "Bias is not supported when out_dtype is set to Float32" does not match "Could not run 'aten::_scaled_mm' with arguments from the 'CPU' backend.
# "test_float32_output_errors_with_bias_xpu",
# # RuntimeError: "eye" not implemented for 'Float8_e4m3fn'
# "test_float8_basics_xpu",
# # AssertionError: "For row-wise scaling, scale_a must be size 1024 but got 1 and scale_b must be size 2048 but got 2" does not match "Could not run 'aten::_scaled_mm' with arguments from the 'CPU' backend.
# "test_float8_error_messages_xpu",
# # NotImplementedError: Could not run 'aten::_scaled_mm' with arguments from the 'CPU' backend.
# "test_float8_bias_relu_edgecase_xpu",
# "test_float8_bias_xpu",
# "test_float8_rowwise_scaling_sanity_use_fast_accum_False_xpu",
# "test_float8_rowwise_scaling_sanity_use_fast_accum_True_xpu",
# "test_float8_scale_fast_accum_xpu",
# "test_float8_scale_xpu",
# "test_non_divisible_leading_dim_bias_False_xpu",
# "test_non_divisible_leading_dim_bias_True_xpu",
# "test_scaled_mm_change_stride_bfloat16_xpu",
# "test_scaled_mm_change_stride_float16_xpu",
# "test_scaled_mm_change_stride_float32_xpu",
# "test_scaled_mm_vs_emulated_bfloat16_xpu",
# "test_scaled_mm_vs_emulated_float16_xpu",
# "test_scaled_mm_vs_emulated_float32_xpu",
# "test_scaled_mm_vs_emulated_row_wise_bfloat16_xpu",
# # AssertionError: Torch not compiled with CUDA enabled
# "test_zero_dim_tensorwise_which_dim_zero",
# # New added case in 2.7
# "test_cublas_addmm_reduced_precision_fp16_accumulate_size_10000_xpu_bfloat16",
# "test_cublas_addmm_reduced_precision_fp16_accumulate_size_10000_xpu_float16",
# "test_cublas_addmm_reduced_precision_fp16_accumulate_size_1000_xpu_bfloat16",
# "test_cublas_addmm_reduced_precision_fp16_accumulate_size_1000_xpu_float16",
# "test_cublas_addmm_reduced_precision_fp16_accumulate_size_100_xpu_bfloat16",
# "test_cublas_addmm_reduced_precision_fp16_accumulate_size_100_xpu_float16",
# "test_cublas_and_lt_reduced_precision_fp16_accumulate_xpu",
# ),
"test_maskedtensor_xpu.py": None,
"quantization/core/test_quantized_op_xpu.py": (
# AssertionError: Torch not compiled with CUDA enabled
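For context on the first file: skip_list_common.py maps each test-file name to a tuple of test names to skip, or None when nothing is skipped, so commenting out the test_matmul_cuda_xpu.py entry presumably re-enables those tests. Below is a minimal sketch of how such a mapping could be turned into pytest deselect arguments; the dict contents and the helper are illustrative, not the repository's actual runner.

    # Illustrative only: a skip_list_common.py-style dict, mapping a file
    # name to a tuple of skipped test names (None means no skips).
    skip_dict = {
        "test_maskedtensor_xpu.py": None,
        "test_matmul_cuda_xpu.py": (
            "test_float8_basics_xpu",
            "test_float8_scale_xpu",
        ),
    }

    def pytest_args(test_file: str) -> list[str]:
        """Build a pytest command line that deselects every skipped test."""
        args = [test_file]
        for name in skip_dict.get(test_file) or ():
            args += ["--deselect", f"{test_file}::{name}"]
        return args

    print(pytest_args("test_matmul_cuda_xpu.py"))
    # ['test_matmul_cuda_xpu.py',
    #  '--deselect', 'test_matmul_cuda_xpu.py::test_float8_basics_xpu',
    #  '--deselect', 'test_matmul_cuda_xpu.py::test_float8_scale_xpu']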
5 changes: 5 additions & 0 deletions test/xpu/xpu_test_utils.py
@@ -424,6 +424,11 @@
torch.float32: tol(atol=3e-5, rtol=5e-5),
}
},
"nn.ConvTranspose3d": {
("TestModule", "test_non_contiguous_tensors"): {
torch.float32: tol(atol=2e-5, rtol=5e-5),
}
},
}


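The second file adds a looser float32 tolerance for nn.ConvTranspose3d in TestModule.test_non_contiguous_tensors. Below is a short sketch of the kind of comparison such a tolerance affects, using torch.testing.assert_close, which passes elementwise when |actual - expected| <= atol + rtol * |expected|; the drift value is made up for illustration.

    import torch

    expected = torch.tensor([1.0])
    actual = expected + 1.5e-5  # hypothetical FP32 drift, e.g. from a non-contiguous path

    # PyTorch's default float32 tolerances are rtol=1.3e-6, atol=1e-5, so the
    # allowed error here is 1e-5 + 1.3e-6 * 1.0 ~= 1.13e-5 and this would fail:
    #   torch.testing.assert_close(actual, expected)

    # With the override added by this commit (atol=2e-5, rtol=5e-5) the allowed
    # error becomes 2e-5 + 5e-5 * 1.0 = 7e-5, so the check passes:
    torch.testing.assert_close(actual, expected, atol=2e-5, rtol=5e-5)
    print("within tolerance")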
