From 0b65730079a18e16398fed6079effdbbeec17585 Mon Sep 17 00:00:00 2001
From: Ilya Enkovich
Date: Fri, 6 Sep 2024 10:28:33 -0500
Subject: [PATCH] Remove old LLVM bug workaround. (#141)

Signed-off-by: Ilya Enkovich
---
 python/test/unit/language/test_core.py | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/python/test/unit/language/test_core.py b/python/test/unit/language/test_core.py
index 8c3d2410dc7e..73fe68050896 100644
--- a/python/test/unit/language/test_core.py
+++ b/python/test/unit/language/test_core.py
@@ -2236,12 +2236,6 @@ def kernel(X, Z, BLOCK: tl.constexpr):
 def test_reduce(op, dtype_str, shape, axis, keep_dims, num_ctas, device):
     check_type_supported(dtype_str, device)  # bfloat16 on cc < 80 will not be tested
 
-    # fpext fp16->fp32 is broken in LLVM for large vectors:
-    # https://github.com/llvm/llvm-project/issues/95278
-    # TODO: remove the change after the bug is fixed.
-    if is_cpu() and dtype_str == "float16":
-        shape = (min(shape[0], 512), min(shape[1], 512))
-
     @triton.jit
     def kernel(X, Z, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr, IS_3D: tl.constexpr,
                AXIS: tl.constexpr, KEEP_DIMS: tl.constexpr, USE_I1: tl.constexpr):
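
Context for reviewers: the deleted block capped float16 reduction shapes at 512x512 on CPU because LLVM
miscompiled fpext from fp16 to fp32 for very wide vectors (llvm/llvm-project#95278). With that bug fixed
upstream, test_reduce can again exercise the full shapes. Below is a minimal sketch of the kind of wide
fp16 -> fp32 conversion the workaround avoided; it assumes a triton-cpu build, and the kernel name,
block size, and helper function are illustrative, not taken from this patch.

    import torch
    import triton
    import triton.language as tl


    @triton.jit
    def fp16_to_fp32_kernel(X, Z, BLOCK: tl.constexpr):
        # A single wide fpext fp16 -> fp32: the operation that was
        # miscompiled by LLVM for large vector widths on CPU.
        offs = tl.arange(0, BLOCK)
        x = tl.load(X + offs)                 # float16 input
        tl.store(Z + offs, x.to(tl.float32))  # fpext to float32


    def run(block=1024, device="cpu"):
        # A block beyond the old 512 cap exercises the previously broken path
        # (device="cpu" assumes a triton-cpu backend is installed).
        x = torch.randn(block, dtype=torch.float16, device=device)
        z = torch.empty(block, dtype=torch.float32, device=device)
        fp16_to_fp32_kernel[(1, )](x, z, BLOCK=block)
        assert torch.allclose(z, x.to(torch.float32))
        return z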