Hi, I got this error when running "import spconv.pytorch as spconv"; any ideas? Both cumm and spconv were built from the Git sources.
My environment:
CUDA 12.4
NVIDIA A100-SXM4-40GB
RuntimeError Traceback (most recent call last)
Cell In[1], line 1
----> 1 import spconv.pytorch as spconv
File ~/yanai/spconv/spconv/pytorch/__init__.py:8
6 import torch
7 from spconv.pytorch.core import SparseConvTensor
----> 8 from spconv.pytorch import functional, ops
9 from spconv.pytorch.conv import (SparseConv1d, SparseConv2d, SparseConv3d,
10 SparseConv4d, SparseConvTranspose1d,
11 SparseConvTranspose2d, SparseConvTranspose3d,
(...)
14 SparseInverseConv4d, SubMConv1d, SubMConv2d,
15 SubMConv3d, SubMConv4d)
16 from spconv.pytorch.identity import Identity
File ~/yanai/spconv/spconv/pytorch/functional.py:24
22 from spconv.pytorch.core import SparseConvTensor
23 from spconv.tools import CUDAKernelTimer
---> 24 from spconv.pytorch import ops, SparseConvTensor
25 from spconv.pytorch.constants import PYTORCH_VERSION
26 from spconv.debug_utils import spconv_save_debug_data
File ~/yanai/spconv/spconv/pytorch/ops.py:38
35 from spconv.utils import nullcontext
37 if not CPU_ONLY_BUILD:
---> 38 from spconv.algo import GEMM, CONV, GEMM_CPP, CONV_CPP
39 else:
40 GEMM = None
File ~/yanai/spconv/spconv/algo.py:1049
1045 def stream_synchronize(self, stream: int):
1046 return GemmMainUnitTest.stream_synchronize(stream)
-> 1049 GEMM = SimpleGemm(ALL_ALGO_DESPS)
1050 CONV = SimpleConv(ALL_CONV_ALGO_DESPS)
1052 GEMM_CPP = GemmTunerSimple([
1053 algocore.get_gemm_algo_desp_from_param(p)
1054 for p in ALL_NATIVE_PARAMS])
File ~/yanai/spconv/spconv/algo.py:224, in SimpleGemm.__init__(self, prebuilt_desps)
219 all_desps = [
220 algocore.get_gemm_algo_desp_from_param(p)
221 for p in ALL_NATIVE_PARAMS
222 ]
223 self.prebuilt_desps = prebuilt_desps
--> 224 self.prebuilt_desp_names = {str(d) for d in prebuilt_desps}
225 if SPCONV_DEBUG_NVRTC_KERNELS:
226 self.prebuilt_desp_names.clear()
RuntimeError: /io/include/tensorview/gemm/core/params.h(97)
tensorop[i] > 0 assert faild. tensorop must be set, but they are [4, 0, 1953327443]
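For completeness, the environment above can be confirmed with a short check like the one below (a minimal sketch; it uses only standard torch APIs, and the spconv/cumm imports are guarded because importing spconv is exactly what fails here):

```python
# Minimal environment check (a sketch): prints the torch/CUDA/GPU info
# reported above and tries to read spconv/cumm versions. The package
# imports are wrapped in try/except since spconv may fail to import.
import torch

print("torch:", torch.__version__)
print("torch built against CUDA:", torch.version.cuda)
if torch.cuda.is_available():
    print("GPU:", torch.cuda.get_device_name(0))
    print("compute capability:", torch.cuda.get_device_capability(0))

for pkg in ("spconv", "cumm"):
    try:
        mod = __import__(pkg)
        print(pkg, "version:", getattr(mod, "__version__", "unknown"))
    except Exception as exc:
        print(pkg, "failed to import:", exc)
```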