@@ -324,21 +324,26 @@ def run(self) -> None:
324
324
325
325
326
326
def _is_hpu() -> bool:
    """Return True when the build should target Intel Gaudi (HPU).

    Resolution order:
      1. If the ``VLLM_TARGET_DEVICE`` environment variable was set
         explicitly, trust it and skip hardware autodetection entirely.
      2. Otherwise probe the machine: first try running ``hl-smi``; if that
         fails, fall back to checking whether the ``habanalabs`` kernel
         driver is loaded (Linux only).
    """
    # Explicit override: if the env var is set and matches the resolved
    # target device, the user chose the device themselves — honor it
    # without probing the hardware.
    if os.getenv("VLLM_TARGET_DEVICE", None) == VLLM_TARGET_DEVICE:
        return VLLM_TARGET_DEVICE == "hpu"

    # Autodetection. hl-smi is the Habana management CLI; with check=True a
    # nonzero exit raises CalledProcessError, so reaching the line after
    # subprocess.run() already guarantees returncode == 0 — no need to
    # inspect the CompletedProcess.
    is_hpu_available = False
    try:
        subprocess.run(["hl-smi"], capture_output=True, check=True)
        is_hpu_available = True
    except (FileNotFoundError, PermissionError, subprocess.CalledProcessError):
        # hl-smi is missing, not executable, or failed — fall back to
        # checking for the habanalabs kernel module. lsmod only exists on
        # Linux, so gate the fallback on the platform.
        if sys.platform.startswith("linux"):
            try:
                output = subprocess.check_output(
                    'lsmod | grep habanalabs | wc -l', shell=True)
                is_hpu_available = int(output) > 0
            except (ValueError, FileNotFoundError, PermissionError,
                    subprocess.CalledProcessError):
                # Best-effort detection: any failure here means "no HPU".
                pass
    return is_hpu_available
342
347
343
348
344
349
def _no_device () -> bool :
0 commit comments