Skip to content

Commit

Permalink
[HFLM] Add support for Ascend NPU
Browse files Browse the repository at this point in the history
Co-authored-by: jiaqiw09 <[email protected]>
Co-authored-by: zhabuyu <[email protected]>
  • Loading branch information
3 people committed May 25, 2024
1 parent 78a215e commit f54265d
Showing 1 changed file with 5 additions and 0 deletions.
5 changes: 5 additions & 0 deletions lm_eval/models/huggingface.py
Original file line number Diff line number Diff line change
Expand Up @@ -153,12 +153,16 @@ def __init__(
if accelerator.num_processes > 1:
self.accelerator = accelerator

if "npu" in accelerator.device.type:
gpus = torch.npu.device_count()

if not (parallelize or accelerator.num_processes > 1):
# use user-passed device
device_list = set(
["cuda", "cpu"]
+ [f"cuda:{i}" for i in range(gpus)]
+ ["mps", "mps:0"]
+ [f"npu:{i}" for i in range(gpus)]
)
if device and device in device_list:
self._device = torch.device(device)
Expand Down Expand Up @@ -323,6 +327,7 @@ def __init__(
in [
DistributedType.FSDP,
DistributedType.MULTI_GPU,
DistributedType.MULTI_NPU,
]
), "Unsupported distributed type provided. Only DDP and FSDP are supported."
if accelerator.distributed_type == DistributedType.FSDP:
Expand Down

0 comments on commit f54265d

Please sign in to comment.