Skip to content

Commit

Permalink
Merge branch 'master' into loadams/unpin-transformers-download-model
Browse files Browse the repository at this point in the history
  • Loading branch information
loadams authored Dec 12, 2024
2 parents bb9e3ce + 9182947 commit 0678018
Showing 1 changed file with 6 additions and 1 deletion.
7 changes: 6 additions & 1 deletion accelerator/real_accelerator.py
Original file line number Diff line number Diff line change
Expand Up @@ -167,7 +167,12 @@ def get_accelerator():
import torch

# Determine if we are on a GPU or x86 CPU with torch.
if torch.cuda.is_available(): #ignore-cuda
# "torch.cuda.is_available()" provides a stronger guarantee, #ignore-cuda
# ensuring that we are free from CUDA initialization errors,
# while the "torch.cuda.device_count() > 0" check ensures that #ignore-cuda
# we won't try to make any CUDA calls when no device is available.
# For reference: https://github.com/microsoft/DeepSpeed/pull/6810
if torch.cuda.device_count() > 0 and torch.cuda.is_available(): #ignore-cuda
accelerator_name = "cuda"
else:
if accel_logger is not None:
Expand Down

0 comments on commit 0678018

Please sign in to comment.