Skip to content

Commit

Permalink
Merge branch 'main' into dependabot-github_actions-pypa-gh-action-pypi-publish-1.12.2

Browse files Browse the repository at this point in the history
  • Loading branch information
t-vi authored Dec 2, 2024
2 parents ef1df0a + e0d1494 commit 64988c6
Show file tree
Hide file tree
Showing 3 changed files with 8 additions and 4 deletions.
4 changes: 3 additions & 1 deletion thunder/tests/test_jit_general.py
Original file line number Diff line number Diff line change
Expand Up @@ -680,6 +680,8 @@ def test_litgpt_variants(name, device):

if device == "cuda" and not torch.cuda.is_available():
pytest.skip("CUDA not available")
if device == "cuda" and name == "falcon-40b-like":
pytest.skip("NVFuser reenable when https://github.com/NVIDIA/Fuser/issues/3505 is fixed, Thunder issue #1504")
if device == "cuda" and name == "falcon-7b-like":
pytest.skip("NVFuser reenable when https://github.com/NVIDIA/Fuser/issues/3292 is fixed")

Expand Down Expand Up @@ -783,7 +785,7 @@ def sample(logits):
("cpu", "cuda"),
)
def test_tom_overrides_proxy(device):
from litgpt.config import Config
from thunder.tests.litgpt_model import Config
from litgpt.model import GPT

if device == "cuda" and not torch.cuda.is_available():
Expand Down
3 changes: 2 additions & 1 deletion thunder/tests/test_torch_compile_executor.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ def test_supported_ops_are_in_pytorch_executor():
# appropriate visual studio config.
@pytest.mark.skipif(not is_inductor_supported() or platform.system() == "Windows", reason="inductor unsupported")
def test_torch_compile_litgpt():
from thunder.tests.litgpt_model import Config
from litgpt.model import GPT

model = GPT.from_name("llama1-like", n_layer=1)
Expand All @@ -40,7 +41,7 @@ def test_torch_compile_litgpt():
@requiresCUDA
@pytest.mark.skipif(not device_supports_bf16(torch.device("cuda")), reason="bf16 is not supported")
def test_torch_compile_cat_nvfuser_phi2_tanh():
from litgpt.config import Config
from thunder.tests.litgpt_model import Config
from litgpt.model import GPT

device = torch.device("cuda")
Expand Down
5 changes: 3 additions & 2 deletions thunder/tests/test_transforms.py
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,8 @@ def _test_equal_nvtx_push_and_pop(trc):
@requiresCUDA
def test_materialization():
from thunder.transforms import MaterializationTransform
from litgpt.config import Config
from thunder.tests.litgpt_model import Config

from litgpt.model import GPT

config = Config.from_name("llama2-like")
Expand Down Expand Up @@ -121,7 +122,7 @@ def test_materialization():
def test_quantization_on_meta():
from thunder.transforms import MaterializationTransform
from thunder.transforms.quantization import BitsAndBytesLinearQuant4bit, get_bitsandbytes_executor
from litgpt.config import Config
from thunder.tests.litgpt_model import Config
from litgpt.model import GPT

bitsandbytes_executor = get_bitsandbytes_executor()
Expand Down

0 comments on commit 64988c6

Please sign in to comment.