Skip to content

Commit

Permalink
Test with public models and disable linux CI
Browse files — browse the repository at this point in the history
  • Branch information: michaelbenayoun committed Dec 17, 2024
1 parent 0d9322a commit 3c5f757
Show file tree
Hide file tree
Showing 3 changed files with 8 additions and 12 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/test_executorch_export.yml
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ jobs:
fail-fast: false
matrix:
python-version: ['3.10', '3.11', '3.12']
os: [ubuntu-20.04-16-core, macos-15]
os: [macos-15]

runs-on: ${{ matrix.os }}
steps:
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/test_executorch_runtime.yml
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ jobs:
fail-fast: false
matrix:
python-version: ['3.10', '3.11', '3.12']
os: [ubuntu-20.04, macos-15]
os: [macos-15]

runs-on: ${{ matrix.os }}
steps:
Expand Down
16 changes: 6 additions & 10 deletions tests/executorch/runtime/test_modeling.py
Original file line number Diff line number Diff line change
Expand Up @@ -73,9 +73,8 @@ def test_load_model_from_local_path(self):

@slow
@pytest.mark.run_slow
@require_read_token
def test_llama3_2_1b_text_generation_with_xnnpack(self):
model_id = "meta-llama/Llama-3.2-1B"
model_id = "NousResearch/Llama-3.2-1B"
model = ExecuTorchModelForCausalLM.from_pretrained(
model_name_or_path=model_id,
export=True,
Expand All @@ -96,9 +95,8 @@ def test_llama3_2_1b_text_generation_with_xnnpack(self):

@slow
@pytest.mark.run_slow
@require_read_token
def test_llama3_2_3b_text_generation_with_xnnpack(self):
model_id = "meta-llama/Llama-3.2-3B"
model_id = "NousResearch/Hermes-3-Llama-3.2-3B"
model = ExecuTorchModelForCausalLM.from_pretrained(
model_name_or_path=model_id,
export=True,
Expand All @@ -123,7 +121,6 @@ def test_llama3_2_3b_text_generation_with_xnnpack(self):

@slow
@pytest.mark.run_slow
@require_read_token
def test_qwen2_5_text_generation_with_xnnpack(self):
model_id = "Qwen/Qwen2.5-0.5B"
model = ExecuTorchModelForCausalLM.from_pretrained(
Expand All @@ -146,9 +143,9 @@ def test_qwen2_5_text_generation_with_xnnpack(self):

@slow
@pytest.mark.run_slow
@require_read_token
def test_gemma2_text_generation_with_xnnpack(self):
model_id = "google/gemma-2-2b"
# model_id = "google/gemma-2-2b"
model_id = "unsloth/gemma-2-2b-it"
model = ExecuTorchModelForCausalLM.from_pretrained(
model_name_or_path=model_id,
export=True,
Expand All @@ -169,9 +166,9 @@ def test_gemma2_text_generation_with_xnnpack(self):

@slow
@pytest.mark.run_slow
@require_read_token
def test_gemma_text_generation_with_xnnpack(self):
model_id = "google/gemma-2b"
# model_id = "google/gemma-2b"
model_id = "weqweasdas/RM-Gemma-2B"
model = ExecuTorchModelForCausalLM.from_pretrained(
model_name_or_path=model_id,
export=True,
Expand All @@ -192,7 +189,6 @@ def test_gemma_text_generation_with_xnnpack(self):

@slow
@pytest.mark.run_slow
@require_read_token
def test_olmo_text_generation_with_xnnpack(self):
model_id = "allenai/OLMo-1B-hf"
model = ExecuTorchModelForCausalLM.from_pretrained(
Expand Down

0 comments on commit 3c5f757

Please sign in to comment.