diff --git a/.github/workflows/test_executorch_export.yml b/.github/workflows/test_executorch_export.yml
index 771da52ca5..1571cd0cff 100644
--- a/.github/workflows/test_executorch_export.yml
+++ b/.github/workflows/test_executorch_export.yml
@@ -16,7 +16,7 @@ jobs:
       fail-fast: false
       matrix:
         python-version: ['3.10', '3.11', '3.12']
-        os: [ubuntu-20.04-16-core, macos-15]
+        os: [macos-15]
 
     runs-on: ${{ matrix.os }}
     steps:
diff --git a/.github/workflows/test_executorch_runtime.yml b/.github/workflows/test_executorch_runtime.yml
index f7e3abccef..3aea14f4ee 100644
--- a/.github/workflows/test_executorch_runtime.yml
+++ b/.github/workflows/test_executorch_runtime.yml
@@ -16,7 +16,7 @@ jobs:
       fail-fast: false
      matrix:
         python-version: ['3.10', '3.11', '3.12']
-        os: [ubuntu-20.04, macos-15]
+        os: [macos-15]
 
     runs-on: ${{ matrix.os }}
     steps:
diff --git a/tests/executorch/runtime/test_modeling.py b/tests/executorch/runtime/test_modeling.py
index ff4c96f9e8..a73c82a496 100644
--- a/tests/executorch/runtime/test_modeling.py
+++ b/tests/executorch/runtime/test_modeling.py
@@ -73,9 +73,8 @@ def test_load_model_from_local_path(self):
 
     @slow
     @pytest.mark.run_slow
-    @require_read_token
     def test_llama3_2_1b_text_generation_with_xnnpack(self):
-        model_id = "meta-llama/Llama-3.2-1B"
+        model_id = "NousResearch/Llama-3.2-1B"
         model = ExecuTorchModelForCausalLM.from_pretrained(
             model_name_or_path=model_id,
             export=True,
@@ -96,9 +95,8 @@ def test_llama3_2_1b_text_generation_with_xnnpack(self):
 
     @slow
     @pytest.mark.run_slow
-    @require_read_token
     def test_llama3_2_3b_text_generation_with_xnnpack(self):
-        model_id = "meta-llama/Llama-3.2-3B"
+        model_id = "NousResearch/Hermes-3-Llama-3.2-3B"
         model = ExecuTorchModelForCausalLM.from_pretrained(
             model_name_or_path=model_id,
             export=True,
@@ -123,7 +121,6 @@ def test_llama3_2_3b_text_generation_with_xnnpack(self):
 
     @slow
     @pytest.mark.run_slow
-    @require_read_token
     def test_qwen2_5_text_generation_with_xnnpack(self):
         model_id = "Qwen/Qwen2.5-0.5B"
         model = ExecuTorchModelForCausalLM.from_pretrained(
@@ -146,9 +143,9 @@ def test_qwen2_5_text_generation_with_xnnpack(self):
 
     @slow
     @pytest.mark.run_slow
-    @require_read_token
     def test_gemma2_text_generation_with_xnnpack(self):
-        model_id = "google/gemma-2-2b"
+        # model_id = "google/gemma-2-2b"
+        model_id = "unsloth/gemma-2-2b-it"
         model = ExecuTorchModelForCausalLM.from_pretrained(
             model_name_or_path=model_id,
             export=True,
@@ -169,9 +166,9 @@ def test_gemma2_text_generation_with_xnnpack(self):
 
     @slow
     @pytest.mark.run_slow
-    @require_read_token
     def test_gemma_text_generation_with_xnnpack(self):
-        model_id = "google/gemma-2b"
+        # model_id = "google/gemma-2b"
+        model_id = "weqweasdas/RM-Gemma-2B"
         model = ExecuTorchModelForCausalLM.from_pretrained(
             model_name_or_path=model_id,
             export=True,
@@ -192,7 +189,6 @@ def test_gemma_text_generation_with_xnnpack(self):
 
     @slow
     @pytest.mark.run_slow
-    @require_read_token
     def test_olmo_text_generation_with_xnnpack(self):
         model_id = "allenai/OLMo-1B-hf"
         model = ExecuTorchModelForCausalLM.from_pretrained(