From 3e319d6ecc63e7de1f84201a77acb2b1fecbfde1 Mon Sep 17 00:00:00 2001
From: zhulin1
Date: Tue, 5 Mar 2024 10:40:07 +0800
Subject: [PATCH] fix condition

---
 .github/workflows/daily_ete_test.yml             | 16 ++++++++--------
 .../tools/pipeline/test_pipeline_chat_pytorch.py |  2 +-
 .../pipeline/test_pipeline_chat_turbomind.py     |  2 +-
 3 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/.github/workflows/daily_ete_test.yml b/.github/workflows/daily_ete_test.yml
index d7c9bee4aa..ce9b30470a 100644
--- a/.github/workflows/daily_ete_test.yml
+++ b/.github/workflows/daily_ete_test.yml
@@ -95,17 +95,17 @@ jobs:
           lmdeploy check_env
       - name: Test lmdeploy - quantization w4a16
         continue-on-error: true
-        if: contains(fromJSON(${{github.event.inputs.backend}}), 'turbomind') && contains(fromJSON(${{github.event.inputs.model}}), 'quantization')
+        if: contains(fromJSON(${{github.event.inputs.backend}}), 'turbomind') && contains(fromJSON(${{github.event.inputs.model}}), 'quantization')
         run: |
           pytest autotest/tools/quantization/test_quantization_w4a16.py -m 'not pr_test' -n 8 --alluredir=allure-results --clean-alluredir
       - name: Test lmdeploy - quantization kv int8
         continue-on-error: true
-        if: contains(fromJSON(${{github.event.inputs.backend}}), 'turbomind') && contains(fromJSON(${{github.event.inputs.model}}), 'quantization')
+        if: contains(fromJSON(${{github.event.inputs.backend}}), 'turbomind') && contains(fromJSON(${{github.event.inputs.model}}), 'quantization')
         run: |
           pytest autotest/tools/quantization/test_quantization_kvint8.py -n 8 --alluredir=allure-results
       - name: Test lmdeploy - quantization w8a8
         continue-on-error: true
-        if: contains(fromJSON(${{github.event.inputs.backend}}), 'pytorch') && contains(fromJSON(${{github.event.inputs.model}}), 'quantization')
+        if: contains(fromJSON(${{github.event.inputs.backend}}), 'pytorch') && contains(fromJSON(${{github.event.inputs.model}}), 'quantization')
         run: |
           pytest autotest/tools/quantization/test_quantization_w8a8.py -n 8 --alluredir=allure-results
       - name: Test lmdeploy - quantization kv int8 and w4a16
@@ -120,21 +120,21 @@ jobs:
           pytest autotest/tools/convert -m 'not pr_test' -n 8 --alluredir=allure-results
       - name: Test lmdeploy - chat workspace
         continue-on-error: true
-        if: contains(fromJSON(${{github.event.inputs.backend}}), 'turbomind') && contains(fromJSON(${{github.event.inputs.model}}), 'chat')
+        if: contains(fromJSON(${{github.event.inputs.backend}}), 'turbomind') && contains(fromJSON(${{github.event.inputs.model}}), 'chat')
         timeout-minutes: 20
         run: |
           pytest autotest/tools/chat/test_command_chat_workspace.py -m 'gpu_num_1 and not pr_test' -n 8 --alluredir=allure-results
           pytest autotest/tools/chat/test_command_chat_workspace.py -m 'gpu_num_2 and not pr_test' -n 4 --alluredir=allure-results
       - name: Test lmdeploy - chat hf turbomind
         continue-on-error: true
-        if: contains(fromJSON(${{github.event.inputs.backend}}), 'turbomind') && contains(fromJSON(${{github.event.inputs.model}}), 'chat')
+        if: contains(fromJSON(${{github.event.inputs.backend}}), 'turbomind') && contains(fromJSON(${{github.event.inputs.model}}), 'chat')
         timeout-minutes: 20
         run: |
           pytest autotest/tools/chat/test_command_chat_hf_turbomind.py -m 'gpu_num_1 and not pr_test' -n 8 --alluredir=allure-results
           pytest autotest/tools/chat/test_command_chat_hf_turbomind.py -m 'gpu_num_2 and not pr_test' -n 4 --alluredir=allure-results
       - name: Test lmdeploy - chat hf torch
         continue-on-error: true
-        if: contains(fromJSON(${{github.event.inputs.backend}}), 'pytorch') && contains(fromJSON(${{github.event.inputs.model}}), 'chat')
+        if: contains(fromJSON(${{github.event.inputs.backend}}), 'pytorch') && contains(fromJSON(${{github.event.inputs.model}}), 'chat')
         timeout-minutes: 20
         run: |
           pytest autotest/tools/chat/test_command_chat_hf_pytorch.py -m 'gpu_num_1 and not pr_test' -n 8 --alluredir=allure-results
@@ -155,14 +155,14 @@ jobs:
           pytest autotest/tools/pipeline/test_pipeline_chat_pytorch.py -m 'gpu_num_2 and not pr_test' -n 4 --alluredir=allure-results
       - name: Test lmdeploy - restful turbomind
         continue-on-error: true
-        if: contains(fromJSON(${{github.event.inputs.backend}}), 'turbomind') && contains(fromJSON(${{github.event.inputs.model}}), 'restful')
+        if: contains(fromJSON(${{github.event.inputs.backend}}), 'turbomind') && contains(fromJSON(${{github.event.inputs.model}}), 'restful')
         timeout-minutes: 30
         run: |
           pytest autotest/tools/restful/test_restful_chat_turbomind.py -m 'gpu_num_1 and not pr_test' -n 8 --alluredir=allure-results
           pytest autotest/tools/restful/test_restful_chat_turbomind.py -m 'gpu_num_2 and not pr_test' -n 4 --alluredir=allure-results
       - name: Test lmdeploy - restful torch
         continue-on-error: true
-        if: contains(fromJSON(${{github.event.inputs.backend}}), 'pytorch') && contains(fromJSON(${{github.event.inputs.model}}), 'restful')
+        if: contains(fromJSON(${{github.event.inputs.backend}}), 'pytorch') && contains(fromJSON(${{github.event.inputs.model}}), 'restful')
         timeout-minutes: 40
         run: |
           pytest autotest/tools/restful/test_restful_chat_pytorch.py -m 'gpu_num_1 and not pr_test' -n 8 --alluredir=allure-results
diff --git a/autotest/tools/pipeline/test_pipeline_chat_pytorch.py b/autotest/tools/pipeline/test_pipeline_chat_pytorch.py
index 16cc561f67..1793587ee8 100644
--- a/autotest/tools/pipeline/test_pipeline_chat_pytorch.py
+++ b/autotest/tools/pipeline/test_pipeline_chat_pytorch.py
@@ -42,7 +42,7 @@ def test_pipeline_chat_pytorch_tp2(config, common_case_config, model,
                                    worker_id):
     if 'gw' in worker_id:
         os.environ['CUDA_VISIBLE_DEVICES'] = get_cuda_id_by_workerid(worker_id,
-                                                                      tp_num=2)
+                                                                     tp_num=2)
     p = Process(target=run_pipeline_chat_test,
                 args=(config, common_case_config, model, 'pytorch'))
     p.start()
diff --git a/autotest/tools/pipeline/test_pipeline_chat_turbomind.py b/autotest/tools/pipeline/test_pipeline_chat_turbomind.py
index d6446190ac..ae73009dbd 100644
--- a/autotest/tools/pipeline/test_pipeline_chat_turbomind.py
+++ b/autotest/tools/pipeline/test_pipeline_chat_turbomind.py
@@ -32,7 +32,7 @@ def test_pipeline_chat_tp1(config, common_case_config, model, worker_id):
 def test_pipeline_chat_tp2(config, common_case_config, model, worker_id):
     if 'gw' in worker_id:
         os.environ['CUDA_VISIBLE_DEVICES'] = get_cuda_id_by_workerid(worker_id,
-                                                                      tp_num=2)
+                                                                     tp_num=2)
     p = Process(target=run_pipeline_chat_test,
                 args=(config, common_case_config, model, 'turbomind'))
     p.start()