From 39c3676631afca3548eee3e9979786c1e64843ab Mon Sep 17 00:00:00 2001
From: root
Date: Mon, 6 Nov 2023 23:31:13 +0000
Subject: [PATCH] Remove CI Tests

---
 .github/workflows/self-push-amd.yml | 36 ++---------------------------
 1 file changed, 2 insertions(+), 34 deletions(-)

diff --git a/.github/workflows/self-push-amd.yml b/.github/workflows/self-push-amd.yml
index fdd59cf1b6b391..43bcdcf7fe4b40 100644
--- a/.github/workflows/self-push-amd.yml
+++ b/.github/workflows/self-push-amd.yml
@@ -17,44 +17,12 @@ env:
   RUN_PT_TF_CROSS_TESTS: 1
 
 jobs:
-  check_runner_status:
-    name: Check Runner Status
-    runs-on: rocm
-    steps:
-      - name: Checkout transformers
-        uses: actions/checkout@v3
-        with:
-          fetch-depth: 2
-
-      - name: Check Runner Status
-        run: python utils/check_self_hosted_runner.py --target_runners amd-mi210-single-gpu-ci-runner-docker --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }}
-
-  check_runners:
-    name: Check Runners
-    needs: check_runner_status
-    strategy:
-      matrix:
-        machine_type: [single-gpu, multi-gpu]
-    runs-on: [self-hosted, docker-gpu, amd-gpu, '${{ matrix.machine_type }}', '${{ inputs.gpu_flavor }}']
-    container:
-      image: huggingface/transformers-pytorch-amd-gpu-push-ci # <--- We test only for PyTorch for now
-      options: --device /dev/kfd --device /dev/dri --env HIP_VISIBLE_DEVICES --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
-    steps:
-      - name: ROCM-SMI
-        run: |
-          rocminfo | grep "Agent" -A 14
-      - name: Show HIP environment
-        run: |
-          echo "HIP: $HIP_VISIBLE_DEVICES"
-          echo "ROCR: $ROCR_VISIBLE_DEVICES"
-
   setup_gpu:
     name: Setup
-    needs: check_runners
     strategy:
       matrix:
         machine_type: [single-gpu, multi-gpu]
-    runs-on: [self-hosted, docker-gpu, amd-gpu, '${{ matrix.machine_type }}', '${{ inputs.gpu_flavor }}']
+    runs-on: [rocm, '${{ matrix.machine_type }}', '${{ inputs.gpu_flavor }}']
     container:
       image: huggingface/transformers-pytorch-amd-gpu-push-ci # <--- We test only for PyTorch for now
       options: --device /dev/kfd --device /dev/dri --env HIP_VISIBLE_DEVICES --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -152,7 +120,7 @@ jobs:
       matrix:
         folders: ${{ fromJson(needs.setup_gpu.outputs.matrix) }}
         machine_type: [single-gpu, multi-gpu]
-    runs-on: [self-hosted, docker-gpu, amd-gpu, '${{ matrix.machine_type }}', '${{ inputs.gpu_flavor }}']
+    runs-on: [rocm, '${{ matrix.machine_type }}', '${{ inputs.gpu_flavor }}']
     container:
       image: huggingface/transformers-pytorch-amd-gpu-push-ci # <--- We test only for PyTorch for now
       options: --device /dev/kfd --device /dev/dri --env HIP_VISIBLE_DEVICES --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/