diff --git a/.github/workflows/run_comparative_benchmark.yml b/.github/workflows/run_comparative_benchmark.yml
index c1736068..a4e82b7e 100644
--- a/.github/workflows/run_comparative_benchmark.yml
+++ b/.github/workflows/run_comparative_benchmark.yml
@@ -11,6 +11,7 @@ name: Comparative Benchmarks
 on:
   # Will only run when manually triggered.
   workflow_dispatch:
+  pull_request:
 
 concurrency:
   # A PR number if a pull request and otherwise the commit hash. This cancels
@@ -98,108 +99,108 @@ jobs:
           gcloud storage cp "${XLA_TOOLS_DIR_ARCHIVE}" "${XLA_TOOLS_DIR_GCS_ARTIFACT}"
           echo "xla-tools-dir-gcs-artifact=${XLA_TOOLS_DIR_GCS_ARTIFACT}" >> "${GITHUB_OUTPUT}"
 
-  benchmark_on_a2-highgpu-1g:
-    needs: [setup, build_xla_tools]
-    timeout-minutes: 1440
-    runs-on:
-      - self-hosted  # must come first
-      - runner-group=${{ needs.setup.outputs.runner-group }}
-      - environment=prod
-      - machine-type=a2-highgpu-1g
-    env:
-      BENCHMARK_GCS_DIR: ${{ needs.setup.outputs.benchmark-gcs-dir }}
-      RESULTS_DIR: results-dir
-      TARGET_DEVICE: a2-highgpu-1g
-      XLA_TOOLS_DIR: ${{ needs.build_xla_tools.outputs.xla-tools-dir }}
-      XLA_TOOLS_DIR_ARCHIVE: ${{ needs.build_xla_tools.outputs.xla-tools-dir-archive }}
-      XLA_TOOLS_DIR_GCS_ARTIFACT: ${{ needs.build_xla_tools.outputs.xla-tools-dir-gcs-artifact }}
-    steps:
-      - name: "Checking out PR repository"
-        uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v2.5.0
-      - name: "Setup"
-        id: setup
-        run: |
-          echo "results-gcs-dir=${BENCHMARK_GCS_DIR}/${TARGET_DEVICE}-results" >> "${GITHUB_OUTPUT}"
-          mkdir "${RESULTS_DIR}"
-      - name: "Downloading and unpacking XLA tools"
-        run: |
-          gcloud storage cp "${XLA_TOOLS_DIR_GCS_ARTIFACT}" "${XLA_TOOLS_DIR_ARCHIVE}"
-          tar -xvf "${XLA_TOOLS_DIR_ARCHIVE}"
-      - name: "Benchmarking XLA-HLO:GPU"
-        env:
-          XLA_HLO_RESULTS_JSON: xla-hlo.json
-          RESULTS_GCS_DIR: ${{ steps.setup.outputs.results-gcs-dir }}
-        run: |
-          RESULTS_PATH="${RESULTS_DIR}/${XLA_HLO_RESULTS_JSON}"
-          docker run --gpus all --mount="type=bind,src="${PWD}",target=/work" --workdir="/work" \
-            --env "OOBI_XLA_TOOLS_DIR=${XLA_TOOLS_DIR}" \
-            "gcr.io/iree-oss/openxla-benchmark/cuda11.8-cudnn8.9@sha256:f43984cd6c16ad1faad4dfb6aac3f53e552dd728c9330c90752e78ae51e4276f" \
-            ./comparative_benchmark/xla_hlo/benchmark_all.sh \
-            "${TARGET_DEVICE}"\
-            "${RESULTS_PATH}"
-          gcloud storage cp "${RESULTS_PATH}" "${RESULTS_GCS_DIR}/"
-      - name: "Benchmarking JAX-XLA:GPU"
-        env:
-          JAX_XLA_RESULTS_JSON: jax-xla.json
-          RESULTS_GCS_DIR: ${{ steps.setup.outputs.results-gcs-dir }}
-        run: |
-          RESULTS_PATH="${RESULTS_DIR}/${JAX_XLA_RESULTS_JSON}"
-          docker run --gpus all --mount="type=bind,src="${PWD}",target=/work" --workdir="/work" \
-            "gcr.io/iree-oss/openxla-benchmark/cuda11.8-cudnn8.9@sha256:f43984cd6c16ad1faad4dfb6aac3f53e552dd728c9330c90752e78ae51e4276f" \
-            ./comparative_benchmark/jax/benchmark_xla.sh \
-            "${TARGET_DEVICE}"\
-            "${RESULTS_PATH}"
-          gcloud storage cp "${RESULTS_PATH}" "${RESULTS_GCS_DIR}/"
-#      # Disabled due to https://github.com/openxla/openxla-pjrt-plugin/issues/203.
-# - name: "Benchmarking JAX-IREE:GPU" +# benchmark_on_a2-highgpu-1g: +# needs: [setup, build_xla_tools] +# timeout-minutes: 1440 +# runs-on: +# - self-hosted # must come first +# - runner-group=${{ needs.setup.outputs.runner-group }} +# - environment=prod +# - machine-type=a2-highgpu-1g +# env: +# BENCHMARK_GCS_DIR: ${{ needs.setup.outputs.benchmark-gcs-dir }} +# RESULTS_DIR: results-dir +# TARGET_DEVICE: a2-highgpu-1g +# XLA_TOOLS_DIR: ${{ needs.build_xla_tools.outputs.xla-tools-dir }} +# XLA_TOOLS_DIR_ARCHIVE: ${{ needs.build_xla_tools.outputs.xla-tools-dir-archive }} +# XLA_TOOLS_DIR_GCS_ARTIFACT: ${{ needs.build_xla_tools.outputs.xla-tools-dir-gcs-artifact }} +# steps: +# - name: "Checking out PR repository" +# uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v2.5.0 +# - name: "Setup" +# id: setup +# run: | +# echo "results-gcs-dir=${BENCHMARK_GCS_DIR}/${TARGET_DEVICE}-results" >> "${GITHUB_OUTPUT}" +# mkdir "${RESULTS_DIR}" +# - name: "Downloading and unpacking XLA tools" +# run: | +# gcloud storage cp "${XLA_TOOLS_DIR_GCS_ARTIFACT}" "${XLA_TOOLS_DIR_ARCHIVE}" +# tar -xvf "${XLA_TOOLS_DIR_ARCHIVE}" +# - name: "Benchmarking XLA-HLO:GPU" # env: -# JAX_IREE_RESULTS_JSON: jax-iree.json +# XLA_HLO_RESULTS_JSON: xla-hlo.json # RESULTS_GCS_DIR: ${{ steps.setup.outputs.results-gcs-dir }} # run: | -# RESULTS_PATH="${RESULTS_DIR}/${JAX_IREE_RESULTS_JSON}" +# RESULTS_PATH="${RESULTS_DIR}/${XLA_HLO_RESULTS_JSON}" # docker run --gpus all --mount="type=bind,src="${PWD}",target=/work" --workdir="/work" \ +# --env "OOBI_XLA_TOOLS_DIR=${XLA_TOOLS_DIR}" \ # "gcr.io/iree-oss/openxla-benchmark/cuda11.8-cudnn8.9@sha256:f43984cd6c16ad1faad4dfb6aac3f53e552dd728c9330c90752e78ae51e4276f" \ -# ./comparative_benchmark/jax/benchmark_iree.sh \ +# ./comparative_benchmark/xla_hlo/benchmark_all.sh \ +# "${TARGET_DEVICE}"\ +# "${RESULTS_PATH}" +# gcloud storage cp "${RESULTS_PATH}" "${RESULTS_GCS_DIR}/" +# - name: "Benchmarking JAX-XLA:GPU" +# env: +# JAX_XLA_RESULTS_JSON: jax-xla.json +# RESULTS_GCS_DIR: ${{ steps.setup.outputs.results-gcs-dir }} +# run: | +# RESULTS_PATH="${RESULTS_DIR}/${JAX_XLA_RESULTS_JSON}" +# docker run --gpus all --mount="type=bind,src="${PWD}",target=/work" --workdir="/work" \ +# "gcr.io/iree-oss/openxla-benchmark/cuda11.8-cudnn8.9@sha256:f43984cd6c16ad1faad4dfb6aac3f53e552dd728c9330c90752e78ae51e4276f" \ +# ./comparative_benchmark/jax/benchmark_xla.sh \ +# "${TARGET_DEVICE}"\ +# "${RESULTS_PATH}" +# gcloud storage cp "${RESULTS_PATH}" "${RESULTS_GCS_DIR}/" +## # Disabled due to https://github.com/openxla/openxla-pjrt-plugin/issues/203. 
+## - name: "Benchmarking JAX-IREE:GPU" +## env: +## JAX_IREE_RESULTS_JSON: jax-iree.json +## RESULTS_GCS_DIR: ${{ steps.setup.outputs.results-gcs-dir }} +## run: | +## RESULTS_PATH="${RESULTS_DIR}/${JAX_IREE_RESULTS_JSON}" +## docker run --gpus all --mount="type=bind,src="${PWD}",target=/work" --workdir="/work" \ +## "gcr.io/iree-oss/openxla-benchmark/cuda11.8-cudnn8.9@sha256:f43984cd6c16ad1faad4dfb6aac3f53e552dd728c9330c90752e78ae51e4276f" \ +## ./comparative_benchmark/jax/benchmark_iree.sh \ +## "${TARGET_DEVICE}"\ +## "${RESULTS_PATH}" +## gcloud storage cp "${RESULTS_PATH}" "${RESULTS_GCS_DIR}/" +# - name: "Benchmarking TF-XLA:GPU" +# env: +# TF_XLA_RESULTS_JSON: tf-xla.json +# RESULTS_GCS_DIR: ${{ steps.setup.outputs.results-gcs-dir }} +# run: | +# RESULTS_PATH="${RESULTS_DIR}/${TF_XLA_RESULTS_JSON}" +# docker run --gpus all --mount="type=bind,src="${PWD}",target=/work" --workdir="/work" \ +# "gcr.io/iree-oss/openxla-benchmark/cuda11.8-cudnn8.9@sha256:f43984cd6c16ad1faad4dfb6aac3f53e552dd728c9330c90752e78ae51e4276f" \ +# ./comparative_benchmark/tf_xla/benchmark_all.sh \ +# "${TARGET_DEVICE}"\ +# "${RESULTS_PATH}" +# gcloud storage cp "${RESULTS_PATH}" "${RESULTS_GCS_DIR}/" +# - name: "Benchmarking PT-Inductor:GPU" +# env: +# PT_INDUCTOR_RESULTS_JSON: pt-inductor.json +# RESULTS_GCS_DIR: ${{ steps.setup.outputs.results-gcs-dir }} +# run: | +# RESULTS_PATH="${RESULTS_DIR}/${PT_INDUCTOR_RESULTS_JSON}" +# docker run --gpus all --mount="type=bind,src="${PWD}",target=/work" --workdir="/work" \ +# "gcr.io/iree-oss/openxla-benchmark/cuda11.8-cudnn8.9@sha256:f43984cd6c16ad1faad4dfb6aac3f53e552dd728c9330c90752e78ae51e4276f" \ +# ./comparative_benchmark/pt_inductor/benchmark_all.sh \ # "${TARGET_DEVICE}"\ # "${RESULTS_PATH}" # gcloud storage cp "${RESULTS_PATH}" "${RESULTS_GCS_DIR}/" - - name: "Benchmarking TF-XLA:GPU" - env: - TF_XLA_RESULTS_JSON: tf-xla.json - RESULTS_GCS_DIR: ${{ steps.setup.outputs.results-gcs-dir }} - run: | - RESULTS_PATH="${RESULTS_DIR}/${TF_XLA_RESULTS_JSON}" - docker run --gpus all --mount="type=bind,src="${PWD}",target=/work" --workdir="/work" \ - "gcr.io/iree-oss/openxla-benchmark/cuda11.8-cudnn8.9@sha256:f43984cd6c16ad1faad4dfb6aac3f53e552dd728c9330c90752e78ae51e4276f" \ - ./comparative_benchmark/tf_xla/benchmark_all.sh \ - "${TARGET_DEVICE}"\ - "${RESULTS_PATH}" - gcloud storage cp "${RESULTS_PATH}" "${RESULTS_GCS_DIR}/" - - name: "Benchmarking PT-Inductor:GPU" - env: - PT_INDUCTOR_RESULTS_JSON: pt-inductor.json - RESULTS_GCS_DIR: ${{ steps.setup.outputs.results-gcs-dir }} - run: | - RESULTS_PATH="${RESULTS_DIR}/${PT_INDUCTOR_RESULTS_JSON}" - docker run --gpus all --mount="type=bind,src="${PWD}",target=/work" --workdir="/work" \ - "gcr.io/iree-oss/openxla-benchmark/cuda11.8-cudnn8.9@sha256:f43984cd6c16ad1faad4dfb6aac3f53e552dd728c9330c90752e78ae51e4276f" \ - ./comparative_benchmark/pt_inductor/benchmark_all.sh \ - "${TARGET_DEVICE}"\ - "${RESULTS_PATH}" - gcloud storage cp "${RESULTS_PATH}" "${RESULTS_GCS_DIR}/" - benchmark_on_c2-standard-16: + benchmark_on_c2-standard-60: needs: [setup, build_xla_tools] timeout-minutes: 1440 runs-on: - self-hosted # must come first - runner-group=${{ needs.setup.outputs.runner-group }} - environment=prod - - machine-type=c2-standard-16 + - machine-type=c2-standard-60 env: BENCHMARK_GCS_DIR: ${{ needs.setup.outputs.benchmark-gcs-dir }} RESULTS_DIR: results-dir - TARGET_DEVICE: c2-standard-16 + TARGET_DEVICE: c2-standard-60 XLA_TOOLS_DIR: ${{ needs.build_xla_tools.outputs.xla-tools-dir }} XLA_TOOLS_DIR_ARCHIVE: ${{ 
       XLA_TOOLS_DIR_GCS_ARTIFACT: ${{ needs.build_xla_tools.outputs.xla-tools-dir-gcs-artifact }}
diff --git a/common_benchmark_suite/openxla/benchmark/devices/gcp_devices.py b/common_benchmark_suite/openxla/benchmark/devices/gcp_devices.py
index dea2a85d..7131fa19 100644
--- a/common_benchmark_suite/openxla/benchmark/devices/gcp_devices.py
+++ b/common_benchmark_suite/openxla/benchmark/devices/gcp_devices.py
@@ -32,4 +32,18 @@
     },
 )
 
-ALL_DEVICES = [GCP_A2_HIGHGPU_1G, GCP_C2_STANDARD_16]
+GCP_C2_STANDARD_60 = def_types.DeviceSpec(
+    name="c2-standard-60",
+    host_type="gcp",
+    host_model="c2-standard-60",
+    host_environment="linux-x86_64",
+    accelerator_type="cpu",
+    accelerator_model="intel-cascadelake",
+    accelerator_architecture="x86_64-cascadelake",
+    accelerator_attributes={
+        "num_of_cores": 30,
+        "hyper-threading": False,
+    },
+)
+
+ALL_DEVICES = [GCP_A2_HIGHGPU_1G, GCP_C2_STANDARD_16, GCP_C2_STANDARD_60]
diff --git a/comparative_benchmark/jax/benchmark_iree.sh b/comparative_benchmark/jax/benchmark_iree.sh
index 3ef90221..205e05dd 100755
--- a/comparative_benchmark/jax/benchmark_iree.sh
+++ b/comparative_benchmark/jax/benchmark_iree.sh
@@ -81,15 +81,9 @@ declare -a GPU_BENCHMARK_NAMES=(
 )
 
 declare -a CPU_BENCHMARK_NAMES=(
-  # Batch 64 and 128 disabled due to accuracy error: https://github.com/openxla/iree/issues/14601.
-  "models/RESNET50_FP32_JAX_.+_BATCH1/.+"
-  # Batch 32 and 64 disabled due to accuracy error: https://github.com/openxla/iree/issues/14601.
-  "models/BERT_LARGE_FP32_JAX_.+_BATCH1/.+"
-  # T5 models disabled: https://github.com/openxla/openxla-pjrt-plugin/issues/286.
-  # "models/T5_LARGE_FP32_JAX_.+_BATCH(1|16|32)/.+"
-  # "models/T5_4CG_LARGE_FP32_JAX_.+_BATCH(1|16|32)/.+"
-  # Batch 64 and 128 disabled due to accuracy error: https://github.com/openxla/iree/issues/14601.
-  "models/GPT2LMHEAD_FP32_JAX_.+_BATCH1/.+"
+  "models/RESNET50_FP32_JAX_.+_BATCH(1|8|64|128)/.+"
+  "models/BERT_LARGE_FP32_JAX_.+_BATCH(1|16|24|32)/.+"
+  "models/T5_LARGE_FP32_JAX_.+_BATCH(1|16|24|32)/.+"
 )
 
 if [ "${TARGET_DEVICE}" = "a2-highgpu-1g" ]; then
@@ -98,7 +92,11 @@ if [ "${TARGET_DEVICE}" = "a2-highgpu-1g" ]; then
   JAX_PLATFORM="iree_cuda"
 elif [ "${TARGET_DEVICE}" = "c2-standard-16" ]; then
   BENCHMARK_NAMES=("${CPU_BENCHMARK_NAMES[@]}")
-  ITERATIONS=20
+  ITERATIONS=5
   JAX_PLATFORM="iree_cpu"
+elif [ "${TARGET_DEVICE}" = "c2-standard-60" ]; then
+  BENCHMARK_NAMES=("${CPU_BENCHMARK_NAMES[@]}")
+  ITERATIONS=5
+  JAX_PLATFORM="iree_cpu"
 else
   echo "Unsupported target device ${TARGET_DEVICE}."
diff --git a/comparative_benchmark/jax/benchmark_xla.sh b/comparative_benchmark/jax/benchmark_xla.sh
index adb40d12..17cfa082 100755
--- a/comparative_benchmark/jax/benchmark_xla.sh
+++ b/comparative_benchmark/jax/benchmark_xla.sh
@@ -44,11 +44,9 @@ declare -a GPU_BENCHMARK_NAMES=(
 )
 
 declare -a CPU_BENCHMARK_NAMES=(
-  "models/RESNET50_FP32_JAX_.+_BATCH(1|64|128)/.+"
-  "models/BERT_LARGE_FP32_JAX_.+_BATCH(1|32|64)/.+"
-  "models/T5_LARGE_FP32_JAX_.+_BATCH(1|16|32)/.+"
-  "models/T5_4CG_LARGE_FP32_JAX_.+_BATCH(1|16|32)/.+"
-  "models/GPT2LMHEAD_FP32_JAX_.+_BATCH(1|64|128)/.+"
+  "models/RESNET50_FP32_JAX_.+_BATCH(1|8|64|128)/.+"
+  "models/BERT_LARGE_FP32_JAX_.+_BATCH(1|16|24|32)/.+"
+  "models/T5_LARGE_FP32_JAX_.+_BATCH(1|16|24|32)/.+"
 )
 
 if [ "${TARGET_DEVICE}" = "a2-highgpu-1g" ]; then
@@ -57,6 +55,9 @@ if [ "${TARGET_DEVICE}" = "a2-highgpu-1g" ]; then
 elif [ "${TARGET_DEVICE}" = "c2-standard-16" ]; then
   BENCHMARK_NAMES=("${CPU_BENCHMARK_NAMES[@]}")
   ITERATIONS=5
+elif [ "${TARGET_DEVICE}" = "c2-standard-60" ]; then
+  BENCHMARK_NAMES=("${CPU_BENCHMARK_NAMES[@]}")
+  ITERATIONS=5
 else
   echo "Unsupported target device ${TARGET_DEVICE}."
   exit 1
diff --git a/comparative_benchmark/pt_inductor/benchmark_all.sh b/comparative_benchmark/pt_inductor/benchmark_all.sh
index de9d4237..0aaf676b 100755
--- a/comparative_benchmark/pt_inductor/benchmark_all.sh
+++ b/comparative_benchmark/pt_inductor/benchmark_all.sh
@@ -39,9 +39,9 @@ declare -a GPU_BENCHMARK_NAMES=(
 )
 
 declare -a CPU_BENCHMARK_NAMES=(
-  "models/RESNET50_FP32_PT_.+_BATCH(1|64|128)/.+"
+  "models/RESNET50_FP32_PT_.+_BATCH(1|8|64|128)/.+"
   # Batches 32 and 64 disabled: https://github.com/openxla/openxla-benchmark/issues/125.
-  "models/BERT_LARGE_FP32_PT_.+_BATCH1/.+"
+  "models/BERT_LARGE_FP32_PT_.+_BATCH(1|16|24|32)/.+"
 )
 
 if [ "${TARGET_DEVICE}" = "a2-highgpu-1g" ]; then
@@ -50,6 +50,9 @@ if [ "${TARGET_DEVICE}" = "a2-highgpu-1g" ]; then
 elif [ "${TARGET_DEVICE}" = "c2-standard-16" ]; then
   BENCHMARK_NAMES=("${CPU_BENCHMARK_NAMES[@]}")
   ITERATIONS=20
+elif [ "${TARGET_DEVICE}" = "c2-standard-60" ]; then
+  BENCHMARK_NAMES=("${CPU_BENCHMARK_NAMES[@]}")
+  ITERATIONS=20
 else
   echo "Unsupported target device ${TARGET_DEVICE}."
   exit 1
diff --git a/comparative_benchmark/tf_xla/benchmark_all.sh b/comparative_benchmark/tf_xla/benchmark_all.sh
index 7d994067..c4fcff24 100755
--- a/comparative_benchmark/tf_xla/benchmark_all.sh
+++ b/comparative_benchmark/tf_xla/benchmark_all.sh
@@ -42,11 +42,11 @@ declare -a GPU_BENCHMARK_NAMES=(
 )
 
 declare -a CPU_BENCHMARK_NAMES=(
-  "models/RESNET50_FP32_TF_.+_BATCH(1|64|128)/.+"
-  "models/BERT_LARGE_FP32_TF_.+_BATCH(1|32|64)/.+"
-  "models/T5_LARGE_FP32_TF_.+_BATCH(1|16|32)/.+"
+  "models/RESNET50_FP32_TF_.+_BATCH(1|8|64|128)/.+"
+  "models/BERT_LARGE_FP32_TF_.+_BATCH(1|16|24|32)/.+"
+  "models/T5_LARGE_FP32_TF_.+_BATCH(1|16|24|32)/.+"
   # Batch 128 disabled: https://github.com/openxla/openxla-benchmark/issues/125.
-  "models/EFFICIENTNETB7_FP32_TF_.+_BATCH(1|64)/.+"
+  "models/EFFICIENTNETB7_FP32_TF_.+_BATCH(1|64|128)/.+"
 )
 
 if [ "${TARGET_DEVICE}" = "a2-highgpu-1g" ]; then
@@ -55,6 +55,9 @@ if [ "${TARGET_DEVICE}" = "a2-highgpu-1g" ]; then
 elif [ "${TARGET_DEVICE}" = "c2-standard-16" ]; then
   BENCHMARK_NAMES=("${CPU_BENCHMARK_NAMES[@]}")
   ITERATIONS=5
+elif [ "${TARGET_DEVICE}" = "c2-standard-60" ]; then
+  BENCHMARK_NAMES=("${CPU_BENCHMARK_NAMES[@]}")
+  ITERATIONS=5
 else
   echo "Unsupported target device ${TARGET_DEVICE}."
   exit 1
diff --git a/comparative_benchmark/xla_hlo/benchmark_all.sh b/comparative_benchmark/xla_hlo/benchmark_all.sh
index cf2ec383..34728304 100755
--- a/comparative_benchmark/xla_hlo/benchmark_all.sh
+++ b/comparative_benchmark/xla_hlo/benchmark_all.sh
@@ -34,14 +34,12 @@ declare -a GPU_BENCHMARK_NAMES=(
 )
 
 declare -a CPU_BENCHMARK_NAMES=(
-  "models/RESNET50_FP32_JAX_.+_BATCH(1|64|128)/.+"
-  "models/BERT_LARGE_FP32_JAX_.+_BATCH(1|32|64)/.+"
-  "models/T5_LARGE_FP32_JAX_.+_BATCH(1|16|32)/.+"
-  "models/T5_4CG_LARGE_FP32_JAX_.+_BATCH(1|16|32)/.+"
-  "models/GPT2LMHEAD_FP32_JAX_.+_BATCH(1|64|128)/.+"
-  "models/RESNET50_FP32_TF_.+_BATCH(1|64|128)/.+"
-  "models/BERT_LARGE_FP32_TF_.+_BATCH(1|32|64)/.+"
-  "models/T5_LARGE_FP32_TF_.+_BATCH(1|16|32)/.+"
+  "models/RESNET50_FP32_JAX_.+_BATCH(1|8|64|128)/.+"
+  "models/BERT_LARGE_FP32_JAX_.+_BATCH(1|16|24|32)/.+"
+  "models/T5_LARGE_FP32_JAX_.+_BATCH(1|16|24|32)/.+"
+  "models/RESNET50_FP32_TF_.+_BATCH(1|8|64|128)/.+"
+  "models/BERT_LARGE_FP32_TF_.+_BATCH(1|16|24|32)/.+"
+  "models/T5_LARGE_FP32_TF_.+_BATCH(1|16|24|32)/.+"
   "models/EFFICIENTNETB7_FP32_TF_.+_BATCH(1|64|128)/.+"
 )
 
@@ -56,6 +54,10 @@ elif [ "${TARGET_DEVICE}" = "c2-standard-16" ]; then
   BENCHMARK_NAMES=("${CPU_BENCHMARK_NAMES[@]}")
   HLO_TOOL="run_hlo_module"
   ITERATIONS=5
+elif [ "${TARGET_DEVICE}" = "c2-standard-60" ]; then
+  BENCHMARK_NAMES=("${CPU_BENCHMARK_NAMES[@]}")
+  HLO_TOOL="run_hlo_module"
+  ITERATIONS=5
 else
   echo "Unsupported target device ${TARGET_DEVICE}."
   exit 1
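
Note (not part of the patch): the new "c2-standard-60" branches in the shell scripts use the same device name that the patch registers as GCP_C2_STANDARD_60 in gcp_devices.py, so the two places have to stay in sync. The snippet below is a minimal Python sketch of that name-based lookup, written only for illustration: it assumes the gcp_devices module is importable from the common_benchmark_suite package root and that def_types.DeviceSpec exposes its constructor arguments (name, accelerator_model, ...) as attributes; find_device_spec is a hypothetical helper, not an existing API.

from openxla.benchmark.devices import gcp_devices  # import path assumed from the file location above

def find_device_spec(target_device: str):
  """Returns the registered DeviceSpec whose name matches a TARGET_DEVICE string."""
  for device in gcp_devices.ALL_DEVICES:
    if device.name == target_device:
      return device
  # Mirrors the shell scripts' else branch for unknown devices.
  raise ValueError(f"Unsupported target device {target_device}.")

# Example: the entry added by this patch.
spec = find_device_spec("c2-standard-60")
print(spec.accelerator_model)  # expected: "intel-cascadelake"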