[pre-commit.ci] pre-commit autoupdate #37
Workflow file for this run
name: "Run Compare Post Benchmark" | |
on: [push, pull_request] | |
#on: | |
# workflow_run: | |
# workflows: [Tests] | |
# types: | |
# - in_progress | |
concurrency: | |
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} | |
cancel-in-progress: true | |
jobs: | |
benchmark: | |
    # Job that clones pyuvsim@main, sets up a conda environment with the necessary dependencies,
    # then locally installs pyuvsim and additionally installs pytest-benchmark and requests from
    # PyPI. Runs in parallel as a matrix over the individual reference simulations. The "id"
    # input is passed as a flag to pytest, which parametrizes the reference simulation test
    # function. pytest-benchmark output is saved as an artifact with the current workflow run
    # and attempt as part of its name key.
    #
    # Link to discussion of artifacts
    # https://docs.github.com/en/actions/writing-workflows/choosing-what-your-workflow-does/storing-and-sharing-data-from-a-workflow#about-workflow-artifacts
    # https://github.com/actions/upload-artifact
    #
    # uncomment when Tests is working and done testing
    #if: github.event.workflow_run.conclusion == 'success'
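    # For illustration only: the "--refsim" pytest flag is assumed to be wired up in the test
    # suite's conftest roughly like the hypothetical sketch below (the names here are
    # assumptions, not part of this workflow):
    #
    #   def pytest_addoption(parser):
    #       parser.addoption("--refsim", action="append", default=[],
    #                        help="reference simulation id(s) to benchmark")
    #
    #   def pytest_generate_tests(metafunc):
    #       if "refsim" in metafunc.fixturenames:
    #           metafunc.parametrize("refsim", metafunc.config.getoption("refsim"))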
    name: Performance Benchmark
    env:
      ENV_NAME: pyuvsim_tests_mpich
      PYTHON: "3.12"
    runs-on: ubuntu-latest
    strategy:
      # all jobs should run in parallel
      matrix:
        id: [1.1_uniform, 1.1_gauss, 1.1_mwa, 1.2_uniform, 1.2_gauss, 1.3_uniform, 1.3_gauss]
    defaults:
      run:
        # Adding -l {0} helps ensure conda can be found properly.
        shell: bash -l {0}
    steps:
      - uses: actions/checkout@main
      - name: Setup Miniforge
        uses: conda-incubator/setup-miniconda@v3
        with:
          miniforge-version: latest
          python-version: ${{ env.PYTHON }}
          environment-file: ci/${{ env.ENV_NAME }}.yaml
          activate-environment: ${{ env.ENV_NAME }}
          run-post: false
      - name: Conda Info
        run: |
          conda info -a
          conda list
          PYVER=`python -c "import sys; print('{:d}.{:d}'.format(sys.version_info.major, sys.version_info.minor))"`
          if [[ $PYVER != $PYTHON ]]; then
            exit 1;
          fi
      # pip install the benchmark utility and requests from PyPI, and install pyuvsim locally
      - name: Install
        run: |
          pip install pytest-benchmark
          pip install requests
          pip install .
      # make the artifacts directory, then run pytest using mpiexec with only 1 node and core, specifying the
      # reference simulation to run using the "refsim" flag. Save the pytest benchmark output in artifacts/
      # with a sufficiently unique name
      - name: Run benchmark
        run: |
          mkdir artifacts/
          mpiexec -n 1 -np 1 pytest --refsim=${{ matrix.id }} --benchmark-only --benchmark-json artifacts/output_${{ matrix.id }}.json -s
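      # For example, for matrix id 1.1_uniform the mpiexec line above expands to:
      #   mpiexec -n 1 -np 1 pytest --refsim=1.1_uniform --benchmark-only --benchmark-json artifacts/output_1.1_uniform.json -s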
      # upload the benchmark output as an artifact whose name key corresponds to the current
      # workflow run and attempt; only store artifacts for 1 day
      - name: Upload result artifacts
        uses: actions/upload-artifact@v4
        with:
          name: ${{ github.run_id }}-${{ github.run_attempt }}-${{ matrix.id }}
          path: artifacts/
          if-no-files-found: error
          include-hidden-files: true
          retention-days: 1
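      # For a hypothetical run_id of 9000000001, attempt 1, and matrix id 1.1_gauss, the
      # artifact name above evaluates to "9000000001-1-1.1_gauss".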
  collate-post-benchmark:
    # Job that loads the saved artifacts corresponding to the specific workflow run and attempt id,
    # then creates a net benchmark output file named output.json with a python script action. The
    # net benchmark file should still be accurate, except the explicit machine info will be mostly
    # lost. The net benchmark file is then fed to github-action-benchmark, which compares the
    # current benchmark output with the latest data in the gh-pages branch. If the current workflow
    # is a push to main, github-action-benchmark then pushes the current benchmark output to
    # gh-pages. If a performance regression is detected during the comparison, a comment with the
    # benchmark comparison output is posted on the workflow and this job fails.
    #
    # Inspired by this workflow by yewstack/yew and the github-action-benchmark README:
    # https://github.com/yewstack/yew/blob/master/.github/workflows/benchmark.yml
    # https://github.com/yewstack/yew/blob/master/.github/workflows/post-benchmark.yml
    # https://github.com/benchmark-action/github-action-benchmark
    # https://github.com/actions/download-artifact
    name: Concatenate and Post Benchmark Results
    needs: benchmark
    runs-on: ubuntu-latest
    steps:
      # Checkout repo for the github-action-benchmark action
      - uses: actions/checkout@v4
      # setup python
      - uses: actions/setup-python@v4
        with:
          python-version: '3.x'
      # only downloads artifacts from the current workflow run and attempt via the pattern match;
      # loads the saved benchmark artifacts from the benchmark matrix into artifacts/
      - name: Download result artifacts
        uses: actions/download-artifact@v4
        with:
          github-token: "${{ secrets.GITHUB_TOKEN }}"
          pattern: ${{ github.run_id }}-${{ github.run_attempt }}-*
          merge-multiple: true
          path: artifacts
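      # With merge-multiple, artifacts/ should then contain one file per matrix id, e.g.
      #   artifacts/output_1.1_uniform.json ... artifacts/output_1.3_gauss.json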
      # prints directory info recursively, removable
      # (could maybe swap this to exa or lsd because base ls doesn't do tree)
      - name: Display structure of downloaded files
        run: ls -R
      # approach to putting all the benchmark output in one file, with the machine/run info
      # of only one of the pytest-benchmark runs. Loads all the benchmark output artifact files,
      # then takes the benchmark timing info from the other n-1 files and adds it to the first file.
      # With this approach, the benchmark comparison output is only a single table, and we only
      # have one comment on alert in the workflow.
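      # Roughly, each pytest-benchmark JSON file is assumed to look like (abridged; exact
      # fields can vary between pytest-benchmark versions):
      #   {
      #     "machine_info": {...},
      #     "commit_info": {...},
      #     "benchmarks": [{"name": "...", "stats": {"mean": ..., "stddev": ..., ...}, ...}],
      #     "datetime": "...", "version": "..."
      #   }
      # so appending the other files' "benchmarks" entries onto the first file's list yields a
      # single file that github-action-benchmark can read.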
      - uses: jannekem/run-python-script-action@v1
        with:
          script: |
            import os
            import json
            # make list of paths to artifact files excluding hidden files
            filepath_arr = [os.path.join('artifacts', bench) for bench in os.listdir('artifacts') if not bench.startswith('.')]
            print(filepath_arr)
            output_jsons = []
            # open each filepath in the filepath_arr, load it as a json, and append it to a list
            for filepath in filepath_arr:
                with open(filepath) as f:
                    output_jsons.append(json.load(f))
            # choose the first json as the one to modify to contain all the benchmark data
            net_json = output_jsons[0]
            # iterate through the other jsons (1 to n-1) and append their benchmark data to net_json
            for json_out in output_jsons[1:]:
                net_json['benchmarks'].append(json_out['benchmarks'][0])
            # save net_json as json with name output.json in current working directory
            with open('output.json', 'w') as f:
                json.dump(net_json, f)
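      # The merged file can be sanity-checked locally with, for example:
      #   python -c "import json; print(len(json.load(open('output.json'))['benchmarks']))"
      # which should print 7 (one entry per matrix id) when every artifact was downloaded.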
      # Print github event_name and ref_name and the boolean check for whether gh-pages should be updated
      - name: Print Event, Ref, and Upload Boolean
        run: |
          echo "Event Name: ${{ github.event_name }}"
          echo "Ref Name: ${{ github.ref_name }}"
          echo "Update gh-pages: ${{ github.event_name == 'push' && github.ref_name == 'main' }}"
      # Compares the data from the specified "output-file-path" with the latest data from
      # the gh-pages branch. If a performance regression occurs, the step fails and alerts.
      # Will only comment if a performance regression has occurred.
      # NOTE: it is important that this does not modify gh-pages on pull request
      # https://github.com/benchmark-action/github-action-benchmark?tab=readme-ov-file#caveats
      # This only updates gh-pages if a push to main occurs
      - name: Compare benchmarks
        uses: benchmark-action/github-action-benchmark@v1
        with:
          # What benchmark tool the output.json came from
          tool: 'pytest'
          # Where the output from the benchmark tool is stored
          output-file-path: output.json
          # Alert threshold relative to the previous data; 120% should fail consistently
          alert-threshold: "120%"
          # Workflow will fail when an alert happens
          fail-on-alert: true
          # Comment on the PR if the branch is not a fork
          comment-on-alert: true
          # Enable Job Summary for PRs
          summary-always: true
          # Always leave a comment
          #comment-always: true
          github-token: ${{ secrets.GITHUB_TOKEN }}
          # Push and deploy GitHub pages branch automatically
          auto-push: ${{ github.event_name == 'push' && github.ref_name == 'main' }}
          save-data-file: ${{ github.event_name == 'push' && github.ref_name == 'main' }}