feature: Add automated benchmarking to GitHub
kshyatt-aws committed Aug 27, 2024
1 parent 0041a2b commit 044b737
Showing 3 changed files with 6,466 additions and 0 deletions.
46 changes: 46 additions & 0 deletions .github/workflows/benchmark.yml
@@ -0,0 +1,46 @@
name: Benchmark

on:
  push:
    branches: [ main ]
  pull_request:

jobs:
  benchmark:
    runs-on: ubuntu-latest
    timeout-minutes: 30
    steps:
      - uses: actions/checkout@v4
      - name: Install juliaup
        uses: julia-actions/install-juliaup@v2
        with:
          channel: '1'
      - name: Update Julia registry
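        # Custom `shell`: the runner writes this step's `run` block to a temp
        # script file and substitutes its path for {0}, so the lines below
        # execute as Julia code rather than as a shell script.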
        shell: julia --project=. --color=yes {0}
        run: |
          using Pkg
          Pkg.Registry.update()
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.9'
      - name: Install dependencies
        run: |
          pip install -e .[test]  # to put juliapkg.json in sys.path
          python -c 'import juliacall'  # force install of all deps
      - name: Benchmark
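        # -n 0 disables pytest-xdist parallelism: pytest-benchmark requires the
        # benchmarks to run serially in the main process for stable timings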
        run: |
          pytest -n 0 benchmark/benchmark.py --benchmark-json=benchmark/output.json
      - name: Store benchmark result
        uses: benchmark-action/github-action-benchmark@v1
        with:
          name: Python Benchmark with pytest-benchmark
          tool: 'pytest'
          output-file-path: benchmark/output.json
          github-token: ${{ secrets.GITHUB_TOKEN }}
          # don't auto-deploy to gh-pages for now
          # auto-push: true
          # show an alert via commit comment when a possible performance regression is detected
          alert-threshold: '200%'
          comment-on-alert: true
          fail-on-alert: true
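
The "Store benchmark result" step consumes the JSON report that pytest-benchmark writes. As a rough sketch (assuming pytest-benchmark's standard output schema, with a top-level "benchmarks" list and per-test "stats"), the report can be inspected locally:

    import json

    # print the benchmarks sorted by mean runtime, slowest first
    with open("benchmark/output.json") as f:
        report = json.load(f)

    for bench in sorted(report["benchmarks"], key=lambda b: b["stats"]["mean"], reverse=True):
        print(f'{bench["name"]}: mean {bench["stats"]["mean"]:.4f} s')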
41 changes: 41 additions & 0 deletions benchmark/benchmark.py
@@ -0,0 +1,41 @@
import numpy as np
import pytest
from braket.devices import LocalSimulator
from braket.ir.openqasm import Program

# always the same for repeatability
np.random.seed(0x1C2C6D66)

n_qubits = range(3, 21)
shots_and_results = (
    (0, "state_vector"),
    (0, "probability"),
    (0, "expectation z(q[0])"),
    (0, "variance y(q[0])"),
    (100, "probability"),
    (100, "expectation z(q[0])"),
    (100, "variance y(q[0])"),
    (100, "sample z(q[0])"),
)
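# together with n_qubits = range(3, 21), this grid parametrizes 18 x 8 = 144 benchmark cases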


def generate_ghz(nq: int, result_type: str):
    source = f"OPENQASM 3.0;\nqubit[{nq}] q;\nh q[0];\n"
    # entangle every remaining qubit with q[0] to build an nq-qubit GHZ state
    # (the loop must run through nq - 1, i.e. range(1, nq), to reach the last qubit)
    for q in range(1, nq):
        source += f"cnot q[0], q[{q}];\n"

    source += f"#pragma braket result {result_type}\n"
    return source
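
# for illustration, generate_ghz(3, "probability") returns the program:
#   OPENQASM 3.0;
#   qubit[3] q;
#   h q[0];
#   cnot q[0], q[1];
#   cnot q[0], q[2];
#   #pragma braket result probability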


def run_sim(oq3_prog, sim, shots):
    sim.run(oq3_prog, shots=shots)


@pytest.mark.parametrize("nq", n_qubits)
@pytest.mark.parametrize("shots_and_results", shots_and_results)
def test_benchmark(benchmark, nq, shots_and_results):
    shots, result_type = shots_and_results
    oq3_prog = Program(source=generate_ghz(nq, result_type))
    benchmark.pedantic(run_sim, args=(oq3_prog, LocalSimulator("braket_sv_v2"), shots))
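
Outside of pytest, one of these programs can be run directly against the same local simulator. A minimal sketch, assuming the amazon-braket SDK and the braket_sv_v2 backend are installed, and with generate_ghz as defined above:

    from braket.devices import LocalSimulator
    from braket.ir.openqasm import Program

    # build a 5-qubit GHZ program with a probability result type
    prog = Program(source=generate_ghz(5, "probability"))
    # shots=0 requests the exact (analytic) result rather than sampled measurements
    result = LocalSimulator("braket_sv_v2").run(prog, shots=0).result()
    print(result.values[0])  # probability mass concentrated on |00000> and |11111>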