feature: Add automated benchmarking to GitHub #35

Merged: 11 commits, Oct 7, 2024
47 changes: 47 additions & 0 deletions .github/workflows/benchmark.yml
@@ -0,0 +1,47 @@
name: Benchmark

on:
  push:
    branches: [ main ]
  pull_request:

jobs:
  benchmark:
    runs-on: ubuntu-latest
    timeout-minutes: 30
    steps:
      - uses: actions/checkout@v4
      - name: Install juliaup
        uses: julia-actions/[email protected]
        with:
          channel: '1'
      - name: Update Julia registry
        shell: julia --project=. --color=yes {0}
        run: |
          using Pkg
          Pkg.Registry.update()
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.9'
      - name: Install dependencies
        run: |
          pip install -e .[test] # to put juliapkg.json in sys.path
          python -c 'import juliacall' # force install of all deps
      - name: Benchmark
        run: |
          pytest -n 0 benchmark/benchmark.py --benchmark-json=benchmark/output.json
      - name: Store benchmark result
        uses: benchmark-action/github-action-benchmark@v1
        with:
          name: Python Benchmark with pytest-benchmark
          tool: 'pytest'
          output-file-path: benchmark/output.json
          github-token: ${{ secrets.GITHUB_TOKEN }}
          # don't auto-deploy to gh-pages for now
          #auto-push: true
          skip-fetch-gh-pages: true
          # show an alert via commit comment on detecting a possible performance regression
          alert-threshold: '200%'
          comment-on-alert: true
          fail-on-alert: true
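# Local reproduction (a sketch, assuming the project's [test] extra supplies
# pytest, pytest-benchmark, and pytest-xdist): the steps above amount to
#   pip install -e .[test]
#   python -c 'import juliacall'
#   pytest -n 0 benchmark/benchmark.py --benchmark-json=benchmark/output.json
# where -n 0 disables pytest-xdist workers, so timings are not skewed by
# concurrent processes.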
151 changes: 151 additions & 0 deletions benchmark/benchmark.py
@@ -0,0 +1,151 @@
import numpy as np
import pytest
from braket.devices import LocalSimulator
from braket.ir.openqasm import Program

# always the same for repeatability
np.random.seed(0x1C2C6D66)

batch_size = (10, 100)
n_qubits = range(3, 21)
exact_shots_results = (
"state_vector",
"density_matrix q[0], q[1]",
"probability",
"expectation z(q[0])",
"variance y(q[0])",
)
nonzero_shots_results = (
"probability",
"expectation z(q[0])",
"variance y(q[0])",
"sample z(q[0])",
)
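# With shots=0 the simulators return exact values, so state-vector and
# density-matrix result types are allowed; with shots>0 only result types
# compatible with sampling (probability, expectation, variance, sample) are valid.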


def ghz(nq: int, result_type: str):
    # Build an OpenQASM 3 GHZ-state program on nq qubits.
    source = f"OPENQASM 3.0;\nqubit[{nq}] q;\nh q[0];\n"
    for q in range(1, nq):  # entangle every remaining qubit with q[0]
        source += f"cnot q[0], q[{q}];\n"

    source += f"#pragma braket result {result_type}\n"
    return source
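# For example, ghz(3, "probability") returns the program text:
#   OPENQASM 3.0;
#   qubit[3] q;
#   h q[0];
#   cnot q[0], q[1];
#   cnot q[0], q[2];
#   #pragma braket result probability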


def qft(nq: int, result_type: str):
    # Build an OpenQASM 3 quantum Fourier transform program on nq qubits
    # (the final qubit-reversal swaps are omitted, which does not matter
    # for a runtime benchmark).
    source = f"OPENQASM 3.0;\nqubit[{nq}] q;\n"
    for q in range(nq):
        angle = np.pi / 2.0
        source += f"h q[{q}];\n"
        for ctrl_q in range(q + 1, nq):  # controlled rotations of halving angle
            source += f"cphaseshift({angle}) q[{ctrl_q}], q[{q}];\n"
            angle /= 2.0

    source += f"#pragma braket result {result_type}\n"
    return source
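# For example, qft(3, "probability") returns (angles are emitted as floats;
# shown symbolically here):
#   OPENQASM 3.0;
#   qubit[3] q;
#   h q[0];
#   cphaseshift(pi/2) q[1], q[0];
#   cphaseshift(pi/4) q[2], q[0];
#   h q[1];
#   cphaseshift(pi/2) q[2], q[1];
#   h q[2];
#   #pragma braket result probability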


def run_sim(oq3_prog, sim, shots):
    sim.run(oq3_prog, shots=shots)


def run_sim_batch(oq3_prog, sim, shots):
    sim.run_batch(oq3_prog, shots=shots)
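# Standalone usage sketch (illustrative values, not part of the benchmark run):
#   sim = LocalSimulator("braket_sv_v2")
#   prog = Program(source=ghz(4, "probability"))
#   run_sim(prog, sim, 0)                 # exact, shots=0
#   run_sim_batch([prog] * 10, sim, 100)  # batch of 10 programs, 100 shots each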


device_ids = ("braket_sv", "braket_sv_v2", "braket_dm", "braket_dm_v2")

generators = (ghz, qft)


@pytest.mark.parametrize("device_id", device_ids)
@pytest.mark.parametrize("nq", n_qubits)
@pytest.mark.parametrize("exact_results", exact_shots_results)
@pytest.mark.parametrize("circuit", generators)
def test_exact_shots(benchmark, device_id, nq, exact_results, circuit):
    # density-matrix devices cannot return a state vector, and their memory
    # grows as 4**nq, so skip those combinations
    if device_id in ("braket_dm_v2", "braket_dm") and (
        exact_results in ("state_vector",) or nq > 10
    ):
        pytest.skip()
    # computing a reduced density matrix on braket_sv is too costly beyond 16 qubits
    if (
        device_id in ("braket_sv",)
        and exact_results in ("density_matrix q[0], q[1]",)
        and nq >= 17
    ):
        pytest.skip()
    result_type = exact_results
    oq3_prog = Program(source=circuit(nq, result_type))
    sim = LocalSimulator(device_id)
    benchmark.pedantic(run_sim, args=(oq3_prog, sim, 0), iterations=5, warmup_rounds=1)
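# benchmark.pedantic runs one warm-up round and then a single timed round of
# five iterations, instead of letting pytest-benchmark auto-calibrate; this
# keeps CI timings comparable from commit to commit.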


@pytest.mark.parametrize("device_id", device_ids)
@pytest.mark.parametrize("nq", n_qubits)
@pytest.mark.parametrize("batch_size", batch_size)
@pytest.mark.parametrize("exact_results", exact_shots_results)
@pytest.mark.parametrize("circuit", generators)
def test_exact_shots_batched(
    benchmark, device_id, nq, batch_size, exact_results, circuit
):
    if device_id in ("braket_dm_v2", "braket_dm") and (
        exact_results in ("state_vector",) or nq >= 5
    ):
        pytest.skip()
    if nq >= 10:
        pytest.skip()
    # skip all batched runs for now as they are very expensive
    pytest.skip()
    result_type = exact_results
    oq3_prog = [Program(source=circuit(nq, result_type)) for _ in range(batch_size)]
    sim = LocalSimulator(device_id)
    benchmark.pedantic(
        run_sim_batch, args=(oq3_prog, sim, 0), iterations=5, warmup_rounds=1
    )


shots = (100,)


@pytest.mark.parametrize("device_id", device_ids)
@pytest.mark.parametrize("nq", n_qubits)
@pytest.mark.parametrize("shots", shots)
@pytest.mark.parametrize("nonzero_shots_results", nonzero_shots_results)
@pytest.mark.parametrize("circuit", generators)
def test_nonzero_shots(benchmark, device_id, nq, shots, nonzero_shots_results, circuit):
    if device_id in ("braket_dm_v2", "braket_dm") and nq > 10:
        pytest.skip()
    result_type = nonzero_shots_results
    oq3_prog = Program(source=circuit(nq, result_type))
    sim = LocalSimulator(device_id)
    benchmark.pedantic(
        run_sim, args=(oq3_prog, sim, shots), iterations=5, warmup_rounds=1
    )
    del sim  # drop the simulator reference before the next parametrized case


@pytest.mark.parametrize("device_id", device_ids)
@pytest.mark.parametrize("nq", n_qubits)
@pytest.mark.parametrize("batch_size", batch_size)
@pytest.mark.parametrize("shots", shots)
@pytest.mark.parametrize("nonzero_shots_results", nonzero_shots_results)
@pytest.mark.parametrize("circuit", generators)
def test_nonzero_shots_batched(
    benchmark, device_id, nq, batch_size, shots, nonzero_shots_results, circuit
):
    if device_id in ("braket_dm_v2", "braket_dm") and nq >= 5:
        pytest.skip()
    if nq >= 10:
        pytest.skip()
    # skip all batched runs for now as they are very expensive
    pytest.skip()
    result_type = nonzero_shots_results
    oq3_prog = [Program(source=circuit(nq, result_type)) for _ in range(batch_size)]
    sim = LocalSimulator(device_id)
    benchmark.pedantic(
        run_sim_batch, args=(oq3_prog, sim, shots), iterations=5, warmup_rounds=1
    )
    del sim