Updated install guide and fixing tutorials (#860)
This PR
- Revises the install guide, thereby fixing #849, #748, #513, and #353, and covering the two items marked with a checkmark in #845
- Removes the section about logging in to NGC and, to validate this, updates the integration tests to use the NGC image (a pull sketch is given below)
- Fixes various typos in the tutorials
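For reference, the NGC-hosted image mentioned above should be pullable without a registry login (this PR removes the login instructions). A minimal sketch, assuming Docker is available and using the nightly `latest-base` tag that appears in the Dockerfile changes below; end users may want a different tag:

    docker pull nvcr.io/nvidia/nightly/cuda-quantum:latest-base
    docker run -it --rm nvcr.io/nvidia/nightly/cuda-quantum:latest-base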
bettinaheim authored Nov 10, 2023
1 parent ba3fb4f commit ab92691
Showing 17 changed files with 538 additions and 132 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/clean_up.yml
@@ -42,7 +42,7 @@ jobs:

strategy:
matrix:
image_name: [cuda-quantum-dev, cuda-quantum-devdeps, open-mpi] # cuda-quantum
image_name: [cuda-quantum-dev, cuda-quantum-devdeps, open-mpi]
fail-fast: false

steps:
3 changes: 3 additions & 0 deletions .github/workflows/config/md_link_check_config.json
@@ -17,6 +17,9 @@
},
{
"pattern": "^https://epubs.siam.org/doi/10.1137/S0097539796300921"
},
{
"pattern": "^https://vscode.dev/"
}
]
}
3 changes: 3 additions & 0 deletions .github/workflows/config/spellcheck_config.yml
@@ -66,6 +66,9 @@ matrix:
- pyspelling.filters.context:
context_visible_first: true
delimiters:
# Ignore multiline content fenced by .. spellcheck-disable and .. spellcheck-enable
- open: '(?s):spellcheck-disable:'
close: ':spellcheck-enable:'
# Ignore lines that start with two dots
- open: '^\s*\.\.'
close: '$'
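To check how the new delimiters behave, the spell check can be run locally against this configuration; a rough sketch, assuming `pyspelling` and a dictionary backend such as `aspell` are installed (the exact CI invocation may differ):

    pip install pyspelling
    pyspelling -c .github/workflows/config/spellcheck_config.yml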
5 changes: 5 additions & 0 deletions .github/workflows/config/spelling_allowlist.txt
@@ -3,12 +3,14 @@ APIs
AST
BFGS
CLA
CLI
CMake
COBYLA
CPTP
CPU
CPUs
CUDA
CuPy
DGX
DOI
Fourier
@@ -24,6 +26,7 @@ IQM
IonQ
JIT
JSON
Jupyter
Kraus
LLVM
LSB
@@ -49,6 +52,8 @@ QPUs
QTX
Quake
Quantinuum
RSA
TCP
Toffoli
VQE
Vazirani
2 changes: 1 addition & 1 deletion .github/workflows/publishing.yml
@@ -105,7 +105,7 @@ jobs:
github_commit=`cat "$name.txt" | grep -o 'source-sha: \S*' | cut -d ' ' -f 2`
release_title=`cat "$name.txt" | grep -o 'release-title: \S*' | cut -d ' ' -f 2`
release_version=`cat "$name.txt" | grep -o 'release-version: \S*' | cut -d ' ' -f 2`
elif [ "$name" == "cuda_quantum_docs" && ${{ github.event_name == 'workflow_dispatch' && inputs.include_docs }} ]; then
elif [ "$name" == "cuda_quantum_docs" ] && ${{ github.event_name == 'workflow_dispatch' && inputs.include_docs }}; then
docs_archive="$(pwd)/cuda_quantum_docs.zip"
gh api $url > "$docs_archive"
fi
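The fix above addresses a shell quirk: `&&` cannot be used inside a single-bracket `[ ... ]` test, so the old condition never parsed as intended. A small illustration, unrelated to the workflow's actual variables:

    [ "$a" == "b" && "$c" == "d" ]      # broken: bash splits at && and `[` reports a missing `]'
    [ "$a" == "b" ] && [ "$c" == "d" ]  # valid: two tests combined at the shell level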
3 changes: 2 additions & 1 deletion docker/release/cudaq.Dockerfile
@@ -16,7 +16,7 @@
#
# Usage:
# Must be built from the repo root with:
# docker build -t ghcr.io/nvidia/cuda-quantum:latest -f docker/release/cudaq.Dockerfile .
# docker build -t nvcr.io/nvidia/nightly/cuda-quantum:latest-base -f docker/release/cudaq.Dockerfile .
#
# The build argument cudaqdev_image defines the CUDA Quantum dev image that contains the CUDA
# Quantum build. This Dockerfile copies the built components into the base_image. The specified
@@ -116,6 +116,7 @@ RUN env | egrep -v "^(HOME=|USER=|MAIL=|LC_ALL=|LS_COLORS=|LANG=|HOSTNAME=|PWD=|
RUN adduser --disabled-password --gecos '' cudaq && adduser cudaq sudo \
&& echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers \
&& mkdir -p /home/cudaq/.ssh && mkdir -p /var/run/sshd
ENV PATH="$PATH:/home/cudaq/.local/bin"

ADD ./docs/sphinx/examples/ /home/cudaq/examples/
ADD ./docker/release/README.md /home/cudaq/README.md
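The added `ENV PATH` line makes user-level Python installs visible inside the container, since `pip install --user` places console scripts under `/home/cudaq/.local/bin`. A hedged illustration; the package is only an example:

    pip install --user jupyter   # scripts land in /home/cudaq/.local/bin
    jupyter --version            # found because .local/bin is now on PATH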
2 changes: 1 addition & 1 deletion docker/release/cudaq.ext.Dockerfile
@@ -6,7 +6,7 @@
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #

ARG base_image=ghcr.io/nvidia/cuda-quantum:latest-base
ARG base_image=nvcr.io/nvidia/nightly/cuda-quantum:latest-base
FROM $base_image

USER root
10 changes: 7 additions & 3 deletions docs/sphinx/examples/cpp/algorithms/bernstein_vazirani.cpp
@@ -15,6 +15,10 @@
#include <iostream>
#include <random>

#ifndef SIZE
#define SIZE 5
#endif

template <int nrOfBits>
std::bitset<nrOfBits> random_bits(int seed) {

@@ -58,10 +62,10 @@ struct bernstein_vazirani {
};

int main(int argc, char *argv[]) {
auto seed = 1 < argc ? atoi(argv[1]) : 1;
auto seed = 1 < argc ? atoi(argv[1]) : time(NULL);

// The number of qubits can be >32 when targeting the `nvidia-mgpu` backend.
const int nr_qubits = 28;
const int nr_qubits = SIZE;
auto bitvector = random_bits<nr_qubits>(seed);
auto kernel = bernstein_vazirani<nr_qubits>{};
auto counts = cudaq::sample(kernel, bitvector);
@@ -71,7 +75,7 @@ int main(int argc, char *argv[]) {
printf("Measured bitstring: %s\n\n", counts.most_probable().c_str());

for (auto &[bits, count] : counts) {
printf("observed %s (probability %u%%)\n", bits.data(),
printf("observed %s with %u%% probability\n", bits.data(),
100 * (uint)((double)count / 1000.));
}
}
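The new `SIZE` macro lets the problem size be chosen at compile time rather than by editing the source. A rough build-and-run sketch using the CUDA Quantum compiler, assuming `nvq++` is on the path (the flags shown are illustrative):

    nvq++ -DSIZE=28 --target nvidia bernstein_vazirani.cpp -o bernstein_vazirani.x
    ./bernstein_vazirani.x 1234   # optional seed argument; defaults to time(NULL)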
60 changes: 39 additions & 21 deletions docs/sphinx/examples/python/bernstein_vazirani.py
@@ -1,3 +1,4 @@
import argparse
import cudaq
import random

@@ -65,24 +66,41 @@ def bernstein_vazirani(qubit_count: int):
# If you have an NVIDIA GPU you can use this example to see
# that the GPU-accelerated backends can easily handle a
# larger number of qubits compared to the CPU-only backend.

# Depending on the available memory on your GPU, you can
# set the number of qubits to around 30 qubits, and un-comment
# the `cudaq.set_target(nvidia)` line.

# Note: Without setting the target to the `nvidia` backend,
# a 30 qubit simulation simply seems to hang; that is
# because it takes a long time for the CPU-only backend
# to handle this number of qubits!

qubit_count = 5 # set to around 30 qubits for `nvidia` target
# ```
# cudaq.set_target("nvidia")
# ```

kernel, hidden_bitstring = bernstein_vazirani(qubit_count)
result = cudaq.sample(kernel)

print(f"encoded bitstring = {hidden_bitstring}")
print(f"measured state = {result.most_probable()}")
print(f"Were we successful? {hidden_bitstring == result.most_probable()}")
if __name__ == '__main__':
parser = argparse.ArgumentParser(
prog='python',
description='Run a Bernstein-Vazirani algorithm using CUDA Quantum.',
epilog=
'For more information about CUDA Quantum, see https://nvidia.github.io/cuda-quantum'
)
parser.add_argument('--size',
type=int,
required=False,
default=5,
help='The number of bits in the secret string.')
parser.add_argument('--target',
type=str,
required=False,
default='',
help='The target to execute the algorithm on.')
args = parser.parse_args()

# Depending on the available memory on your GPU, you can
# set the size of the secret string to around 28-32 when
# you pass the target `nvidia` as a command line argument.

# Note: Without setting the target to the `nvidia` backend,
# the program simply seems to hang; that is because it takes
# a long time for the CPU-only backend to simulate 28+ qubits!

qubit_count = args.size
if args.target and not args.target.isspace():
cudaq.set_target(args.target)

print(f"Running on target {cudaq.get_target().name} ...")
kernel, hidden_bitstring = bernstein_vazirani(qubit_count)
result = cudaq.sample(kernel)

print(f"encoded bitstring = {hidden_bitstring}")
print(f"measured state = {result.most_probable()}")
print(f"Were we successful? {hidden_bitstring == result.most_probable()}")
(diff for another changed file; path not shown in this view)
@@ -5,7 +5,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"# Single Qubit Rotation \n",
"# Cost Minimization\n",
"\n",
"Below we start with a basic example of a hybrid variational algorithm which involves flipping the bloch vector of a qubit from the $\\ket{0}$ to the $\\ket{1}$ state. First we import the relevant packages and set our backend to simulate our workflow on NVIDIA GPUs. \n",
"\n",
@@ -64,8 +64,6 @@
"outputs": [],
"source": [
"cost_values = []\n",
"cost_values.append(initial_cost_value)\n",
"\n",
"\n",
"def cost(parameters):\n",
" \"\"\"Returns the expectation value as our cost.\"\"\"\n",
@@ -115,6 +113,15 @@
"result = optimizer.optimize(dimensions=2, function=cost)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install matplotlib"
]
},
{
"cell_type": "code",
"execution_count": 19,
Expand Down Expand Up @@ -147,7 +154,6 @@
],
"source": [
"# Plotting how the value of the cost function decreases during the minimization procedure.\n",
"# !pip install matplotlib\n",
"import matplotlib.pyplot as plt\n",
"\n",
"x_values = list(range(len(cost_values)))\n",
(diff for another changed file; path not shown in this view)
@@ -33,7 +33,7 @@
"qubit_count = 2\n",
"\n",
"# Define the simulation target.\n",
"cudaq.set_target(\"nvidia\")\n",
"cudaq.set_target(\"qpp-cpu\")\n",
"\n",
"# Define a quantum kernel function.\n",
"kernel = cudaq.make_kernel()\n",
@@ -87,7 +87,7 @@
"qubit_count = 2\n",
"\n",
"# Define the simulation target.\n",
"cudaq.set_target(\"nvidia\")\n",
"cudaq.set_target(\"qpp-cpu\")\n",
"\n",
"# Define a quantum kernel function.\n",
"kernel = cudaq.make_kernel()\n",
11 changes: 9 additions & 2 deletions docs/sphinx/examples/python/tutorials/hybrid_qnns.ipynb
@@ -22,6 +22,15 @@
"We perform binary classification on the MNIST dataset where data flows through the neural network architecture to the quantum circuit whose output is used to classify hand written digits."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install matplotlib torch torchvision"
]
},
{
"cell_type": "code",
"execution_count": 1,
@@ -30,8 +39,6 @@
"source": [
"# Import the relevant packages\n",
"\n",
"# !pip install matplotlib torch torchvision\n",
"\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"\n",
(diff for another changed file; path not shown in this view)
@@ -93,7 +93,7 @@
}
],
"source": [
"cpu_result = ghz_state(n_qubits=2, target=\"qpp-cpu\")\n",
"cpu_result = ghz_state(qubit_count=2, target=\"qpp-cpu\")\n",
"\n",
"cpu_result.dump()"
]
@@ -122,7 +122,7 @@
}
],
"source": [
"gpu_result = ghz_state(n_qubits=25, target=\"nvidia\")\n",
"gpu_result = ghz_state(qubit_count=25, target=\"nvidia\")\n",
"\n",
"gpu_result.dump()"
]
2 changes: 1 addition & 1 deletion docs/sphinx/examples/python/tutorials/vqe.ipynb
@@ -15,7 +15,7 @@
"metadata": {},
"outputs": [],
"source": [
"# !pip install openfermionpyscf"
"%pip install openfermionpyscf"
]
},
{
(remaining changed files not shown)