diff --git a/.github/workflows/clean_up.yml b/.github/workflows/clean_up.yml index f8df83b51f..7d54aa58a9 100644 --- a/.github/workflows/clean_up.yml +++ b/.github/workflows/clean_up.yml @@ -42,7 +42,7 @@ jobs: strategy: matrix: - image_name: [cuda-quantum-dev, cuda-quantum-devdeps, open-mpi] # cuda-quantum + image_name: [cuda-quantum-dev, cuda-quantum-devdeps, open-mpi] fail-fast: false steps: diff --git a/.github/workflows/config/md_link_check_config.json b/.github/workflows/config/md_link_check_config.json index 9c738d04c3..4f98252a88 100644 --- a/.github/workflows/config/md_link_check_config.json +++ b/.github/workflows/config/md_link_check_config.json @@ -17,6 +17,9 @@ }, { "pattern": "^https://epubs.siam.org/doi/10.1137/S0097539796300921" + }, + { + "pattern": "^https://vscode.dev/" } ] } \ No newline at end of file diff --git a/.github/workflows/config/spellcheck_config.yml b/.github/workflows/config/spellcheck_config.yml index d3934d8542..a275754f14 100644 --- a/.github/workflows/config/spellcheck_config.yml +++ b/.github/workflows/config/spellcheck_config.yml @@ -66,6 +66,9 @@ matrix: - pyspelling.filters.context: context_visible_first: true delimiters: + # Ignore multiline content fenced by .. spellcheck-disable and .. spellcheck-enable + - open: '(?s):spellcheck-disable:' + close: ':spellcheck-enable:' # Ignore lines that start with two dots - open: '^\s*\.\.' close: '$' diff --git a/.github/workflows/config/spelling_allowlist.txt b/.github/workflows/config/spelling_allowlist.txt index 3852208bb6..efef8d09bb 100644 --- a/.github/workflows/config/spelling_allowlist.txt +++ b/.github/workflows/config/spelling_allowlist.txt @@ -3,12 +3,14 @@ APIs AST BFGS CLA +CLI CMake COBYLA CPTP CPU CPUs CUDA +CuPy DGX DOI Fourier @@ -24,6 +26,7 @@ IQM IonQ JIT JSON +Jupyter Kraus LLVM LSB @@ -49,6 +52,8 @@ QPUs QTX Quake Quantinuum +RSA +TCP Toffoli VQE Vazirani diff --git a/.github/workflows/publishing.yml b/.github/workflows/publishing.yml index e2a4c898dd..2d3feb9afb 100644 --- a/.github/workflows/publishing.yml +++ b/.github/workflows/publishing.yml @@ -105,7 +105,7 @@ jobs: github_commit=`cat "$name.txt" | grep -o 'source-sha: \S*' | cut -d ' ' -f 2` release_title=`cat "$name.txt" | grep -o 'release-title: \S*' | cut -d ' ' -f 2` release_version=`cat "$name.txt" | grep -o 'release-version: \S*' | cut -d ' ' -f 2` - elif [ "$name" == "cuda_quantum_docs" && ${{ github.event_name == 'workflow_dispatch' && inputs.include_docs }} ]; then + elif [ "$name" == "cuda_quantum_docs" ] && ${{ github.event_name == 'workflow_dispatch' && inputs.include_docs }}; then docs_archive="$(pwd)/cuda_quantum_docs.zip" gh api $url > "$docs_archive" fi diff --git a/docker/release/cudaq.Dockerfile b/docker/release/cudaq.Dockerfile index 7d3279a0d3..978185622e 100644 --- a/docker/release/cudaq.Dockerfile +++ b/docker/release/cudaq.Dockerfile @@ -16,7 +16,7 @@ # # Usage: # Must be built from the repo root with: -# docker build -t ghcr.io/nvidia/cuda-quantum:latest -f docker/release/cudaq.Dockerfile . +# docker build -t nvcr.io/nvidia/nightly/cuda-quantum:latest-base -f docker/release/cudaq.Dockerfile . # # The build argument cudaqdev_image defines the CUDA Quantum dev image that contains the CUDA # Quantum build. This Dockerfile copies the built components into the base_image. 
The specified @@ -116,6 +116,7 @@ RUN env | egrep -v "^(HOME=|USER=|MAIL=|LC_ALL=|LS_COLORS=|LANG=|HOSTNAME=|PWD=| RUN adduser --disabled-password --gecos '' cudaq && adduser cudaq sudo \ && echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers \ && mkdir -p /home/cudaq/.ssh && mkdir -p /var/run/sshd +ENV PATH="$PATH:/home/cudaq/.local/bin" ADD ./docs/sphinx/examples/ /home/cudaq/examples/ ADD ./docker/release/README.md /home/cudaq/README.md diff --git a/docker/release/cudaq.ext.Dockerfile b/docker/release/cudaq.ext.Dockerfile index 6241d8ea04..7899841fb5 100644 --- a/docker/release/cudaq.ext.Dockerfile +++ b/docker/release/cudaq.ext.Dockerfile @@ -6,7 +6,7 @@ # the terms of the Apache License 2.0 which accompanies this distribution. # # ============================================================================ # -ARG base_image=ghcr.io/nvidia/cuda-quantum:latest-base +ARG base_image=nvcr.io/nvidia/nightly/cuda-quantum:latest-base FROM $base_image USER root diff --git a/docs/sphinx/examples/cpp/algorithms/bernstein_vazirani.cpp b/docs/sphinx/examples/cpp/algorithms/bernstein_vazirani.cpp index 5aa50278b4..ba3297bcb6 100644 --- a/docs/sphinx/examples/cpp/algorithms/bernstein_vazirani.cpp +++ b/docs/sphinx/examples/cpp/algorithms/bernstein_vazirani.cpp @@ -15,6 +15,10 @@ #include #include +#ifndef SIZE +#define SIZE 5 +#endif + template std::bitset random_bits(int seed) { @@ -58,10 +62,10 @@ struct bernstein_vazirani { }; int main(int argc, char *argv[]) { - auto seed = 1 < argc ? atoi(argv[1]) : 1; + auto seed = 1 < argc ? atoi(argv[1]) : time(NULL); // The number of qubits can be >32 when targeting the `nvidia-mgpu` backend. - const int nr_qubits = 28; + const int nr_qubits = SIZE; auto bitvector = random_bits(seed); auto kernel = bernstein_vazirani{}; auto counts = cudaq::sample(kernel, bitvector); @@ -71,7 +75,7 @@ int main(int argc, char *argv[]) { printf("Measured bitstring: %s\n\n", counts.most_probable().c_str()); for (auto &[bits, count] : counts) { - printf("observed %s (probability %u%%)\n", bits.data(), + printf("observed %s with %u%% probability\n", bits.data(), 100 * (uint)((double)count / 1000.)); } } diff --git a/docs/sphinx/examples/python/bernstein_vazirani.py b/docs/sphinx/examples/python/bernstein_vazirani.py index d430b0e9c0..ecb62a3104 100644 --- a/docs/sphinx/examples/python/bernstein_vazirani.py +++ b/docs/sphinx/examples/python/bernstein_vazirani.py @@ -1,3 +1,4 @@ +import argparse import cudaq import random @@ -65,24 +66,41 @@ def bernstein_vazirani(qubit_count: int): # If you have a NVIDIA GPU you can use this example to see # that the GPU-accelerated backends can easily handle a # larger number of qubits compared the CPU-only backend. - -# Depending on the available memory on your GPU, you can -# set the number of qubits to around 30 qubits, and un-comment -# the `cudaq.set_target(nvidia)` line. - -# Note: Without setting the target to the `nvidia` backend, -# a 30 qubit simulation simply seems to hang; that is -# because it takes a long time for the CPU-only backend -# to handle this number of qubits! - -qubit_count = 5 # set to around 30 qubits for `nvidia` target -# ``` -# cudaq.set_target("nvidia") -# ``` - -kernel, hidden_bitstring = bernstein_vazirani(qubit_count) -result = cudaq.sample(kernel) - -print(f"encoded bitstring = {hidden_bitstring}") -print(f"measured state = {result.most_probable()}") -print(f"Were we successful? 
{hidden_bitstring == result.most_probable()}") +if __name__ == '__main__': + parser = argparse.ArgumentParser( + prog='python', + description='Run a Bernstein-Vazirani algorithm using CUDA Quantum.', + epilog= + 'For more information about CUDA Quantum, see https://nvidia.github.io/cuda-quantum' + ) + parser.add_argument('--size', + type=int, + required=False, + default=5, + help='The number of bits in the secret string.') + parser.add_argument('--target', + type=str, + required=False, + default='', + help='The target to execute the algorithm on.') + args = parser.parse_args() + + # Depending on the available memory on your GPU, you can + # set the size of the secret string to around 28-32 when + # you pass the target `nvidia` as a command line argument. + + # Note: Without setting the target to the `nvidia` backend, + # the program simply seems to hang; that is because it takes + # a long time for the CPU-only backend to simulate 28+ qubits! + + qubit_count = args.size + if args.target and not args.target.isspace(): + cudaq.set_target(args.target) + + print(f"Running on target {cudaq.get_target().name} ...") + kernel, hidden_bitstring = bernstein_vazirani(qubit_count) + result = cudaq.sample(kernel) + + print(f"encoded bitstring = {hidden_bitstring}") + print(f"measured state = {result.most_probable()}") + print(f"Were we successful? {hidden_bitstring == result.most_probable()}") diff --git a/docs/sphinx/examples/python/tutorials/single_qubit_rotation.ipynb b/docs/sphinx/examples/python/tutorials/cost_minimization.ipynb similarity index 99% rename from docs/sphinx/examples/python/tutorials/single_qubit_rotation.ipynb rename to docs/sphinx/examples/python/tutorials/cost_minimization.ipynb index 28921c2de1..52f3a8a119 100644 --- a/docs/sphinx/examples/python/tutorials/single_qubit_rotation.ipynb +++ b/docs/sphinx/examples/python/tutorials/cost_minimization.ipynb @@ -5,7 +5,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Single Qubit Rotation \n", + "# Cost Minimization\n", "\n", "Below we start with a basic example of a hybrid variational algorithm which involves flipping the bloch vector of a qubit from the $\\ket{0}$ to the $\\ket{1}$ state. First we import the relevant packages and set our backend to simulate our workflow on NVIDIA GPUs. 
\n", "\n", @@ -64,8 +64,6 @@ "outputs": [], "source": [ "cost_values = []\n", - "cost_values.append(initial_cost_value)\n", - "\n", "\n", "def cost(parameters):\n", " \"\"\"Returns the expectation value as our cost.\"\"\"\n", @@ -115,6 +113,15 @@ "result = optimizer.optimize(dimensions=2, function=cost)" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install matplotlib" + ] + }, { "cell_type": "code", "execution_count": 19, @@ -147,7 +154,6 @@ ], "source": [ "# Plotting how the value of the cost function decreases during the minimization procedure.\n", - "# !pip install matplotlib\n", "import matplotlib.pyplot as plt\n", "\n", "x_values = list(range(len(cost_values)))\n", diff --git a/docs/sphinx/examples/python/tutorials/executing_circuits.ipynb b/docs/sphinx/examples/python/tutorials/executing_circuits.ipynb index d879ecd1bf..256671626a 100644 --- a/docs/sphinx/examples/python/tutorials/executing_circuits.ipynb +++ b/docs/sphinx/examples/python/tutorials/executing_circuits.ipynb @@ -33,7 +33,7 @@ "qubit_count = 2\n", "\n", "# Define the simulation target.\n", - "cudaq.set_target(\"nvidia\")\n", + "cudaq.set_target(\"qpp-cpu\")\n", "\n", "# Define a quantum kernel function.\n", "kernel = cudaq.make_kernel()\n", @@ -87,7 +87,7 @@ "qubit_count = 2\n", "\n", "# Define the simulation target.\n", - "cudaq.set_target(\"nvidia\")\n", + "cudaq.set_target(\"qpp-cpu\")\n", "\n", "# Define a quantum kernel function.\n", "kernel = cudaq.make_kernel()\n", diff --git a/docs/sphinx/examples/python/tutorials/hybrid_qnns.ipynb b/docs/sphinx/examples/python/tutorials/hybrid_qnns.ipynb index 801d11320f..8c513a7a1f 100644 --- a/docs/sphinx/examples/python/tutorials/hybrid_qnns.ipynb +++ b/docs/sphinx/examples/python/tutorials/hybrid_qnns.ipynb @@ -22,6 +22,15 @@ "We perform binary classification on the MNIST dataset where data flows through the neural network architecture to the quantum circuit whose output is used to classify hand written digits." 
] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install matplotlib torch torchvision" + ] + }, { "cell_type": "code", "execution_count": 1, @@ -30,8 +39,6 @@ "source": [ "# Import the relevant packages\n", "\n", - "# !pip install matplotlib torch torchvision\n", - "\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", "\n", diff --git a/docs/sphinx/examples/python/tutorials/multi_gpu_workflows.ipynb b/docs/sphinx/examples/python/tutorials/multi_gpu_workflows.ipynb index 8958435edc..1197b01c96 100644 --- a/docs/sphinx/examples/python/tutorials/multi_gpu_workflows.ipynb +++ b/docs/sphinx/examples/python/tutorials/multi_gpu_workflows.ipynb @@ -93,7 +93,7 @@ } ], "source": [ - "cpu_result = ghz_state(n_qubits=2, target=\"qpp-cpu\")\n", + "cpu_result = ghz_state(qubit_count=2, target=\"qpp-cpu\")\n", "\n", "cpu_result.dump()" ] @@ -122,7 +122,7 @@ } ], "source": [ - "gpu_result = ghz_state(n_qubits=25, target=\"nvidia\")\n", + "gpu_result = ghz_state(qubit_count=25, target=\"nvidia\")\n", "\n", "gpu_result.dump()" ] diff --git a/docs/sphinx/examples/python/tutorials/vqe.ipynb b/docs/sphinx/examples/python/tutorials/vqe.ipynb index f0e6090f4d..017c45bce9 100644 --- a/docs/sphinx/examples/python/tutorials/vqe.ipynb +++ b/docs/sphinx/examples/python/tutorials/vqe.ipynb @@ -15,7 +15,7 @@ "metadata": {}, "outputs": [], "source": [ - "# !pip install openfermionpyscf" + "%pip install openfermionpyscf" ] }, { diff --git a/docs/sphinx/install.rst b/docs/sphinx/install.rst index 0535032689..4388f0b472 100644 --- a/docs/sphinx/install.rst +++ b/docs/sphinx/install.rst @@ -1,128 +1,187 @@ -CUDA Quantum Open Beta Installation +Getting Started ******************************************* -Docker Image --------------------- +This guide walks through how to :ref:`install CUDA Quantum ` on your system, and how to set up :ref:`VS Code for local development `. +The section on :ref:`connecting to a remote host ` contains some +guidance for application development on a remote host where CUDA Quantum is installed. -Install the Docker Image -++++++++++++++++++++++++++++++++++++ +.. _install-cuda-quantum: -Docker images for all CUDA Quantum releases are available on the `NGC Container Registry`_. -To download images from NGC, please follow the following steps if you have not done so already: +Local Installation +------------------------------------ -- `Create an account `__ -- `Sign in `__ to access your account and go to `Setup `__. -- Click on `Get API Key` and generate a new key (this will invalidate any existing keys). -- Follow the instructions that appear to use that key to log in to the NGC registry using Docker. +The following sections contain instructions for how to install CUDA Quantum on your machine using -Once you have done so, run `docker login nvcr.io` (again) to confirm you can authenticate with the registry. -You should see a message "Login Succeeded". +- :ref:`**Docker** `: A fully featured CUDA Quantum installation including all C++ and Python tools is available as a `Docker `__ image. +- :ref:`**Singularity** `: A `Singularity `__ container can easily be created based on our Docker images. +- :ref:`**PyPI** `: Additionally, we distribute pre-built Python wheels via `PyPI `__. -.. _NGC Container Registry: https://catalog.ngc.nvidia.com/orgs/nvidia/containers/cuda-quantum +If you would like to build CUDA Quantum from source instead, please follow the instructions on the `CUDA Quantum GitHub repository`_. + +.. 
_CUDA Quantum GitHub repository: https://github.com/NVIDIA/cuda-quantum/blob/main/Building.md + +If you are unsure which option suits you best, we recommend using our :ref:`Docker image ` to develop your applications in a controlled environment that does not depend on, or interfere with, other software +that is installed on your system. + +.. _install-docker-image: + +Docker +++++++++++++++++++++++++++++++++++++ -In addition to publishing stable releases, we also publish docker images whenever we update -certain branches of our `GitHub repository `_. -These images are published in a separate location `nvidia/nightly` on NGC, as well as on GitHub. +To download and use our Docker images, you will need to install and launch the Docker engine. +If you do not already have Docker installed on your system, you can get it by downloading and installing `Docker Desktop `_. +If you do not have the necessary administrator permissions to install software on your machine, +take a look at the section below on how to use `Singularity`_ instead. + +Docker images for all CUDA Quantum releases are available on the `NGC Container Registry`_. +In addition to publishing `stable releases `__, +we also publish Docker images whenever we update certain branches on our `GitHub repository `_. +These images are published in our `nightly channel on NGC `__. To download the latest version on the main branch of our GitHub repository, for example, use the command .. code-block:: console docker pull nvcr.io/nvidia/nightly/cuda-quantum:latest +.. _NGC Container Registry: https://catalog.ngc.nvidia.com/orgs/nvidia/containers/cuda-quantum + Early prototypes for features we are considering can be tried out by using the image tags starting with `experimental`. The `README` in the `/home/cudaq` folder in the container gives more details about the feature. We welcome and appreciate your feedback about these early prototypes; how popular they are will help inform whether we should include them in future releases. -.. _use-cuda-quantum-in-terminal: +Once you have downloaded an image, the container can be run using the command -Use CUDA Quantum in a Terminal -+++++++++++++++++++++++++++++++++++++ +.. code-block:: console -The container can be run using the following command + docker run -it --name cuda-quantum nvcr.io/nvidia/nightly/cuda-quantum:latest + +Replace the image name and/or tag in the command above, if necessary, with the one you want to use. +This will give you terminal access to the created container. To enable support +for GPU-accelerated backends, you will need to pass the :code:`--gpus` flag when launching +the container, for example: .. code-block:: console - docker run -it --name cuda-quantum + docker run -it --gpus all --name cuda-quantum nvcr.io/nvidia/nightly/cuda-quantum:latest + +.. note:: + + This command will fail if you do not have a suitable NVIDIA GPU available, or if your driver + version is insufficient. To improve compatibility with older drivers, you may need to install the + `NVIDIA container toolkit`_. -replacing :code:`` with the name and tag of the image you downloaded. +.. _NVIDIA container toolkit: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html -This will give you terminal access to the created container, for example +You can stop and exit the container by typing the command :code:`exit`. If you did not specify +:code:`--rm` flag when launching the container, the container still exists after exiting, as well as any +changes you made in it. 
You can get back to it using +the command :code:`docker start -i cuda-quantum`. +You can delete an existing container and any changes you made using :code:`docker rm -v cuda-quantum`. -.. code-block:: console +When working with Docker images, the files inside the container are not visible outside the container environment. +To facilitate application development with, for example, debugging, code completion, hover information, and so on, +please take a look at the section on :ref:`Development with VS Code `. - user@host:~$ docker run -it --name cuda-quantum nvcr.io/nvidia/nightly/cuda-quantum:latest - To run a command as administrator (user "root"), use "sudo ". - See "man sudo_root" for details. +Alternatively, it is possible, but not recommended, to launch an SSH server inside the container environment and connect an IDE using SSH. To do so, make sure you have generated a suitable RSA key pair; if your `~/.ssh/` folder does not already contain the files `id_rsa.pub` and `id.rsa`, +follow the instructions for generating a new SSH key on `this page `__. +You can then launch the container and connect to it via SSH by executing the following commands: - ========================= - NVIDIA CUDA Quantum - ========================= +.. code-block:: console - Version: latest + docker run -itd --gpus all --name cuda-quantum -p 2222:22 nvcr.io/nvidia/nightly/cuda-quantum:latest + docker exec cuda-quantum bash -c "sudo apt-get install -y --no-install-recommends openssh-server" + docker exec cuda-quantum bash -c "sudo sed -i -E "s/#?\s*UsePAM\s+.+/UsePAM yes/g" /etc/ssh/sshd_config" + docker cp ~/.ssh/id_rsa.pub cuda-quantum:/home/cudaq/.ssh/authorized_keys + docker exec -d cuda-quantum bash -c "sudo -E /usr/sbin/sshd -D" + ssh cudaq@localhost -p 2222 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o GlobalKnownHostsFile=/dev/null - Copyright (c) 2023 NVIDIA Corporation & Affiliates - All rights reserved. - cudaq@container:~$ ls examples/ - cpp python +.. _install-singularity-image: -.. note:: +Singularity +++++++++++++++++++++++++++++++++++++ - If you have NVIDIA GPUs available and NVIDIA Docker correctly configured, - you can add :code:`--gpus all` to the :code:`docker run` command to expose all available GPUs - to the container, or :code:`--gpus '"device=1"'` to select a specific GPU device. - Unless you specify this flag, you will not be able to compile to the :code:`--target nvidia` - target. +You can use `Singularity `__ to run a CUDA Quantum container in a folder without needing administrator permissions. +If you do not already have Singularity installed, you can build a relocatable installation from source. +To do so on Linux or WSL, make sure you have the `necessary prerequisites `__ installed, download a suitable version of the `go toolchain `__, and make sure the `go` binaries are on your :code:`PATH`. You can then build Singularity with the commands -.. note:: +.. code-block:: console - If you would like a temporary container, pass :code:`--rm`. This will delete your - container upon exit. + wget https://github.com/sylabs/singularity/releases/download/v4.0.1/singularity-ce-4.0.1.tar.gz + tar -xzf singularity-ce-4.0.1.tar.gz singularity-ce-4.0.1/ && rm singularity-ce-4.0.1.tar.gz && cd singularity-ce-4.0.1/ + ./mconfig --without-suid --prefix="$HOME/.local/singularity" + make -C ./builddir && make -C ./builddir install && cd .. 
&& rm -rf singularity-ce-4.0.1/ + echo 'PATH="$PATH:$HOME/.local/singularity/bin/"' >> ~/.profile && source ~/.profile -You can stop and exit the container by typing the command :code:`exit`. If you did not specify -:code:`--rm`, the container still exists as well as any changes you made in it. You can get back to it using -the command :code:`docker start -i cuda-quantum`. -You can delete an existing container and any changes you made using :code:`docker rm -v cuda-quantum`. +For more information about using Singularity on other systems, take a look at the `admin guide `__. -Use CUDA Quantum in VS Code -+++++++++++++++++++++++++++++++++++++ +Once you have singularity installed, create a file `cuda-quantum.def` with the following content: -If you have `VS Code`_ installed, you can use it to work inside your container. -To do so, install the `Dev Containers extension`_: +.. code-block:: console -.. image:: _static/devContainersExtension.png + Bootstrap: docker + From: nvcr.io/nvidia/nightly/cuda-quantum:latest -Follow the steps :ref:`above` to start the container. -Open VS Code and navigate to the Remote Explorer. You should see the running cuda-quantum development container listed there. + %runscript + mount devpts /dev/pts -t devpts + cp -r /home/cudaq/* . + bash -.. image:: _static/attachToDevContainer.png +Replace the image name and/or tag in the `From` line, if necessary, with the one you want to use; +In addition to publishing `stable releases `__, +we also publish Docker images whenever we update certain branches on our `GitHub repository `_. +These images are published in our `nightly channel on NGC `__. +Early prototypes for features we are considering can be tried out by using the image tags starting +with `experimental`. We welcome and appreciate your feedback about these early prototypes; +how popular they are will help inform whether we should include them in future releases. -Click on :code:`Attach to Container`. A new VS Code instance will open in that container. Open the `/home/cudaq` -folder to see the `README.md` file and the CUDA Quantum examples that are included in the container. To run the examples, -open a terminal by going to the Terminal menu and select :code:`New Terminal`. +You can then create a CUDA Quantum container by running the following commands: -.. image:: _static/openTerminal.png +.. code-block:: console -You can now compile and run the C++ examples using the :code:`nvq++` compiler, which is installed in your :code:`PATH`, -or run the Python examples using the Python interpreter. + singularity build --fakeroot cuda-quantum.sif cuda-quantum.def + singularity run --writable --fakeroot cuda-quantum.sif -.. image:: _static/getToWork.png +In addition to the files in your current folder, you should now +see a `README` file, as well as examples and tutorials. +To enable support for GPU-accelerated backends, you will need to pass the +the :code:`--nv` flag when running the container: -.. _VS Code: https://code.visualstudio.com/download -.. _Dev Containers extension: https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers -.. _command palette: https://code.visualstudio.com/docs/getstarted/userinterface#_command-palette +.. code-block:: console + + singularity run --writable --fakeroot --nv cuda-quantum.sif + nvidia-smi + +The output of the command above lists the GPUs that are visible and accessible in the container environment. .. note:: - VS Code extensions that you have installed locally, such as e.g. 
an extension for Jupyter notebooks, - may not be automatically active in the container environment. You may need to install your preferred - extension in the container environment for all of your development tools to be available. + If you do not see any GPUs listed in the output of `nvidia-smi`, + it means the container environment is unable to access a suitable NVIDIA GPU. + This can happen if your driver version is insufficient, or if you are + working on WSL. To improve compatibility with older drivers, or to enable GPU support + on WSL, please install the `NVIDIA container toolkit`_, and update the singularity configuration + to set `use nvidia-container-cli` to `yes` and configure the correct `nvidia-container-cli path`. + The two commands below use `sed` to do that: + + .. code-block:: console + + sed -i 's/use nvidia-container-cli = no/use nvidia-container-cli = yes/' "$HOME/.local/singularity/etc/singularity/singularity.conf" + sed -i 's/# nvidia-container-cli path =/nvidia-container-cli path = \/usr\/bin\/nvidia-container-cli/' "$HOME/.local/singularity/etc/singularity/singularity.conf" + +You can exit the container environment by typing the command :code:`exit`. +Any changes you made will still be visible after you exit the container, and you can re-enable the +container environment at any time using the `run` command above. + +To facilitate application development with, for example, debugging, code completion, hover information, and so on, +please take a look at the section on :ref:`Development with VS Code `. + .. _install-python-wheels: Python wheels --------------------- +++++++++++++++++++++++++++++++++++++ CUDA Quantum Python wheels are available on `PyPI.org `__. Installation instructions can be found in the `project description `__. For more information about available versions and documentation, @@ -140,15 +199,256 @@ To build the CUDA Quantum Python API from source using pip, run the following co cd cuda-quantum && ./scripts/install_prerequisites.sh pip install . -For more information about building the entire C++ and Python API's, please see `Building from Source`_. +For more information about building the entire C++ and Python API from source, we refer to the `CUDA Quantum GitHub repository`_. -Building from Source ------------------------------- +.. _local-development-with-vscode: -Instructions for building the Python wheels from source are given in the section :ref:`install-python-wheels`. -For more information about building the entire C++ and Python API from source, we refer to the `CUDA Quantum GitHub repository`_. +Development with VS Code +------------------------------------ -.. _CUDA Quantum GitHub repository: https://github.com/NVIDIA/cuda-quantum/blob/main/Building.md +To facilitate application development with, for example, debugging, code completion, hover information, and so on, +we recommend using `VS Code `_. VS Code provides a seamless +development experience on all platforms, and is also available without installation via web browser. +This sections describes how to connect VS Code to a running container on your machine. +The section on :ref:`connecting to a remote host ` contains information on +how to set up your development environment when accessing CUDA Quantum on a remote host instead. + +.. _docker-in-vscode: + +Using a Docker container +++++++++++++++++++++++++++++++++++++++++ + +Before connecting VS Code, open a terminal/shell, +and start the CUDA Quantum Docker container following the +instructions in the :ref:`section above `. 
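
For example, you can use the same `docker run` command shown in the Docker section above; replace the image name or tag if needed, and omit the `--gpus all` flag if no suitable GPU is available:

.. code-block:: console

    docker run -it --gpus all --name cuda-quantum nvcr.io/nvidia/nightly/cuda-quantum:latest
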
+ +If you have a local installation of `VS Code `_ +you can connect to the running container using the +`Dev Containers `__ extension. If you want to use VS Code in the web browser, please follow the instructions +in the section `Developing with Remote Tunnels`_ instead. + +.. |:spellcheck-disable:| replace:: \ +.. |:spellcheck-enable:| replace:: \ + +After installing the +`Dev Containers `__ extension, launch VS Code, open the Command Palette with `Ctrl+Shift+P`, and enter +|:spellcheck-disable:|"Dev Containers: Attach to Running Container"|:spellcheck-enable:|. +You should see and select the running `cuda-quantum` container in the list. +After the window reloaded, enter "File: Open Folder" in the Command Palette to open the `/home/cudaq/` folder. + +To run the examples, open the Command Palette and enter "View: Show Terminal" +to launch an integrated terminal. You are now all set to +:ref:`get started ` with CUDA Quantum development. + +.. _singularity-in-vscode: + +Using a Singularity container +++++++++++++++++++++++++++++++++++++++++ + +If you have a GitHub or Microsoft account, we recommend that you connect +to a CUDA Quantum container using tunnels. To do so, launch a CUDA Quantum Singularity +container following the instructions in the :ref:`section above `, +and then follow the instructions in the section `Developing with Remote Tunnels`_. + +If you cannot use tunnels, you need a local installation of +`VS Code `_ and you need to install +the `Remote - SSH `__ extension. +Make sure you also have a suitable SSH key pair; if your `~/.ssh/` folder does not already contain +the files `id_rsa.pub` and `id.rsa`, follow the instructions for generating a new SSH key on +`this page `__. + +To connect VS Code to a running CUDA Quantum container, +the most convenient setup is to install and run an SSH server +in the Singularity container. Open a terminal/shell in a separate window, +and enter the following commands to create a suitable sandbox: + +.. code-block:: console + + singularity build --sandbox cuda-quantum-sandbox cuda-quantum.sif + singularity exec --writable --fakeroot cuda-quantum-sandbox \ + apt-get install -y --no-install-recommends openssh-server + cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys + +You can launch this sandbox by entering the commands below. Please see the `Singularity`_ section above +for more information about how to get the `cuda-quantum.sif` image, and how to enable GPU-acceleration +with the `--nv` flag. + +.. code-block:: console + + singularity run --writable --fakeroot --nv --network-args="portmap=22:2222/tcp" cuda-quantum-sandbox + /usr/sbin/sshd -D -p 2222 -E sshd_output.txt + +.. note:: + + Make sure to use a free port. You can check if the SSH server is ready and listening + by looking at the log in `sshd_output.txt`. If the port is already in use, you can + replace the number `2222` by any free TCP port in the range `1025-65535` in all + commands. + +Entering `Ctrl+C` followed by `exit` will stop the running container. You can re-start +it at any time by entering the two commands above. While the container is running, +open the Command Palette in VS Code with `Ctrl+Shift+P`, enter "Remote-SSH: Add new +SSH Host", and enter the following SSH command: + +.. code-block:: console + + ssh root@localhost -p 2222 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o GlobalKnownHostsFile=/dev/null + +.. 
note:: + + If you are working on Windows and are building and running the Singularity container in WSL, + make sure to copy the used SSH keys to the Windows partition, such that VS Code can connect with + the expected key. Alternatively, add the used public key to the `/root/.ssh/authorized_keys` file in + the Singularity container. + +You can then connect to the host by opening the Command Palette, entering +"Remote SSH: Connect Current Window to Host", and choosing the newly created host. +After the window reloaded, enter "File: Open Folder" in the +Command Palette to open the desired folder. + +To run the examples, open the Command Palette and enter "View: Show Terminal" +to launch an integrated terminal. You are now all set to +:ref:`get started ` with CUDA Quantum development. + + +.. _connect-to-remote: + +Connecting to a Remote Host +------------------------------------ + +Depending on the setup on the remote host, there are a couple of different options +for developing CUDA Quantum applications. + +- If a CUDA Quantum container is running on the remote host, + and you have a GitHub or Microsoft account, take a look at + `Developing with Remote Tunnels`_. This works for both Docker + and Singularity containers on the remote host, and should also + work for other containers. +- If you cannot use tunnels, or if you want to work with an + existing CUDA Quantum installation without using a container, + take a look at `Remote Access via SSH`_ instead. + +.. _connect-vscode-via-tunnel: + +Developing with Remote Tunnels +++++++++++++++++++++++++++++++++++++ + +`Remote access via tunnel `__ +can easily be enabled with the `VS Code CLI `__. +This allows to connect either a local installation of `VS Code `_, +or the `VS Code Web UI `__, to a running CUDA Quantum container on the same or a different machine. + +Creating a secure connection requires authenticating with the same GitHub or Microsoft account on each end. +Once authenticated, an SSH connection over the tunnel provides end-to-end encryption. To download the CLI and +create a tunnel, execute the commands + +.. code-block:: console + + os=$([ "$(uname -m)" == "aarch64" ] && echo cli-alpine-arm64 || echo cli-alpine-x64) + curl -Lk "https://code.visualstudio.com/sha/download?build=stable&os=$os" --output vscode_cli.tar.gz + tar -xf vscode_cli.tar.gz && rm vscode_cli.tar.gz && sudo mv code /usr/bin/ + code tunnel --name cuda-quantum-remote --accept-server-license-terms + +in the running CUDA Quantum container, and follow the instructions to authenticate. +You can then either `open VS Code in a web browser `__, or connect a local installation of VS Code. +To connect a local installation of VS Code, make sure you have the +`Remote - Tunnels `__ extension installed, +then open the Command Palette with `Ctrl+Shift+P`, enter "Remote Tunnels: Connect to Tunnel", +and enter `cuda-quantum-remote`. After the window reloaded, enter "File: Open Folder" in the Command Palette +to open the `/home/cudaq/` folder. + +You should see a pop up asking if you want to install the recommended extensions. Selecting to install them will +configure VS Code with extensions for working with C++, Python, and Jupyter. +You can always see the list of recommended extensions that aren't installed yet by clicking on the "Extensions" icon in the sidebar and navigating to the "Recommended" tab. 
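
To check that everything works, you can run one of the bundled examples from the integrated terminal (open it with "View: Show Terminal" in the Command Palette); the command below is the same one used in the Next Steps section at the end of this guide:

.. code-block:: console

    python examples/python/bernstein_vazirani.py --size 5
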
+
+Remote Access via SSH
+++++++++++++++++++++++++++++++++++++
+
+To facilitate application development with, for example, debugging, code completion, hover information, and so on,
+you can connect a local installation of `VS Code `_ to a remote host via SSH.
+
+.. note::
+
+   For the best user experience, we recommend launching a CUDA Quantum container on the remote host,
+   and then connecting :ref:`VS Code using tunnels `.
+   If a connection via tunnel is not possible, this section describes using SSH instead.
+
+To do so, make sure you have the `Remote - SSH `__ extension installed.
+Open the Command Palette with `Ctrl+Shift+P`, enter "Remote-SSH: Add new
+SSH Host", and enter the SSH command to connect to your account on the remote host.
+You can then connect to the host by opening the Command Palette, entering
+"Remote SSH: Connect Current Window to Host", and choosing the newly created host.
+
+When prompted, choose Linux as the operating system, and enter your
+password. After the window has reloaded, enter "File: Open Folder" in the
+Command Palette to open the desired folder. Our GitHub repository contains
+a folder with VS Code configurations including a list of recommended extensions for
+working with CUDA Quantum; you can copy `these configurations `__ into a folder named `.vscode` in your workspace to use them.
+
+If you want to work with an existing CUDA Quantum installation on the remote host, you are all set.
+Alternatively, you can use Singularity to build and run a container following the instructions in
+:ref:`this section `. Once the `cuda-quantum.sif` image is built and
+available in your home directory on the remote host, you can update your VS Code configuration
+to enable/improve completion, hover information, and other development tools within the container.
+
+To do so, open the Command Palette and enter "Remote-SSH: Open SSH Configuration File".
+Add a new entry to that file with the command to launch the container, and edit the configuration
+of the remote host, titled `remote-host` in the snippets below, to add a new identifier:
+
+.. code-block:: console
+
+    Host cuda-quantum~*
+        RemoteCommand singularity run --writable --fakeroot --nv ~/cuda-quantum.sif
+        RequestTTY yes
+
+    Host remote-host cuda-quantum~remote-host
+        HostName ...
+        ...
+
+You will need to edit a couple of VS Code settings to make use of the newly defined remote command;
+open the Command Palette, enter "Preferences: Open User Settings (JSON)", and add or update the
+following configurations:
+
+.. code-block:: console
+
+    "remote.SSH.enableRemoteCommand": true,
+    "remote.SSH.useLocalServer": true,
+    "remote.SSH.remoteServerListenOnSocket": false,
+    "remote.SSH.connectTimeout": 120,
+    "remote.SSH.serverInstallPath": {
+        "cuda-quantum~remote-host": "~/.vscode-container/cuda-quantum",
+    },
+
+After saving the changes, you should now be able to select `cuda-quantum~remote-host` as the host
+when connecting via SSH, which will launch the CUDA Quantum container and connect VS Code to it.
+
+.. note::
+
+   If the connection to `cuda-quantum~remote-host` fails, you may need to specify the full
+   path to the `singularity` executable on the remote host, since environment variables,
+   and specifically the configured `PATH`, may be different during launch than in your user account.
+
+.. TODO:
+   Use DGX Cloud
+   ++++++++++++++++++++++++++++++++++++
+
+Additional CUDA Tools
+------------------------------------
+
+CUDA Quantum makes use of CUDA tools in certain backends and components.
+If you install CUDA Quantum via `PyPI `__, please follow the installation instructions there to install the necessary CUDA dependencies. +If you are using the CUDA Quantum container image, the image already contains all necessary runtime libraries to use all CUDA Quantum components. It does not, +however, contain all development dependencies for CUDA, such as, for example the `nvcc` compiler. You can install all CUDA development dependencies by running the command + +.. code-block:: console + + sudo apt-get install cuda-toolkit-11.8 + +inside the container. Note that most Python packages that use GPU-acceleration, such as for example `CuPy `__, require an existing CUDA installation. After installing the `cuda-toolkit-11.8` you can install CuPy with the command + +.. code-block:: console + + python3 -m pip install cupy-cuda11x .. _dependencies-and-compatibility: @@ -162,7 +462,7 @@ The supported CPUs include x86_64 (x86-64-v3 architecture and newer) and ARM64 a .. note:: - Some of the components included in the CUDA Quantum Python wheels depend on an existing CUDA installation on your system. For more information about installing the CUDA Quantum Python wheels, take a look at :ref:`this section `. + Some of the components included in the CUDA Quantum Python wheels depend on an existing CUDA installation on your system. For more information about installing the CUDA Quantum Python wheels, take a look at :ref:`this section `. The following table summarizes the required components. @@ -193,26 +493,85 @@ The following table summarizes the required components. Detailed information about supported drivers for different CUDA versions and be found `here `__. +.. _validate-installation: + Next Steps ---------- -The Docker image contains a folder with example in the :code:`/home/cudaq` directory. These examples are provided to -get you started with CUDA Quantum and understanding the programming and execution model. -If you are not using the Docker image, you can find these examples on our `GitHub repository `__. +You can now compile and/or run the C++ and Python examples using the terminal. +To open a terminal in VS Code, open the Command Palette with `Ctrl+Shift+P` and +enter "View: Show Terminal". + +.. image:: _static/getToWork.png + +The CUDA Quantum image contains a folder with examples and tutorials in the :code:`/home/cudaq` directory. +These examples are provided to get you started with CUDA Quantum and understanding +the programming and execution model. +If you are not using a container image, you can find these examples on our +`GitHub repository `__. + +Let's start by running a simple program to validate your installation. +The samples contain an implementation of a +`Bernstein-Vazirani algorithm `__. +To run the example, execute the command: + +.. tab:: C++ + + .. code-block:: console + + nvq++ examples/cpp/algorithms/bernstein_vazirani.cpp && ./a.out + +.. tab:: Python + + .. code-block:: console + + python examples/python/bernstein_vazirani.py --size 5 + +This will execute the program on the default simulator, which will use GPU-acceleration if +a suitable GPU has been detected. To confirm that the GPU acceleration works, you can +increase the size of the secret string, and pass the target as a command line argument: + +.. tab:: C++ + + .. code-block:: console + + nvq++ examples/cpp/algorithms/bernstein_vazirani.cpp -DSIZE=25 --target nvidia && ./a.out + +.. tab:: Python + + .. 
code-block:: console + + python examples/python/bernstein_vazirani.py --size 25 --target nvidia + +This program should complete fairly quickly. Depending on the available memory on your GPU, +you can set the size of the secret string to up to 28-32 when running on the `nvidia` target. + +.. note:: + + If you get an error that the CUDA driver version is insufficient or no GPU has been detected, + check that you have enabled GPU support when launching the container by passing the `--gpus all` flag + (for :ref:`Docker `) or the `--nv` flag (for :ref:`Singularity `). + If you are not running a container, you can execute the command `nvidia-smi` to confirm your setup; + if the command is unknown or fails, you do not have a GPU or do not have a driver installed. If the command + succeeds, please confirm that your CUDA and driver version matches the + :ref:`supported versions `. + +Let's compare that to using only your CPU: + +.. tab:: C++ -Start of by trying to compile a simple one, like :code:`examples/cpp/basics/static_kernel.cpp`: + .. code-block:: console -.. code-block:: console + nvq++ examples/cpp/algorithms/bernstein_vazirani.cpp -DSIZE=25 --target qpp-cpu && ./a.out - nvq++ examples/cpp/basics/static_kernel.cpp - ./a.out +.. tab:: Python -If you have GPU support (e.g. you successfully provided :code:`--gpus` to your docker -run command), try out the 30 qubit version of this example. + .. code-block:: console -.. code-block:: console + python examples/python/bernstein_vazirani.py --size 25 --target qpp-cpu - nvq++ examples/cpp/basics/cuquantum_backends.cpp --target nvidia - ./a.out +When you execute this command, the program simply seems to hang; that is because it takes +a long time for the CPU-only backend to simulate 28+ qubits! Cancel the execution with `Ctrl+C`. -For more information about developing and running CUDA Quantum code, take a look at the page :doc:`Using CUDA Quantum `. +You are now all set to start developing quantum applications using CUDA Quantum! +Please proceed to :doc:`Using CUDA Quantum ` to learn the basics. diff --git a/docs/sphinx/using/cpp.rst b/docs/sphinx/using/cpp.rst index fa3abdb99e..112410463b 100644 --- a/docs/sphinx/using/cpp.rst +++ b/docs/sphinx/using/cpp.rst @@ -101,7 +101,7 @@ can be trivially simulated via a NVIDIA GPU-accelerated backend: .. literalinclude:: ../examples/cpp/basics/cuquantum_backends.cpp :language: cpp -Here we generate a GHZ state on 30 qubits. To run with the built-in cuQuantum state +Here we generate a GHZ state on 28 qubits. To run with the built-in cuQuantum state vector support, we pass the :code:`--target nvidia` flag at compile time: .. code:: bash diff --git a/docs/sphinx/using/tutorials.rst b/docs/sphinx/using/tutorials.rst index 14ebe79cc3..8f06fd9aba 100644 --- a/docs/sphinx/using/tutorials.rst +++ b/docs/sphinx/using/tutorials.rst @@ -7,7 +7,7 @@ Tutorials that give an in depth view of CUDA Quantum and its applications in Pyt .. nbgallery:: /examples/python/tutorials/executing_circuits.ipynb - /examples/python/tutorials/single_qubit_rotation.ipynb + /examples/python/tutorials/cost_minimization.ipynb /examples/python/tutorials/noisy_simulations.ipynb /examples/python/tutorials/vqe.ipynb /examples/python/tutorials/hybrid_qnns.ipynb
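
For reference: the C++ example above builds a GHZ state on 28 qubits, and the multi-GPU workflows tutorial calls a `ghz_state(qubit_count=..., target=...)` helper whose definition is not shown in this diff. The sketch below is an illustrative Python version of such a helper, written with the `cudaq.make_kernel` API used in the other Python examples; it is an assumption for clarity, not the notebook's actual implementation.

.. code-block:: python

    import cudaq

    def ghz_state(qubit_count: int, target: str):
        """Sample a GHZ state on `qubit_count` qubits using the given simulation target."""
        cudaq.set_target(target)

        kernel = cudaq.make_kernel()
        qubits = kernel.qalloc(qubit_count)

        # Put the first qubit into superposition and entangle the remaining qubits with it.
        kernel.h(qubits[0])
        for i in range(1, qubit_count):
            kernel.cx(qubits[0], qubits[i])
        kernel.mz(qubits)

        return cudaq.sample(kernel)

    # Small CPU-only run; switch the target to "nvidia" and increase the qubit count on a GPU.
    counts = ghz_state(qubit_count=2, target="qpp-cpu")
    counts.dump()

For a GHZ state, the sampled counts should be concentrated on the all-zeros and all-ones bitstrings.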