diff --git a/.github/workflows/image.yaml b/.github/workflows/image.yaml
index 5b65bae6..a1377323 100644
--- a/.github/workflows/image.yaml
+++ b/.github/workflows/image.yaml
@@ -36,9 +36,11 @@ jobs:
         driver:
           - 535.216.01
           - 550.127.05
+          - 560.35.03
         dist:
           - ubuntu20.04
           - ubuntu22.04
+          - amzn2023
           - rhel8
         ispr:
           - ${{github.event_name == 'pull_request'}}
@@ -49,6 +51,18 @@ jobs:
           - ispr: true
             dist: ubuntu20.04
             driver: 550.127.05
+          - ispr: true
+            dist: ubuntu20.04
+            driver: 560.35.03
+          - ispr: true
+            dist: ubuntu22.04
+            driver: 560.35.03
+          - ispr: true
+            dist: amzn2023
+            driver: 535.216.01
+          - ispr: true
+            dist: amzn2023
+            driver: 550.127.05
       fail-fast: false
     steps:
       - uses: actions/checkout@v4
diff --git a/.nvidia-ci.yml b/.nvidia-ci.yml
index 243dd357..996b8c7e 100644
--- a/.nvidia-ci.yml
+++ b/.nvidia-ci.yml
@@ -80,6 +80,7 @@ variables:
 .image-pull-ubuntu22.04:
   # Perform for each DRIVER_VERSION
   extends:
+    - .driver-versions
     - .driver-versions
     - .image-pull-generic
   rules:
@@ -184,6 +185,18 @@ image-rhel8:
     - if: $CI_PIPELINE_SOURCE == "merge_request_event"
     - !reference [.pipeline-trigger-rules, rules]
 
+.scan-amzn2023:
+  # Repeat for each DRIVER_VERSION
+  extends:
+    - .driver-versions-amzn2023
+    - .scan-generic
+  rules:
+    - !reference [.scan-rules-common, rules]
+    - if: $CI_PIPELINE_SOURCE == "schedule"
+      when: never
+    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
+    - !reference [.pipeline-trigger-rules, rules]
+
 .scan-precompiled-ubuntu22.04:
   variables:
     DIST: signed_ubuntu22.04
@@ -278,6 +291,12 @@ release:ngc-ubuntu22.04:
     - .dist-ubuntu22.04
     - .driver-versions
 
+release:ngc-amzn2023:
+  extends:
+    - .release:ngc
+    - .dist-amzn2023
+    - .driver-versions-amzn2023
+
 release:ngc-precompiled-ubuntu22.04:
   variables:
     DIST: signed_ubuntu22.04
diff --git a/Makefile b/Makefile
index f145be68..3614180b 100644
--- a/Makefile
+++ b/Makefile
@@ -54,7 +54,7 @@ OUT_IMAGE_TAG = $(OUT_IMAGE_VERSION)-$(OUT_DIST)
 OUT_IMAGE = $(OUT_IMAGE_NAME):$(OUT_IMAGE_TAG)
 
 ##### Public rules #####
-DISTRIBUTIONS := ubuntu18.04 ubuntu20.04 ubuntu22.04 signed_ubuntu20.04 signed_ubuntu22.04 rhel8 rhel9 flatcar fedora36 sles15.3 precompiled_rhcos
+DISTRIBUTIONS := ubuntu18.04 ubuntu20.04 ubuntu22.04 amzn2023 signed_ubuntu20.04 signed_ubuntu22.04 rhel8 rhel9 flatcar fedora36 sles15.3 precompiled_rhcos
 PUSH_TARGETS := $(patsubst %, push-%, $(DISTRIBUTIONS))
 BASE_FROM := jammy focal
 PUSH_TARGETS := $(patsubst %, push-%, $(DISTRIBUTIONS))
diff --git a/amzn2023/Dockerfile b/amzn2023/Dockerfile
new file mode 100644
index 00000000..ea69f6fb
--- /dev/null
+++ b/amzn2023/Dockerfile
@@ -0,0 +1,102 @@
+FROM nvcr.io/nvidia/cuda:12.6.2-base-amzn2023 AS build
+
+ARG TARGETARCH
+
+SHELL ["/bin/bash", "-c"]
+
+# Remove cuda repository to avoid GPG errors
+RUN rm -f /etc/yum.repos.d/cuda*
+
+RUN dnf update -y && dnf makecache && \
+    dnf install -y \
+        gcc \
+        gcc-c++ \
+        make \
+        ca-certificates \
+        git \
+        tar && \
+    dnf clean all && rm -rf /var/cache/yum/*
+
+ENV GOLANG_VERSION=1.23.2
+
+# download the appropriate binary based on the target architecture for multi-arch builds
+RUN curl https://storage.googleapis.com/golang/go${GOLANG_VERSION}.linux-${TARGETARCH}.tar.gz \
+    | tar -C /usr/local -xz
+
+ENV PATH /usr/local/bin:$PATH
+ENV PATH /usr/local/go/bin:$PATH
+
+WORKDIR /work
+
+RUN git clone https://github.com/NVIDIA/gpu-driver-container driver && \
+    cd driver/vgpu/src && \
+    go build -o vgpu-util && \
+    mv vgpu-util /work
+
+FROM nvcr.io/nvidia/cuda:12.6.2-base-amzn2023
+
+SHELL ["/bin/bash", "-c"]
+
+ARG BASE_URL=https://us.download.nvidia.com/tesla
+ARG TARGETARCH
+ENV TARGETARCH=$TARGETARCH
+ENV DRIVER_ARCH=${TARGETARCH/amd64/x86_64}
+ARG DRIVER_VERSION
+ENV DRIVER_VERSION=$DRIVER_VERSION
+
+# Arg to indicate if the driver type is passthrough (baremetal) or vgpu
+ARG DRIVER_TYPE=passthrough
+ENV DRIVER_TYPE=$DRIVER_TYPE
+ARG DRIVER_BRANCH=560
+ENV DRIVER_BRANCH=$DRIVER_BRANCH
+ARG VGPU_LICENSE_SERVER_TYPE=NLS
+ENV VGPU_LICENSE_SERVER_TYPE=$VGPU_LICENSE_SERVER_TYPE
+# Disable the vGPU version compatibility check by default
+ARG DISABLE_VGPU_VERSION_CHECK=true
+ENV DISABLE_VGPU_VERSION_CHECK=$DISABLE_VGPU_VERSION_CHECK
+ENV NVIDIA_VISIBLE_DEVICES=void
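+
+# Illustrative build invocation (the values are examples only; DRIVER_VERSION must
+# name an available data-center driver release and DRIVER_BRANCH its branch):
+#   docker build \
+#     --build-arg DRIVER_VERSION=560.35.03 \
+#     --build-arg DRIVER_BRANCH=560 \
+#     --build-arg TARGETARCH=amd64 \
+#     -t nvidia-driver:560.35.03-amzn2023 amzn2023/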
+
+RUN echo "TARGETARCH=$TARGETARCH"
+
+ADD install.sh /tmp
+
+RUN NVIDIA_GPGKEY_SUM=d0664fbbdb8c32356d45de36c5984617217b2d0bef41b93ccecd326ba3b80c87 && \
+    curl -fsSL https://developer.download.nvidia.com/compute/cuda/repos/amzn2023/${DRIVER_ARCH}/D42D0685.pub | sed '/^Version/d' > /etc/pki/rpm-gpg/RPM-GPG-KEY-NVIDIA && \
+    echo "$NVIDIA_GPGKEY_SUM  /etc/pki/rpm-gpg/RPM-GPG-KEY-NVIDIA" | sha256sum -c --strict - && \
+    curl -fsSL -o /etc/yum.repos.d/cuda.repo https://developer.download.nvidia.com/compute/cuda/repos/amzn2023/${DRIVER_ARCH}/cuda-amzn2023.repo
+
+RUN dnf clean all && dnf makecache && dnf update -y && dnf install -y shadow-utils
+RUN /tmp/install.sh reposetup && /tmp/install.sh depinstall && \
+    curl -fsSL -o /usr/local/bin/donkey https://github.com/3XX0/donkey/releases/download/v1.1.0/donkey && \
+    chmod +x /usr/local/bin/donkey
+
+COPY nvidia-driver /usr/local/bin
+COPY --from=build /work/vgpu-util /usr/local/bin
+
+RUN curl -fsSL -o /usr/local/bin/extract-vmlinux https://raw.githubusercontent.com/torvalds/linux/master/scripts/extract-vmlinux && \
+    chmod +x /usr/local/bin/extract-vmlinux
+
+ADD drivers drivers/
+
+# Fetch the installer automatically for passthrough/baremetal types
+RUN if [ "$DRIVER_TYPE" != "vgpu" ]; then \
+    cd drivers && \
+    /tmp/install.sh download_installer; fi
+
+# install fabric-manager and nvidia-nscq
+RUN if [ "$DRIVER_TYPE" != "vgpu" ] && [ "$TARGETARCH" != "arm64" ]; then \
+    dnf install -y nvidia-fabric-manager-${DRIVER_VERSION}-1 libnvidia-nscq-${DRIVER_BRANCH}-${DRIVER_VERSION}-1; fi
+
+WORKDIR /drivers
+
+ARG PUBLIC_KEY=empty
+COPY ${PUBLIC_KEY} kernel/pubkey.x509
+
+# Remove cuda repository to avoid GPG errors
+RUN rm -f /etc/yum.repos.d/cuda*
+RUN dnf clean all
+
+# Add NGC DL license from the CUDA image
+RUN mkdir /licenses && mv /NGC-DL-CONTAINER-LICENSE /licenses/NGC-DL-CONTAINER-LICENSE
+
+ENTRYPOINT ["nvidia-driver", "init"]
diff --git a/amzn2023/README.md b/amzn2023/README.md
new file mode 100644
index 00000000..72367245
--- /dev/null
+++ b/amzn2023/README.md
@@ -0,0 +1,3 @@
+# Amazon Linux 2023 [![build status](https://gitlab.com/nvidia/driver/badges/master/build.svg)](https://gitlab.com/nvidia/driver/commits/master)
+
+See https://github.com/NVIDIA/nvidia-docker/wiki/Driver-containers-(Beta)
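+
+## Running the image (illustrative)
+
+The exact flags depend on the host setup; a typical invocation of an image built from
+this directory (tagged `<DRIVER_VERSION>-amzn2023`, following the Makefile convention)
+looks like:
+
+```sh
+docker run -d --privileged --pid=host \
+    -v /run/nvidia:/run/nvidia:shared \
+    -v /var/log:/var/log \
+    nvidia-driver:560.35.03-amzn2023
+```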
diff --git a/amzn2023/drivers/README.md b/amzn2023/drivers/README.md
new file mode 100644
index 00000000..aa9d4029
--- /dev/null
+++ b/amzn2023/drivers/README.md
@@ -0,0 +1 @@
+# Folder for downloading vGPU drivers and dependent metadata files
diff --git a/amzn2023/empty b/amzn2023/empty
new file mode 100644
index 00000000..e69de29b
diff --git a/amzn2023/install.sh b/amzn2023/install.sh
new file mode 100755
index 00000000..dc81c066
--- /dev/null
+++ b/amzn2023/install.sh
@@ -0,0 +1,58 @@
+#!/usr/bin/env bash
+
+set -eux
+
+download_installer () {
+    DRIVER_ARCH=${TARGETARCH/amd64/x86_64} && curl -fSsl -O $BASE_URL/$DRIVER_VERSION/NVIDIA-Linux-$DRIVER_ARCH-$DRIVER_VERSION.run && \
+    chmod +x NVIDIA-Linux-$DRIVER_ARCH-$DRIVER_VERSION.run;
+}
+
+dep_install () {
+    if [ "$TARGETARCH" = "amd64" ]; then
+        DRIVER_ARCH=${TARGETARCH/amd64/x86_64}
+        dnf update -y && dnf install -y \
+            gcc \
+            make \
+            glibc-devel \
+            ca-certificates \
+            kmod \
+            file \
+            elfutils-libelf-devel \
+            libglvnd-devel \
+            shadow-utils \
+            util-linux \
+            tar \
+            rpm-build \
+            dnf-utils \
+            pkgconfig && \
+        dnf clean all && \
+        rm -rf /var/cache/yum/*
+    fi
+}
+
+repo_setup () {
+    if [ "$TARGETARCH" = "amd64" ]; then
+        echo "[cuda-amzn2023-x86_64]
+name=cuda-amzn2023-x86_64
+baseurl=https://developer.download.nvidia.com/compute/cuda/repos/amzn2023/$DRIVER_ARCH
+enabled=1
+gpgcheck=1
+gpgkey=https://developer.download.nvidia.com/compute/cuda/repos/amzn2023/$DRIVER_ARCH/D42D0685.pub" > /etc/yum.repos.d/cuda.repo && \
+        usermod -o -u 0 -g 0 nobody
+    else
+        echo "TARGETARCH doesn't match a known arch target"
+        exit 1
+    fi
+}
+
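+# Dispatch on the first argument. The Dockerfile invokes this helper as
+# "/tmp/install.sh reposetup", "/tmp/install.sh depinstall" and, from the
+# drivers directory, "/tmp/install.sh download_installer".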
+if [ "$1" = "reposetup" ]; then
+    repo_setup
+elif [ "$1" = "depinstall" ]; then
+    dep_install
+elif [ "$1" = "download_installer" ]; then
+    download_installer
+else
+    echo "Unknown function: $1"
+    exit 1
+fi
+
diff --git a/amzn2023/nvidia-driver b/amzn2023/nvidia-driver
new file mode 100755
index 00000000..dd9b9a67
--- /dev/null
+++ b/amzn2023/nvidia-driver
@@ -0,0 +1,767 @@
+#! /bin/bash
+# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+
+set -eu
+
+RUN_DIR=/run/nvidia
+PID_FILE=${RUN_DIR}/${0##*/}.pid
+DRIVER_VERSION=${DRIVER_VERSION:?"Missing DRIVER_VERSION env"}
+KERNEL_UPDATE_HOOK=/run/kernel/postinst.d/update-nvidia-driver
+NUM_VGPU_DEVICES=0
+GPU_DIRECT_RDMA_ENABLED="${GPU_DIRECT_RDMA_ENABLED:-false}"
+USE_HOST_MOFED="${USE_HOST_MOFED:-false}"
+NVIDIA_MODULE_PARAMS=()
+NVIDIA_UVM_MODULE_PARAMS=()
+NVIDIA_MODESET_MODULE_PARAMS=()
+NVIDIA_PEERMEM_MODULE_PARAMS=()
+TARGETARCH=${TARGETARCH:?"Missing TARGETARCH env"}
+
+OPEN_KERNEL_MODULES_ENABLED=${OPEN_KERNEL_MODULES_ENABLED:-false}
+[[ "${OPEN_KERNEL_MODULES_ENABLED}" == "true" ]] && KERNEL_TYPE=kernel-open || KERNEL_TYPE=kernel
+
+DRIVER_ARCH=${TARGETARCH/amd64/x86_64} && DRIVER_ARCH=${DRIVER_ARCH/arm64/aarch64}
+
+echo "DRIVER_ARCH is $DRIVER_ARCH"
+
+_update_package_cache() {
+    if [ "${PACKAGE_TAG:-}" != "builtin" ]; then
+        echo "Updating the package cache..."
+        dnf -q makecache
+    fi
+}
+
+_cleanup_package_cache() {
+    if [ "${PACKAGE_TAG:-}" != "builtin" ]; then
+        echo "Cleaning up the package cache..."
+        dnf clean all
+    fi
+}
+
+_update_ca_certificates() {
+    if [ ! -z "$(ls -A /etc/pki/ca-trust/source/anchors/ )" ]; then
+        update-ca-trust extract
+    fi
+}
+
+# Resolve the kernel version to the fully qualified major.minor.patch-revision.arch form.
+_resolve_kernel_version() {
+    dnf -y -q --releasever=latest update >/dev/null
+    dnf -y -q groupinstall "Development Tools" >/dev/null
+
+    local version=$(dnf -q list available --showduplicates kernel-headers |
+        awk -v arch=$(uname -m) 'NR>1 {print $2"."arch}' | tac | grep -E -m1 "^${KERNEL_VERSION/latest/.*}")
+
+    echo "Resolving Linux kernel version..."
+    if [ -z "${version}" ]; then
+        echo "Could not resolve Linux kernel version" >&2
+        return 1
+    fi
+
+    KERNEL_VERSION="${version}"
+    echo "Proceeding with Linux kernel version ${KERNEL_VERSION}"
+
+    return 0
+}
+
+# Install the kernel modules header/builtin/order files and generate the kernel version string.
+_install_prerequisites() (
+    local tmp_dir=$(mktemp -d)
+
+    trap "rm -rf ${tmp_dir}" EXIT
+    cd ${tmp_dir}
+
+    rm -rf /lib/modules/${KERNEL_VERSION}
+    mkdir -p /lib/modules/${KERNEL_VERSION}/proc
+
+    echo "Installing Linux kernel headers..."
+    dnf -q -y install kernel-headers-${KERNEL_VERSION} kernel-devel-${KERNEL_VERSION} kernel-modules-extra-${KERNEL_VERSION} >/dev/null
+    # dnf -q -y install kernel-headers kernel-devel kernel-modules-extra >/dev/null
+    ln -s /usr/src/kernels/${KERNEL_VERSION} /lib/modules/${KERNEL_VERSION}/build
+
+    echo "Installing Linux kernel module files..."
+    dnf download kernel-${KERNEL_VERSION%.x86_64} --alldeps
+    rpm2cpio kernel-${KERNEL_VERSION}.rpm | cpio -idm
+    mv lib/modules/${KERNEL_VERSION}/modules.* /lib/modules/${KERNEL_VERSION}
+    if [ -d "/lib/modules/${KERNEL_VERSION}/kernel" ]; then
+        \cp -rf lib/modules/${KERNEL_VERSION}/kernel/* /lib/modules/${KERNEL_VERSION}/kernel/
+    else
+        mv lib/modules/${KERNEL_VERSION}/kernel /lib/modules/${KERNEL_VERSION}
+    fi
+
+    # Prevent depmod from giving a WARNING about missing files
+    touch /lib/modules/${KERNEL_VERSION}/modules.order
+    touch /lib/modules/${KERNEL_VERSION}/modules.builtin
+    depmod ${KERNEL_VERSION}
+
+    echo "Generating Linux kernel version string..."
+    # TODO currently boot/vmlinuz in /driver is not available on amzn2023
+    # extract-vmlinux boot/vmlinuz-${KERNEL_VERSION} | strings | grep -E '^Linux version' | sed 's/^\(.*\)\s\+(.*)$/\1/' > version
+    extract-vmlinux lib/modules/${KERNEL_VERSION}/vmlinuz | strings | grep -E '^Linux version' | sed 's/^\(.*\)\s\+(.*)$/\1/' > version
+    if [ -z "$(<version)" ]; then
+        echo "Could not locate Linux kernel version string" >&2
+        return 1
+    fi
+    mv version /lib/modules/${KERNEL_VERSION}/proc
+)
+
+# Cleanup the prerequisites installed above.
+_remove_prerequisites() {
+    if [ "${PACKAGE_TAG:-}" != "builtin" ]; then
+        dnf -q -y remove kernel-headers-${KERNEL_VERSION} kernel-devel-${KERNEL_VERSION} kernel-modules-extra-${KERNEL_VERSION} >/dev/null
+        # dnf -q -y remove dkms kernel-headers kernel-devel kernel-modules-extra >/dev/null
+        # TODO remove module files not matching an existing driver package.
+    fi
+}
+
+# Check if the kernel version requires a new precompiled driver package.
+_kernel_requires_package() {
+    local proc_mount_arg=""
+
+    echo "Checking NVIDIA driver packages..."
+    cd /usr/src/nvidia-${DRIVER_VERSION}/${KERNEL_TYPE}
+
+    # When the kernel version is the latest on the host, this check fails and leads to recompilation even when precompiled modules exist.
+    # proc_mount_arg needs to be set to do the module match check below.
+    # if [[ -f /lib/modules/${KERNEL_VERSION}/proc/version ]]; then
+    proc_mount_arg="--proc-mount-point /lib/modules/${KERNEL_VERSION}/proc"
+    # fi
+    for pkg_name in $(ls -d -1 precompiled/** 2> /dev/null); do
+        if ! ../mkprecompiled --match ${pkg_name} ${proc_mount_arg} > /dev/null; then
+            echo "Found NVIDIA driver package ${pkg_name##*/}"
+            return 1
+        fi
+    done
+    return 0
+}
+
+# Compile the kernel modules, optionally sign them, and generate a precompiled package for use by the nvidia-installer.
+_create_driver_package() (
+    local pkg_name="nvidia-modules-${KERNEL_VERSION%-*}${PACKAGE_TAG:+-${PACKAGE_TAG}}"
+    local nvidia_sign_args=""
+    local nvidia_modeset_sign_args=""
+    local nvidia_uvm_sign_args=""
+
+    export IGNORE_CC_MISMATCH=1
+    trap "make -s -j ${MAX_THREADS} SYSSRC=/lib/modules/${KERNEL_VERSION}/build clean > /dev/null" EXIT
+
+    echo "Compiling NVIDIA driver kernel modules..."
+    cd /usr/src/nvidia-${DRIVER_VERSION}/${KERNEL_TYPE}
+
+    if _gpu_direct_rdma_enabled; then
+        ln -s /run/mellanox/drivers/usr/src/ofa_kernel /usr/src/
+        # If an arch directory exists (MOFED >= 5.5), create a symlink as expected by the GPU driver installer.
+        # This is required because the installer currently does not look for headers in the x86_64 folder,
+        # only in the default or kernel-version folder:
+        #   ls -ltr /usr/src/ofa_kernel/
+        #   lrwxrwxrwx 1 root root   36 Dec  8 20:10 default -> /etc/alternatives/ofa_kernel_headers
+        #   drwxr-xr-x 4 root root 4096 Dec  8 20:14 x86_64
+        #   lrwxrwxrwx 1 root root   44 Dec  9 19:05 5.4.0-90-generic -> /usr/src/ofa_kernel/x86_64/5.4.0-90-generic/
+        if [[ -d /run/mellanox/drivers/usr/src/ofa_kernel/$DRIVER_ARCH/`uname -r` ]]; then
+            if [[ ! -e /usr/src/ofa_kernel/`uname -r` ]]; then
+                ln -s /run/mellanox/drivers/usr/src/ofa_kernel/$DRIVER_ARCH/`uname -r` /usr/src/ofa_kernel/
+            fi
+        fi
+    fi
+
+    export IGNORE_CC_MISMATCH=1
+    make -s -j ${MAX_THREADS} SYSSRC=/lib/modules/${KERNEL_VERSION}/build nv-linux.o nv-modeset-linux.o > /dev/null
+
+    echo "Relinking NVIDIA driver kernel modules..."
+    rm -f nvidia.ko nvidia-modeset.ko
+    ld -d -r -o nvidia.ko ./nv-linux.o ./nvidia/nv-kernel.o_binary
+    ld -d -r -o nvidia-modeset.ko ./nv-modeset-linux.o ./nvidia-modeset/nv-modeset-kernel.o_binary
+
+    if [ -n "${PRIVATE_KEY}" ]; then
+        echo "Signing NVIDIA driver kernel modules..."
+        donkey get ${PRIVATE_KEY} sh -c "PATH=${PATH}:/usr/src/linux-headers-${KERNEL_VERSION}/scripts && \
+          sign-file sha512 \$DONKEY_FILE pubkey.x509 nvidia.ko nvidia.ko.sign && \
+          sign-file sha512 \$DONKEY_FILE pubkey.x509 nvidia-modeset.ko nvidia-modeset.ko.sign && \
+          sign-file sha512 \$DONKEY_FILE pubkey.x509 nvidia-uvm.ko"
+        nvidia_sign_args="--linked-module nvidia.ko --signed-module nvidia.ko.sign"
+        nvidia_modeset_sign_args="--linked-module nvidia-modeset.ko --signed-module nvidia-modeset.ko.sign"
+        nvidia_uvm_sign_args="--signed"
+    fi
+
+    echo "Building NVIDIA driver package ${pkg_name}..."
+    ../mkprecompiled --pack ${pkg_name} --description ${KERNEL_VERSION} \
+                     --proc-mount-point /lib/modules/${KERNEL_VERSION}/proc \
+                     --driver-version ${DRIVER_VERSION} \
+                     --kernel-interface nv-linux.o \
+                     --linked-module-name nvidia.ko \
+                     --core-object-name nvidia/nv-kernel.o_binary \
+                     ${nvidia_sign_args} \
+                     --target-directory . \
+                     --kernel-interface nv-modeset-linux.o \
+                     --linked-module-name nvidia-modeset.ko \
+                     --core-object-name nvidia-modeset/nv-modeset-kernel.o_binary \
+                     ${nvidia_modeset_sign_args} \
+                     --target-directory . \
+                     --kernel-module nvidia-uvm.ko \
+                     ${nvidia_uvm_sign_args} \
+                     --target-directory .
+    mkdir -p precompiled
+    mv ${pkg_name} precompiled
+)
+
+_assert_nvswitch_system() {
+    [ -d /proc/driver/nvidia-nvswitch/devices ] || return 1
+    if [ -z "$(ls -A /proc/driver/nvidia-nvswitch/devices)" ]; then
+        return 1
+    fi
+    return 0
+}
+
+# Check if Mellanox devices are present
+_mellanox_devices_present() {
+    devices_found=0
+    for dev in /sys/bus/pci/devices/*; do
+        read vendor < $dev/vendor
+        if [ "$vendor" = "0x15b3" ]; then
+            echo "Mellanox device found at $(basename $dev)"
+            return 0
+        fi
+    done
+    echo "No Mellanox devices were found..."
+    return 1
+}
+
+_gpu_direct_rdma_enabled() {
+    if [ "${GPU_DIRECT_RDMA_ENABLED}" = "true" ]; then
+        # check if Mellanox cards are present
+        if _mellanox_devices_present; then
+            return 0
+        fi
+    fi
+    return 1
+}
+
+# For each kernel module configuration file mounted into the container,
+# parse the file contents and extract the custom module parameters that
+# are to be passed as input to 'modprobe'.
+#
+# Assumptions:
+# - Configuration files are named <module-name>.conf (i.e. nvidia.conf, nvidia-uvm.conf).
+# - Configuration files are mounted inside the container at /drivers.
+# - Each line in the file contains at least one parameter, where parameters on the same line
+#   are space delimited. It is up to the user to properly format the file to ensure
+#   the correct set of parameters are passed to 'modprobe'.
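+#
+# For example, a /drivers/nvidia.conf containing the single line
+#     NVreg_EnableGpuFirmware=0
+# results in the nvidia module being loaded with 'modprobe nvidia NVreg_EnableGpuFirmware=0'.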
+_get_module_params() {
+    local base_path="/drivers"
+    # nvidia
+    if [ -f "${base_path}/nvidia.conf" ]; then
+        while IFS="" read -r param || [ -n "$param" ]; do
+            NVIDIA_MODULE_PARAMS+=("$param")
+        done <"${base_path}/nvidia.conf"
+        echo "Module parameters provided for nvidia: ${NVIDIA_MODULE_PARAMS[@]}"
+    fi
+    # nvidia-uvm
+    if [ -f "${base_path}/nvidia-uvm.conf" ]; then
+        while IFS="" read -r param || [ -n "$param" ]; do
+            NVIDIA_UVM_MODULE_PARAMS+=("$param")
+        done <"${base_path}/nvidia-uvm.conf"
+        echo "Module parameters provided for nvidia-uvm: ${NVIDIA_UVM_MODULE_PARAMS[@]}"
+    fi
+    # nvidia-modeset
+    if [ -f "${base_path}/nvidia-modeset.conf" ]; then
+        while IFS="" read -r param || [ -n "$param" ]; do
+            NVIDIA_MODESET_MODULE_PARAMS+=("$param")
+        done <"${base_path}/nvidia-modeset.conf"
+        echo "Module parameters provided for nvidia-modeset: ${NVIDIA_MODESET_MODULE_PARAMS[@]}"
+    fi
+    # nvidia-peermem
+    if [ -f "${base_path}/nvidia-peermem.conf" ]; then
+        while IFS="" read -r param || [ -n "$param" ]; do
+            NVIDIA_PEERMEM_MODULE_PARAMS+=("$param")
+        done <"${base_path}/nvidia-peermem.conf"
+        echo "Module parameters provided for nvidia-peermem: ${NVIDIA_PEERMEM_MODULE_PARAMS[@]}"
+    fi
+}
+
+# Load the kernel modules and start persistenced.
+_load_driver() {
+    echo "Parsing kernel module parameters..."
+    _get_module_params
+
+    local nv_fw_search_path="$RUN_DIR/driver/lib/firmware"
+    local set_fw_path="true"
+    local fw_path_config_file="/sys/module/firmware_class/parameters/path"
+    for param in "${NVIDIA_MODULE_PARAMS[@]}"; do
+        if [[ "$param" == "NVreg_EnableGpuFirmware=0" ]]; then
+            set_fw_path="false"
+        fi
+    done
+
+    if [[ "$set_fw_path" == "true" ]]; then
+        echo "Configuring the following firmware search path in '$fw_path_config_file': $nv_fw_search_path"
+        if [[ ! -z $(grep '[^[:space:]]' $fw_path_config_file) ]]; then
+            echo "WARNING: A search path is already configured in $fw_path_config_file"
+            echo "         Retaining the current configuration"
+        else
+            echo -n "$nv_fw_search_path" > $fw_path_config_file || echo "WARNING: Failed to configure the firmware search path"
+        fi
+    fi
+
+    echo "Loading ipmi and i2c_core kernel modules..."
+    modprobe -a i2c_core ipmi_msghandler ipmi_devintf
+
+    echo "Loading NVIDIA driver kernel modules..."
+    set -o xtrace +o nounset
+    modprobe nvidia "${NVIDIA_MODULE_PARAMS[@]}"
+    modprobe nvidia-uvm "${NVIDIA_UVM_MODULE_PARAMS[@]}"
+    modprobe nvidia-modeset "${NVIDIA_MODESET_MODULE_PARAMS[@]}"
+    set +o xtrace -o nounset
+
+    if _gpu_direct_rdma_enabled; then
+        echo "Loading NVIDIA Peer Memory kernel module..."
+        set -o xtrace +o nounset
+        modprobe -a nvidia-peermem "${NVIDIA_PEERMEM_MODULE_PARAMS[@]}"
+        set +o xtrace -o nounset
+    fi
+
+    echo "Starting NVIDIA persistence daemon..."
+    nvidia-persistenced --persistence-mode
+
+    if [ "${DRIVER_TYPE}" = "vgpu" ]; then
+        echo "Copying gridd.conf..."
+        cp /drivers/gridd.conf /etc/nvidia/gridd.conf
+        if [ "${VGPU_LICENSE_SERVER_TYPE}" = "NLS" ]; then
+            echo "Copying ClientConfigToken..."
+            mkdir -p /etc/nvidia/ClientConfigToken/
+            cp /drivers/ClientConfigToken/* /etc/nvidia/ClientConfigToken/
+        fi
+
+        echo "Starting nvidia-gridd..."
+        LD_LIBRARY_PATH=/usr/lib/$DRIVER_ARCH-linux-gnu/nvidia/gridd nvidia-gridd
+
+        # Start the virtual topology daemon
+        _start_vgpu_topology_daemon
+    fi
+
+    if _assert_nvswitch_system; then
+        echo "Starting NVIDIA fabric manager daemon..."
+        nv-fabricmanager -c /usr/share/nvidia/nvswitch/fabricmanager.cfg
+    fi
+
+    return 0
+}
+
+# Stop persistenced and unload the kernel modules if they are currently loaded.
+_unload_driver() {
+    local rmmod_args=()
+    local nvidia_deps=0
+    local nvidia_refs=0
+    local nvidia_uvm_refs=0
+    local nvidia_modeset_refs=0
+    local nvidia_peermem_refs=0
+    local nvidia_fs_refs=0
+    local nvidia_drm_refs=0
+
+    echo "Stopping NVIDIA persistence daemon..."
+    if [ -f /var/run/nvidia-persistenced/nvidia-persistenced.pid ]; then
+        local pid=$(< /var/run/nvidia-persistenced/nvidia-persistenced.pid)
+
+        kill -SIGTERM "${pid}"
+        for i in $(seq 1 50); do
+            kill -0 "${pid}" 2> /dev/null || break
+            sleep 0.1
+        done
+        if [ $i -eq 50 ]; then
+            echo "Could not stop NVIDIA persistence daemon" >&2
+            return 1
+        fi
+    fi
+
+    if [ -f /var/run/nvidia-gridd/nvidia-gridd.pid ]; then
+        echo "Stopping NVIDIA grid daemon..."
+        local pid=$(< /var/run/nvidia-gridd/nvidia-gridd.pid)
+
+        kill -SIGTERM "${pid}"
+        for i in $(seq 1 10); do
+            kill -0 "${pid}" 2> /dev/null || break
+            sleep 0.1
+        done
+        if [ $i -eq 10 ]; then
+            echo "Could not stop NVIDIA Grid daemon" >&2
+            return 1
+        fi
+    fi
+
+    if [ -f /var/run/nvidia-fabricmanager/nv-fabricmanager.pid ]; then
+        echo "Stopping NVIDIA fabric manager daemon..."
+        local pid=$(< /var/run/nvidia-fabricmanager/nv-fabricmanager.pid)
+
+        kill -SIGTERM "${pid}"
+        for i in $(seq 1 50); do
+            kill -0 "${pid}" 2> /dev/null || break
+            sleep 0.1
+        done
+        if [ $i -eq 50 ]; then
+            echo "Could not stop NVIDIA fabric manager daemon" >&2
+            return 1
+        fi
+    fi
+
+    echo "Unloading NVIDIA driver kernel modules..."
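+    # Each loaded dependent module (nvidia-fs, nvidia-modeset, nvidia-uvm, nvidia-peermem,
+    # nvidia-drm) holds one reference on the core nvidia module, so nvidia is only treated
+    # as busy below if its refcount exceeds the number of dependent modules counted here.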
+    if [ -f /sys/module/nvidia_fs/refcnt ]; then
+        nvidia_fs_refs=$(< /sys/module/nvidia_fs/refcnt)
+        rmmod_args+=("nvidia-fs")
+        ((++nvidia_deps))
+    fi
+    if [ -f /sys/module/nvidia_modeset/refcnt ]; then
+        nvidia_modeset_refs=$(< /sys/module/nvidia_modeset/refcnt)
+        rmmod_args+=("nvidia-modeset")
+        ((++nvidia_deps))
+    fi
+    if [ -f /sys/module/nvidia_uvm/refcnt ]; then
+        nvidia_uvm_refs=$(< /sys/module/nvidia_uvm/refcnt)
+        rmmod_args+=("nvidia-uvm")
+        ((++nvidia_deps))
+    fi
+    if [ -f /sys/module/nvidia_peermem/refcnt ]; then
+        nvidia_peermem_refs=$(< /sys/module/nvidia_peermem/refcnt)
+        rmmod_args+=("nvidia-peermem")
+        ((++nvidia_deps))
+    fi
+    if [ -f /sys/module/nvidia_drm/refcnt ]; then
+        nvidia_drm_refs=$(< /sys/module/nvidia_drm/refcnt)
+        rmmod_args+=("nvidia-drm")
+        ((++nvidia_deps))
+    fi
+    if [ -f /sys/module/nvidia/refcnt ]; then
+        nvidia_refs=$(< /sys/module/nvidia/refcnt)
+        rmmod_args+=("nvidia")
+    fi
+    if [ ${nvidia_refs} -gt ${nvidia_deps} ] \
+        || [ ${nvidia_uvm_refs} -gt 0 ] \
+        || [ ${nvidia_modeset_refs} -gt 0 ] \
+        || [ ${nvidia_peermem_refs} -gt 0 ] \
+        || [ ${nvidia_fs_refs} -gt 0 ] \
+        || [ ${nvidia_drm_refs} -gt 0 ]; then
+        # run lsmod to debug module usage
+        lsmod | grep nvidia
+        echo "Could not unload NVIDIA driver kernel modules, driver is in use" >&2
+        return 1
+    fi
+
+    if [ ${#rmmod_args[@]} -gt 0 ]; then
+        rmmod ${rmmod_args[@]}
+    fi
+    return 0
+}
+
+# Link and install the kernel modules from a precompiled package using the nvidia-installer.
+_install_driver() {
+    local install_args=()
+
+    echo "Installing NVIDIA driver kernel modules..."
+    cd /usr/src/nvidia-${DRIVER_VERSION}
+    # TODO visit again
+    # Do not delete the video module; it will cause DRM and backlight module issues.
+    # KERNEL_VERSION >= 6.10 has DRM by default.
+    #if [ -d /lib/modules/${KERNEL_VERSION}/kernel/drivers/video ]; then
+    #    rm -rf /lib/modules/${KERNEL_VERSION}/kernel/drivers/video
+    #else
+    #    rm -rf /lib/modules/${KERNEL_VERSION}/video
+    #fi
+
+    if [ "${ACCEPT_LICENSE}" = "yes" ]; then
+        install_args+=("--accept-license")
+    fi
+    nvidia-installer --kernel-module-only --no-drm --ui=none --no-nouveau-check -m=${KERNEL_TYPE} ${install_args[@]+"${install_args[@]}"}
+}
+
+# Mount the driver rootfs into the run directory with the exception of sysfs.
+_mount_rootfs() {
+    echo "Mounting NVIDIA driver rootfs..."
+    mount --make-runbindable /sys
+    mount --make-private /sys
+    mkdir -p ${RUN_DIR}/driver
+    mount --rbind / ${RUN_DIR}/driver
+}
+
+# Unmount the driver rootfs from the run directory.
+_unmount_rootfs() {
+    echo "Unmounting NVIDIA driver rootfs..."
+    if findmnt -r -o TARGET | grep "${RUN_DIR}/driver" > /dev/null; then
+        umount -l -R ${RUN_DIR}/driver
+    fi
+}
+
+# Write a kernel postinst.d script to automatically precompile packages on kernel update (similar to DKMS).
+_write_kernel_update_hook() {
+    if [ ! -d ${KERNEL_UPDATE_HOOK%/*} ]; then
+        return
+    fi
+
+    echo "Writing kernel update hook..."
+    cat > ${KERNEL_UPDATE_HOOK} <<'EOF'
+#!/bin/bash
+
+set -eu
+trap 'echo "ERROR: Failed to update the NVIDIA driver" >&2; exit 0' ERR
+
+NVIDIA_DRIVER_PID=$(< /run/nvidia/nvidia-driver.pid)
+
+export "$(grep -z DRIVER_VERSION /proc/${NVIDIA_DRIVER_PID}/environ)"
+nsenter -t "${NVIDIA_DRIVER_PID}" -m -- nvidia-driver update --kernel "$1"
+EOF
+    chmod +x ${KERNEL_UPDATE_HOOK}
+}
+
+_shutdown() {
+    if _unload_driver; then
+        _unmount_rootfs
+        rm -f ${PID_FILE} ${KERNEL_UPDATE_HOOK}
+        return 0
+    fi
+    return 1
+}
+
+_find_vgpu_driver_version() {
+    local count=""
+    local version=""
+
+    if [ "${DISABLE_VGPU_VERSION_CHECK}" = "true" ]; then
+        echo "vgpu version compatibility check is disabled"
+        return 0
+    fi
+    # check if vgpu devices are present
+    count=$(vgpu-util count)
+    if [ $? -ne 0 ]; then
+        echo "cannot find vgpu devices on host, please check /var/log/vgpu-util.log for more details..."
+        return 0
+    fi
+    NUM_VGPU_DEVICES=$(echo "$count" | awk -F= '{print $2}')
+    if [ $NUM_VGPU_DEVICES -eq 0 ]; then
+        # no vgpu devices found, treat as passthrough
+        return 0
+    fi
+    echo "found $NUM_VGPU_DEVICES vgpu devices on host"
+
+    # find a compatible guest driver using the driver catalog
+    version=$(vgpu-util match -i /drivers -c /drivers/vgpuDriverCatalog.yaml)
+    if [ $? -ne 0 ]; then
+        echo "cannot find a compatible vgpu driver in the available list, please check /var/log/vgpu-util.log for more details..."
+        return 1
+    fi
+    DRIVER_VERSION=$(echo "$version" | awk -F= '{print $2}')
+    echo "vgpu driver version selected: ${DRIVER_VERSION}"
+    return 0
+}
+
+_start_vgpu_topology_daemon() {
+    type nvidia-topologyd > /dev/null 2>&1 || return 0
+    echo "Starting nvidia-topologyd..."
+    nvidia-topologyd
+}
+
+init() {
+    if [ "${DRIVER_TYPE}" = "vgpu" ]; then
+        _find_vgpu_driver_version || exit 1
+    fi
+
+    # Install the userspace components and copy the kernel module sources.
+    sh NVIDIA-Linux-$DRIVER_ARCH-$DRIVER_VERSION.run -x && \
+        cd NVIDIA-Linux-$DRIVER_ARCH-$DRIVER_VERSION && \
+        ./nvidia-installer --silent \
+                           --no-kernel-module \
+                           --no-nouveau-check \
+                           --no-nvidia-modprobe \
+                           --no-rpms \
+                           --no-backup \
+                           --no-check-for-alternate-installs \
+                           --no-libglx-indirect \
+                           --no-install-libglvnd \
+                           --x-prefix=/tmp/null \
+                           --x-module-path=/tmp/null \
+                           --x-library-path=/tmp/null \
+                           --x-sysconfig-path=/tmp/null && \
+        mkdir -p /usr/src/nvidia-${DRIVER_VERSION} && \
+        mv LICENSE mkprecompiled ${KERNEL_TYPE} /usr/src/nvidia-${DRIVER_VERSION} && \
+        sed '9,${/^\(kernel\|LICENSE\)/!d}' .manifest > /usr/src/nvidia-${DRIVER_VERSION}/.manifest
+
+    echo -e "\n========== NVIDIA Software Installer ==========\n"
+    echo -e "Starting installation of NVIDIA driver version ${DRIVER_VERSION} for Linux kernel version ${KERNEL_VERSION}\n"
+
+    exec 3> ${PID_FILE}
+    if ! flock -n 3; then
+        echo "An instance of the NVIDIA driver is already running, aborting"
+        exit 1
+    fi
+    echo $$ >&3
+
+    trap "echo 'Caught signal'; exit 1" HUP INT QUIT PIPE TERM
+    trap "_shutdown" EXIT
+
+    _unload_driver || exit 1
+    _unmount_rootfs
+
+    if _kernel_requires_package; then
+        _update_ca_certificates
+        _update_package_cache
+        _resolve_kernel_version || exit 1
+        _install_prerequisites
+        _create_driver_package
+        #_remove_prerequisites
+        #_cleanup_package_cache
+    fi
+
+    _install_driver
+    _load_driver || exit 1
+    _mount_rootfs
+    _write_kernel_update_hook
+
+    echo "Done, now waiting for signal"
+    sleep infinity &
+    trap "echo 'Caught signal'; _shutdown && { kill $!; exit 0; }" HUP INT QUIT PIPE TERM
+    trap - EXIT
+    while true; do wait $! || continue; done
+    exit 0
+}
+
+update() {
+    exec 3>&2
+    if exec 2> /dev/null 4< ${PID_FILE}; then
+        if ! flock -n 4 && read pid <&4 && kill -0 "${pid}"; then
+            exec > >(tee -a "/proc/${pid}/fd/1")
+            exec 2> >(tee -a "/proc/${pid}/fd/2" >&3)
+        else
+            exec 2>&3
+        fi
+        exec 4>&-
+    fi
+    exec 3>&-
+
+    # The vgpu driver version is chosen dynamically at runtime, so pre-compile modules
+    # for non-vgpu driver types only
+    if [ "${DRIVER_TYPE}" != "vgpu" ]; then
+        # Install the userspace components and copy the kernel module sources.
+        if [ ! -e /usr/src/nvidia-${DRIVER_VERSION}/mkprecompiled ]; then
+            sh NVIDIA-Linux-$DRIVER_ARCH-$DRIVER_VERSION.run -x && \
+                cd NVIDIA-Linux-$DRIVER_ARCH-$DRIVER_VERSION && \
+                ./nvidia-installer --silent \
+                                   --no-kernel-module \
+                                   --no-nouveau-check \
+                                   --no-nvidia-modprobe \
+                                   --no-rpms \
+                                   --no-backup \
+                                   --no-check-for-alternate-installs \
+                                   --no-libglx-indirect \
+                                   --no-install-libglvnd \
+                                   --x-prefix=/tmp/null \
+                                   --x-module-path=/tmp/null \
+                                   --x-library-path=/tmp/null \
+                                   --x-sysconfig-path=/tmp/null && \
+                mkdir -p /usr/src/nvidia-${DRIVER_VERSION} && \
+                mv LICENSE mkprecompiled ${KERNEL_TYPE} /usr/src/nvidia-${DRIVER_VERSION} && \
+                sed '9,${/^\(kernel\|LICENSE\)/!d}' .manifest > /usr/src/nvidia-${DRIVER_VERSION}/.manifest
+        fi
+    fi
+
+    echo -e "\n========== NVIDIA Software Updater ==========\n"
+    echo -e "Starting update of NVIDIA driver version ${DRIVER_VERSION} for Linux kernel version ${KERNEL_VERSION}\n"
+
+    trap "echo 'Caught signal'; exit 1" HUP INT QUIT PIPE TERM
+
+    _update_package_cache
+    _resolve_kernel_version || exit 1
+    _install_prerequisites
+    if _kernel_requires_package; then
+        _create_driver_package
+    fi
+    _remove_prerequisites
+    _cleanup_package_cache
+
+    echo "Done"
+    exit 0
+}
+
+# Wait for the MOFED drivers to be loaded and load nvidia-peermem whenever it gets unloaded during MOFED driver updates
+reload_nvidia_peermem() {
+    if [ "$USE_HOST_MOFED" = "true" ]; then
+        until lsmod | grep mlx5_core > /dev/null 2>&1 && [ -f /run/nvidia/validations/.driver-ctr-ready ];
+        do
+            echo "waiting for mellanox ofed and nvidia drivers to be installed"
+            sleep 10
+        done
+    else
+        # use the driver readiness flag created by the MOFED container
+        until [ -f /run/mellanox/drivers/.driver-ready ] && [ -f /run/nvidia/validations/.driver-ctr-ready ];
+        do
+            echo "waiting for mellanox ofed and nvidia drivers to be installed"
+            sleep 10
+        done
+    fi
+    # get any parameters provided for nvidia-peermem
+    _get_module_params && set +o nounset
+    if chroot /run/nvidia/driver modprobe nvidia-peermem "${NVIDIA_PEERMEM_MODULE_PARAMS[@]}"; then
+        if [ -f /sys/module/nvidia_peermem/refcnt ]; then
+            echo "successfully loaded nvidia-peermem module, now waiting for signal"
+            sleep inf
+            trap "echo 'Caught signal'; exit 1" HUP INT QUIT PIPE TERM
+        fi
+    fi
+    echo "failed to load nvidia-peermem module"
+    exit 1
+}
+
+# Probe used by the gpu-operator for liveness/startup checks that the nvidia-peermem module is loaded once the MOFED drivers are ready
+probe_nvidia_peermem() {
+    if lsmod | grep mlx5_core > /dev/null 2>&1; then
+        if [ ! -f /sys/module/nvidia_peermem/refcnt ]; then
+            echo "nvidia-peermem module is not loaded"
+            return 1
+        fi
+    else
+        echo "MOFED drivers are not ready, skipping probe to avoid container restarts..."
+    fi
+    return 0
+}
+
+usage() {
+    cat >&2 <