Commit 974eae0 (0 parents)
Signed-off-by: Andrej Orsula <[email protected]>
Showing 67 changed files with 6,519 additions and 0 deletions.
@@ -0,0 +1,70 @@
{
  "name": "${localWorkspaceFolderBasename}",
  "build": {
    "context": "${localWorkspaceFolder}",
    "dockerfile": "${localWorkspaceFolder}/Dockerfile",
    "cacheFrom": "andrejorsula/omniverse_rs"
  },
  "workspaceFolder": "/root/ws",
  "workspaceMount": "type=bind,source=${localWorkspaceFolder},target=/root/ws",
  "runArgs": [
    // Network mode
    "--network=host",
    "--ipc=host",
    // NVIDIA GPU
    "--gpus=all",
    // Other GPUs
    "--device=/dev/dri:/dev/dri",
    "--group-add=video"
  ],
  "mounts": [
    // Local time
    "type=bind,source=/etc/localtime,target=/etc/localtime,readonly",
    // Input devices
    "type=bind,source=/dev/input,target=/dev/input",
    // GUI (X11)
    "type=bind,source=/tmp/.X11-unix,target=/tmp/.X11-unix",
    "type=bind,source=${localEnv:TMPDIR:/tmp}/xauth_docker_vsc_${localWorkspaceFolderBasename},target=${localEnv:TMPDIR:/tmp}/xauth_docker_vsc_${localWorkspaceFolderBasename}",
    // Isaac Sim
    "type=bind,source=${localEnv:HOME}/.nvidia-omniverse/docker/cache/computecache,target=/root/.nv/ComputeCache",
    "type=bind,source=${localEnv:HOME}/.nvidia-omniverse/docker/cache/glcache,target=/root/.cache/nvidia/GLCache",
    "type=bind,source=${localEnv:HOME}/.nvidia-omniverse/docker/cache/kit,target=/root/isaac_sim/cache/Kit",
    "type=bind,source=${localEnv:HOME}/.nvidia-omniverse/docker/cache/ov,target=/root/.cache/ov",
    "type=bind,source=${localEnv:HOME}/.nvidia-omniverse/docker/cache/pip,target=/root/.cache/pip",
    "type=bind,source=${localEnv:HOME}/.nvidia-omniverse/docker/data,target=/root/.local/share/ov/data",
    "type=bind,source=${localEnv:HOME}/.nvidia-omniverse/docker/documents,target=/root/Documents",
    "type=bind,source=${localEnv:HOME}/.nvidia-omniverse/docker/logs,target=/root/.nvidia-omniverse/logs"
  ],
  "containerEnv": {
    // GUI (X11)
    "DISPLAY": "${localEnv:DISPLAY}",
    "XAUTHORITY": "${localEnv:TMPDIR:/tmp}/xauth_docker_vsc_${localWorkspaceFolderBasename}",
    // NVIDIA GPU
    "NVIDIA_VISIBLE_DEVICES": "all",
    "NVIDIA_DRIVER_CAPABILITIES": "all"
  },
  "initializeCommand": "XAUTH=\"${localEnv:TMPDIR:/tmp}/xauth_docker_vsc_${localWorkspaceFolderBasename}\"; touch \"${XAUTH}\"; chmod a+r \"${XAUTH}\"; XAUTH_LIST=$(xauth nlist \"${localEnv:DISPLAY}\"); if [ -n \"${XAUTH_LIST}\" ]; then echo \"${XAUTH_LIST}\" | sed -e 's/^..../ffff/' | xauth -f \"${XAUTH}\" nmerge -; fi",
  "customizations": {
    "vscode": {
      "extensions": [
        // Rust
        "rust-lang.rust-analyzer",
        "serayuzgur.crates",
        "vadimcn.vscode-lldb",
        // Python
        "ms-python.black-formatter",
        "ms-python.isort",
        "ms-python.python",
        "ms-python.vscode-pylance",
        // Toml
        "tamasfe.even-better-toml",
        // Yaml
        "redhat.vscode-yaml",
        // Utils
        "christian-kohler.path-intellisense",
        // Miscellaneous
        "GitHub.copilot"
      ]
    }
  }
}
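The `initializeCommand` above only prepares the merged Xauthority file that the X11 mounts and the `XAUTHORITY` variable expect, so GUI applications such as Isaac Sim can reach the host display. As a rough sketch, the same configuration can also be brought up from a terminal with the Dev Containers CLI, assuming `@devcontainers/cli` is installed (the "Dev Containers: Reopen in Container" command in VS Code achieves the same without it):

    # Sketch only: requires the Dev Containers CLI (npm install -g @devcontainers/cli)
    devcontainer up --workspace-folder .            # build the image and start the container
    devcontainer exec --workspace-folder . bash     # open a shell inside the running container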
@@ -0,0 +1,28 @@
#!/usr/bin/env bash
set -e

SCRIPT_DIR="$(cd "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" &>/dev/null && pwd)"
REPOSITORY_DIR="$(dirname "${SCRIPT_DIR}")"

## Determine the workspace folder
if [[ -n "$1" ]]; then
    # Use the first argument as the workspace folder if provided
    WORKSPACE_FOLDER="$1"
else
    # Otherwise, try to extract the workspace folder from `./devcontainer.json`
    WORKSPACE_FOLDER="$(grep -Po '"workspaceFolder":.*?[^\\]",' "${SCRIPT_DIR}/devcontainer.json" | cut -d'"' -f4 || true)"
    if [[ -z "${WORKSPACE_FOLDER}" ]]; then
        # If `./devcontainer.json` does not contain the workspace folder, default to the root
        WORKSPACE_FOLDER="/"
    fi
fi

## Open the Dev Container in VS Code
CODE_REMOTE_CMD=(
    code --remote
    "dev-container+$(printf "%s" "${REPOSITORY_DIR}" | xxd -p | tr -d "[:space:]")"
    "${WORKSPACE_FOLDER}"
)
echo -e "\033[1;90m${CODE_REMOTE_CMD[*]}\033[0m" | xargs
# shellcheck disable=SC2048
exec ${CODE_REMOTE_CMD[*]}
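The `dev-container+<hex>` authority passed to `code --remote` is simply the absolute repository path encoded as lowercase hex, which is what the `xxd -p | tr -d "[:space:]"` pipeline produces. A manual equivalent, with an illustrative path that should be replaced by the actual checkout location:

    # Illustrative path; substitute your own checkout
    printf "%s" "/home/user/omniverse_rs" | xxd -p | tr -d "[:space:]"
    # -> 2f686f6d652f757365722f6f6d6e6976657273655f7273
    code --remote "dev-container+2f686f6d652f757365722f6f6d6e6976657273655f7273" /root/ws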
@@ -0,0 +1,51 @@
#!/usr/bin/env bash
set -e

## Configuration
# Default Docker Hub user (used if user is not logged in)
DEFAULT_DOCKERHUB_USER="andrejorsula"

## If the current user is not in the docker group, all docker commands will be run as root
WITH_SUDO=""
if ! grep -qi /etc/group -e "docker.*${USER}"; then
    echo "INFO: The current user ${USER} is not detected in the docker group. All docker commands will be run as root."
    WITH_SUDO="sudo"
fi

## Determine the name of the image to build (automatically inferred from the current user and repository, or using the default if not available)
# Get the current Docker Hub user or use the default
DOCKERHUB_USER="$(${WITH_SUDO} docker info | sed '/Username:/!d;s/.* //')"
DOCKERHUB_USER="${DOCKERHUB_USER:-${DEFAULT_DOCKERHUB_USER}}"
# Get the name of the repository (directory)
SCRIPT_DIR="$(cd "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" &>/dev/null && pwd)"
REPOSITORY_DIR="$(dirname "${SCRIPT_DIR}")"
if [[ -f "${REPOSITORY_DIR}/Dockerfile" ]]; then
    REPOSITORY_NAME="$(basename "${REPOSITORY_DIR}")"
else
    echo >&2 -e "\033[1;31mERROR: Cannot build Docker image because \"${REPOSITORY_DIR}/Dockerfile\" does not exist.\033[0m"
    exit 1
fi
# Combine the user and repository name to form the image name
IMAGE_NAME="${DOCKERHUB_USER}/${REPOSITORY_NAME}"

## Parse TAG and forward additional build arguments
if [ "${#}" -gt "0" ]; then
    if [[ "${1}" != "-"* ]]; then
        IMAGE_NAME="${IMAGE_NAME}:${1}"
        BUILD_ARGS=${*:2}
    else
        BUILD_ARGS=${*:1}
    fi
fi

## Build the image
# shellcheck disable=SC2206
DOCKER_BUILD_CMD=(
    ${WITH_SUDO} docker build
    "${REPOSITORY_DIR}"
    --tag "${IMAGE_NAME}"
    "${BUILD_ARGS}"
)
echo -e "\033[1;90m${DOCKER_BUILD_CMD[*]}\033[0m" | xargs
# shellcheck disable=SC2048
exec ${DOCKER_BUILD_CMD[*]}
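The script tags the image as `<dockerhub_user>/<repository_directory>`, treats a first argument that does not start with `-` as the image tag, and forwards everything else to `docker build`. A usage sketch (the script path is an assumption, since file names are not shown in this view):

    .docker/build.bash                       # builds e.g. andrejorsula/omniverse_rs
    .docker/build.bash latest --no-cache     # tags the image as :latest and forwards --no-cache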
@@ -0,0 +1,14 @@
#!/usr/bin/env bash
set -e

## Determine the host directory to be mounted as a development volume
SCRIPT_DIR="$(cd "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" &>/dev/null && pwd)"
REPOSITORY_DIR="$(dirname "${SCRIPT_DIR}")"
DEV_VOLUME_HOST_DIR="${DEV_VOLUME_HOST_DIR:-"${REPOSITORY_DIR}"}"

## Determine the docker directory where the development volume will be mounted
DEV_VOLUME_DOCKER_DIR="${DEV_VOLUME_DOCKER_DIR:-"/root/ws"}"

## Run the docker container with the development volume mounted
echo -e "\033[2;37mDevelopment volume: ${DEV_VOLUME_HOST_DIR} -> ${DEV_VOLUME_DOCKER_DIR}\033[0m" | xargs
exec "${SCRIPT_DIR}/run.bash" -v "${DEV_VOLUME_HOST_DIR}:${DEV_VOLUME_DOCKER_DIR}" "${@}"
@@ -0,0 +1,127 @@
#!/usr/bin/env bash
set -e

## Install curl if missing
if ! command -v curl >/dev/null 2>&1; then
    if command -v apt-get >/dev/null 2>&1; then
        sudo apt-get install -y curl
    elif command -v dnf >/dev/null 2>&1; then
        sudo dnf install -y curl
    elif command -v yum >/dev/null 2>&1; then
        sudo yum install -y curl
    fi
fi

## Install Docker via the convenience script
curl -fsSL https://get.docker.com | sh
sudo systemctl enable --now docker

## (Optional) Install support for NVIDIA if an NVIDIA GPU is detected and the installation is requested
check_nvidia_gpu() {
    if ! lshw -C display 2>/dev/null | grep -qi "vendor.*nvidia"; then
        return 1 # NVIDIA GPU is not present
    elif ! command -v nvidia-smi >/dev/null 2>&1; then
        return 1 # NVIDIA GPU is present but nvidia-utils is not installed
    elif ! nvidia-smi -L &>/dev/null; then
        return 1 # NVIDIA GPU is present but is not working properly
    else
        return 0 # NVIDIA GPU is present and appears to be working
    fi
}
configure_nvidia_apt_repository() {
    curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey |
        sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg &&
        curl -sL https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list |
        sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' |
        sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list
    sudo apt-get update
}
if check_nvidia_gpu; then
    echo -e "INFO: NVIDIA GPU detected."
    DOCKER_VERSION="$(sudo docker version --format '{{.Server.Version}}' 2>/dev/null)"
    MIN_VERSION_FOR_TOOLKIT="19.3"
    if [ "$(printf '%s\n' "${MIN_VERSION_FOR_TOOLKIT}" "${DOCKER_VERSION}" | sort -V | head -n1)" = "$MIN_VERSION_FOR_TOOLKIT" ]; then
        if ! command -v nvidia-container-toolkit >/dev/null 2>&1; then
            while true; do
                read -erp "Do you want to install NVIDIA Container Toolkit? [Y/n]: " INSTALL_NVIDIA_CONTAINER_TOOLKIT
                case "${INSTALL_NVIDIA_CONTAINER_TOOLKIT,,}" in
                    "" | y | yes)
                        INSTALL_NVIDIA_CONTAINER_TOOLKIT=true
                        break
                        ;;
                    n | no)
                        INSTALL_NVIDIA_CONTAINER_TOOLKIT=false
                        break
                        ;;
                esac
            done
            if [[ "${INSTALL_NVIDIA_CONTAINER_TOOLKIT}" = true ]]; then
                if command -v apt-get >/dev/null 2>&1; then
                    configure_nvidia_apt_repository
                    sudo apt-get install -y nvidia-container-toolkit
                elif command -v yum >/dev/null 2>&1; then
                    curl -s -L https://nvidia.github.io/libnvidia-container/stable/rpm/nvidia-container-toolkit.repo |
                        sudo tee /etc/yum.repos.d/nvidia-container-toolkit.repo
                    sudo yum install -y nvidia-container-toolkit
                else
                    echo >&2 -e "\033[1;31mERROR: Supported package manager not found. Please install nvidia-container-toolkit manually.\033[0m"
                fi
                sudo systemctl restart --now docker
            fi
        else
            echo -e "INFO: NVIDIA Container Toolkit is already installed."
        fi
    else
        if ! command -v nvidia-docker >/dev/null 2>&1; then
            while true; do
                read -erp "Do you want to install NVIDIA Docker? [Y/n]: " INSTALL_NVIDIA_DOCKER
                case "${INSTALL_NVIDIA_DOCKER,,}" in
                    "" | y | yes)
                        INSTALL_NVIDIA_DOCKER=true
                        break
                        ;;
                    n | no)
                        INSTALL_NVIDIA_DOCKER=false
                        break
                        ;;
                esac
            done
            if [[ "${INSTALL_NVIDIA_DOCKER}" = true ]]; then
                if command -v apt-get >/dev/null 2>&1; then
                    configure_nvidia_apt_repository
                    sudo apt-get install -y nvidia-docker2
                else
                    echo >&2 -e "\033[1;31mERROR: Supported package manager not found. Please install nvidia-docker2 manually.\033[0m"
                fi
                sudo systemctl restart --now docker
            fi
        else
            echo -e "INFO: NVIDIA Docker is already installed."
        fi
    fi
fi

if [[ $(grep /etc/group -e "docker") != *"${USER}"* ]]; then
    [ -z "${PS1}" ]
    ## (Optional) Add user to docker group
    while true; do
        read -erp "Do you want to add the current user ${USER} to the docker group? [Y/n]: " ADD_USER_TO_DOCKER_GROUP
        case "${ADD_USER_TO_DOCKER_GROUP,,}" in
            "" | y | yes)
                ADD_USER_TO_DOCKER_GROUP=true
                break
                ;;
            n | no)
                ADD_USER_TO_DOCKER_GROUP=false
                break
                ;;
        esac
    done
    if [[ "${ADD_USER_TO_DOCKER_GROUP}" = true ]]; then
        sudo groupadd -f docker
        sudo usermod -aG docker "${USER}"
        echo -e "INFO: The current user ${USER} was added to the docker group. Please log out and log back in to apply the changes. Alternatively, run the following command to apply the changes in each new shell until you log out:\n\n\tnewgrp docker\n"
    fi
else
    echo -e "INFO: The current user ${USER} is already in the docker group."
fi
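Once the installer finishes, GPU passthrough can be verified with the usual NVIDIA Container Toolkit smoke test before building the image (a sketch; the CUDA image tag is illustrative and any CUDA base image should work):

    # Should print the same table as running nvidia-smi on the host
    sudo docker run --rm --gpus all nvidia/cuda:12.3.1-base-ubuntu22.04 nvidia-smi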
@@ -0,0 +1,100 @@
#!/usr/bin/env bash
set -e

## Configuration
# Default repository name (used if inferred name cannot be determined)
DEFAULT_REPOSITORY_NAME="omniverse_rs"
# Flags for executing a command inside the container
DOCKER_EXEC_OPTS="${DOCKER_EXEC_OPTS:-
    --interactive
    --tty
}"
# Default command to execute inside the container
DEFAULT_CMD="bash"

## If the current user is not in the docker group, all docker commands will be run as root
WITH_SUDO=""
if ! grep -qi /etc/group -e "docker.*${USER}"; then
    echo "INFO: The current user ${USER} is not detected in the docker group. All docker commands will be run as root."
    WITH_SUDO="sudo"
fi

## Get the name of the repository (directory) or use the default
SCRIPT_DIR="$(cd "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" &>/dev/null && pwd)"
REPOSITORY_DIR="$(dirname "${SCRIPT_DIR}")"
if [[ -f "${REPOSITORY_DIR}/Dockerfile" ]]; then
    CONTAINER_NAME="$(basename "${REPOSITORY_DIR}")"
else
    CONTAINER_NAME="${DEFAULT_REPOSITORY_NAME}"
fi

## Parse CMD if provided
if [ "${#}" -gt "0" ]; then
    # If the first argument is a positive integer, it is parsed as the suffix of the container name
    if [[ "${1}" =~ ^[0-9]+$ ]]; then
        CONTAINER_NAME_SUFFIX="${1}"
        if [ "${#}" -gt "1" ]; then
            CMD=${*:2}
        else
            CMD="${DEFAULT_CMD}"
        fi
    else
        CMD=${*:1}
    fi
else
    CMD="${DEFAULT_CMD}"
fi

## Verify that the container is active
RUNNING_CONTAINERS=$(${WITH_SUDO} docker container list --format "{{.Names}}" | grep -i "${CONTAINER_NAME}" || :)
RUNNING_CONTAINERS_COUNT=$(echo "${RUNNING_CONTAINERS}" | wc -w)
if [ "${RUNNING_CONTAINERS_COUNT}" -eq "0" ]; then
    echo >&2 -e "\033[1;31mERROR: There are no active containers with the name \"${CONTAINER_NAME}\". Start the container first before attempting to join it.\033[0m"
    exit 1
fi

print_running_containers_and_usage() {
    RUNNING_CONTAINERS=$(echo "${RUNNING_CONTAINERS}" | sort --version-sort)
    echo >&2 "Active *${CONTAINER_NAME}* containers:"
    i=0
    echo "${RUNNING_CONTAINERS}" | while read -r line; do
        echo >&2 -e "\t${i}: ${line}"
        i=$((i + 1))
    done
    echo >&2 "Usage: ${0} [CONTAINER_NAME_SUFFIX] [CMD]"
}
## If provided, append the numerical suffix to the container name
if [[ -n "${CONTAINER_NAME_SUFFIX}" ]]; then
    if [ "${CONTAINER_NAME_SUFFIX}" -eq "0" ]; then
        CONTAINER_NAME_SUFFIX=""
    fi
    # Make sure that the container with the specified suffix is active
    if ! echo "${RUNNING_CONTAINERS}" | grep -qi "${CONTAINER_NAME}${CONTAINER_NAME_SUFFIX}"; then
        echo >&2 -e "\033[1;31mERROR: Invalid argument \"${CONTAINER_NAME_SUFFIX}\" — there is no active container with the name \"${CONTAINER_NAME}${CONTAINER_NAME_SUFFIX}\".\033[0m"
        print_running_containers_and_usage
        exit 2
    fi
    CONTAINER_NAME="${CONTAINER_NAME}${CONTAINER_NAME_SUFFIX}"
else
    # Otherwise, check if there is only one active container with the specified name
    if [ "${RUNNING_CONTAINERS_COUNT}" -gt "1" ]; then
        echo >&2 -e "\033[1;31mERROR: More than one active *${CONTAINER_NAME}* container. Specify the suffix of the container name as the first argument.\033[0m"
        print_running_containers_and_usage
        exit 2
    else
        # If there is only one active container, use it regardless of the suffix
        CONTAINER_NAME="${RUNNING_CONTAINERS}"
    fi
fi

## Execute command inside the container
# shellcheck disable=SC2206
DOCKER_EXEC_CMD=(
    ${WITH_SUDO} docker exec
    "${DOCKER_EXEC_OPTS}"
    "${CONTAINER_NAME}"
    "${CMD}"
)
echo -e "\033[1;90m${DOCKER_EXEC_CMD[*]}\033[0m" | xargs
# shellcheck disable=SC2048
exec ${DOCKER_EXEC_CMD[*]}
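With a single matching container running, the script can be invoked with just a command; with several containers (e.g. omniverse_rs, omniverse_rs1, omniverse_rs2), the numeric first argument selects which one to join, where 0 maps to the unsuffixed name. A usage sketch (the script path is an assumption):

    .docker/join.bash                   # interactive bash in the only running container
    .docker/join.bash 2 nvidia-smi      # run nvidia-smi inside omniverse_rs2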