diff --git a/.github/workflows/build-docker-images.yml b/.github/workflows/build-docker-images.yml
index a27d49b3..a12d27c2 100644
--- a/.github/workflows/build-docker-images.yml
+++ b/.github/workflows/build-docker-images.yml
@@ -1,21 +1,42 @@
 name: build-docker-images
 
 on:
+  workflow_dispatch:
   push:
     branches:
       - 'master'
 
 jobs:
+  prepare:
+    runs-on: ubuntu-latest
+    outputs:
+      targets: ${{ steps.generate.outputs.targets }}
+    steps:
+      -
+        name: Checkout
+        uses: actions/checkout@v4
+      -
+        name: List targets
+        id: generate
+        uses: docker/bake-action/subaction/list-targets@v4
+        with:
+          target: atk
+          files: atk.hcl,atk.yml
   build:
     runs-on: ubuntu-latest
+    needs:
+      - prepare
     strategy:
       fail-fast: false
       matrix:
-        target: [vnc, dev, chrono]
+        target: ${{ fromJson(needs.prepare.outputs.targets) }}
     steps:
       -
         name: Checkout
         uses: actions/checkout@v4
+      -
+        name: Copy atk.env to .env
+        run: cp atk.env .env
       -
         name: Set up QEMU
         uses: docker/setup-qemu-action@v3
diff --git a/atk.hcl b/atk.hcl
new file mode 100644
index 00000000..a8e8d02e
--- /dev/null
+++ b/atk.hcl
@@ -0,0 +1,20 @@
+// This file defines a group of targets to build in GitHub CI.
+// In the GitHub Action, a single target is specified (e.g. 'atk') which is then
+// expanded into the list of targets to build (as defined by 'targets' below).
+//
+// Example:
+// group "atk" {
+//   targets = ["dev", "chrono", "vnc"]
+// }
+
+
+group "atk" {
+  targets = ["dev", "chrono", "vnc"]
+}
+
+target "chrono" {
+  target = "chrono"
+  args = {
+    REMOVE_OPTIX = "true"
+  }
+}
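The `list-targets` subaction resolves the `atk` group above into a JSON array (e.g. `["dev", "chrono", "vnc"]`), which `fromJson` then turns into the build matrix. As a sketch of how to sanity-check the resolved definition locally (assuming Docker Buildx is installed; `atk.env` is copied to `.env` first so compose variable interpolation works, mirroring the CI step):

```bash
# Mirror the CI step so variables like ${DOCKERHUB_USERNAME} resolve.
cp atk.env .env

# Print the fully resolved bake definition for the 'atk' group without building.
docker buildx bake -f atk.hcl -f atk.yml --print atk
```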
diff --git a/atk.yml b/atk.yml
index 1dea8daa..8d115b8c 100644
--- a/atk.yml
+++ b/atk.yml
@@ -27,7 +27,6 @@ x-optionals:
 name: art
 services:
   common:
-    env_file: "atk.env"
     build:
       context: "./"
       network: "host"
@@ -42,6 +41,11 @@ services:
     volumes:
       - "./:/home/${COMPOSE_PROJECT_NAME}/${COMPOSE_PROJECT_NAME}"
     tty: true
+    x-bake:
+      platforms:
+        - linux/amd64
+        - linux/arm64
+      no-cache: true
   dev:
     extends: common
     image: "${DOCKERHUB_USERNAME}/${COMPOSE_PROJECT_NAME}:dev"
@@ -56,15 +60,6 @@ services:
       USER_SHELL_ADD_ONS: ". /home/${COMPOSE_PROJECT_NAME}/${COMPOSE_PROJECT_NAME}/workspace/install/setup.bash"
       PIP_REQUIREMENTS: "${DEFAULT_PIP_REQUIREMENTS} tensorrt"
       ROSDEP_METAPACKAGE: "art_dev_meta"
-      x-bake:
-        platforms:
-          - linux/amd64
-          - linux/arm64
-          - darwin/amd64
-          - darwin/arm64
-          - windows/amd64
-          - windows/arm64
-        no-cache: true
     working_dir: "/home/${COMPOSE_PROJECT_NAME}/${COMPOSE_PROJECT_NAME}/workspace"
   agx:
     extends: dev
@@ -76,6 +71,9 @@ services:
       APT_DEPENDENCIES: "${DEFAULT_APT_DEPENDENCIES}"
      PIP_REQUIREMENTS: "${DEFAULT_PIP_REQUIREMENTS}"
       ROSDEP_SKIP_KEYS: "python3-torchvision tf_transformations"
+    x-bake:
+      platforms:
+        - linux/arm64
   art5:
     extends: agx
     image: "${DOCKERHUB_USERNAME}/${COMPOSE_PROJECT_NAME}:art5"
@@ -119,6 +117,11 @@ services:
       network: "host"
       args:
         VNC_PASSWORD: "${COMPOSE_PROJECT_NAME}"
+    x-bake:
+      platforms:
+        - linux/amd64
+        - linux/arm64
+      no-cache: true
     ports:
       - "127.0.0.1:8080-8099:8080"
       - "127.0.0.1:5900-5999:5900"
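Because `docker buildx bake` accepts compose files as build definitions, the `x-bake` extension fields above pin which platforms each service image is built for; the `darwin/*` and `windows/*` entries were dropped since container images only target `linux/*` platforms. A minimal sketch of building one service locally, assuming a buildx builder with QEMU emulation is configured (as the workflow does via `docker/setup-qemu-action`):

```bash
# Build the vnc service for all platforms declared in its x-bake block.
docker buildx bake -f atk.hcl -f atk.yml vnc

# Or override the declared platforms for a quick single-arch build.
docker buildx bake -f atk.hcl -f atk.yml vnc --set vnc.platform=linux/amd64
```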
diff --git a/docker/chrono.dockerfile b/docker/chrono.dockerfile
index 386c22fb..fd3d1a8b 100644
--- a/docker/chrono.dockerfile
+++ b/docker/chrono.dockerfile
@@ -7,7 +7,8 @@ INCLUDE ./docker/common/base.dockerfile
 
 # Snippets
 INCLUDE ./docker/snippets/ros.dockerfile
-INCLUDE ./docker/snippets/chrono.dockerfile
+INCLUDE ./docker/snippets/optix.dockerfile
+INCLUDE ./docker/snippets/chrono-build.dockerfile
 
 # Will copy in other common configurations for this build
 INCLUDE ./docker/common/common.dockerfile
diff --git a/docker/snippets/chrono.dockerfile b/docker/snippets/chrono-build.dockerfile
similarity index 86%
rename from docker/snippets/chrono.dockerfile
rename to docker/snippets/chrono-build.dockerfile
index 832a6c97..0a8657e3 100644
--- a/docker/snippets/chrono.dockerfile
+++ b/docker/snippets/chrono-build.dockerfile
@@ -1,6 +1,5 @@
 # SPDX-License-Identifier: MIT
 # This snippet install Chrono in /opt/chrono
-# NOTE: Requires OPTIX_SCRIPT to be set and for there be a file that exists there
 # NOTE: ROS needs to be installed, as well
 
 # Install Chrono dependencies
@@ -23,14 +22,6 @@ RUN apt-get update && \
     xorg-dev && \
     apt-get clean && apt-get autoremove -y && rm -rf /var/lib/apt/lists/*
 
-# OptiX
-ARG OPTIX_SCRIPT
-COPY ${OPTIX_SCRIPT} /tmp/optix.sh
-RUN chmod +x /tmp/optix.sh && \
-    mkdir /opt/optix && \
-    /tmp/optix.sh --prefix=/opt/optix --skip-license && \
-    rm /tmp/optix.sh
-
 # Vulkan
 RUN wget -qO- https://packages.lunarg.com/lunarg-signing-key-pub.asc | tee /etc/apt/trusted.gpg.d/lunarg.asc && \
     wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list http://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \
@@ -51,14 +42,14 @@ ARG CHRONO_BRANCH="main"
 ARG CHRONO_REPO="https://github.com/projectchrono/chrono.git"
 ARG CHRONO_DIR="${USERHOME}/chrono"
 ARG CHRONO_INSTALL_DIR="/opt/chrono"
-RUN git clone --recursive -b ${CHRONO_BRANCH} ${CHRONO_REPO} ${CHRONO_DIR} && \
-    . ${ROS_WORKSPACE_DIR}/install/setup.sh && \
-    cd ${CHRONO_DIR}/contrib/build-scripts/vsg/ && \
-    bash buildVSG.sh /opt/vsg && \
-    cd ${CHRONO_DIR}/contrib/build-scripts/urdf/ && \
-    bash buildURDF.sh /opt/urdf && \
-    mkdir ${CHRONO_DIR}/build && \
+RUN git clone --recursive -b ${CHRONO_BRANCH} ${CHRONO_REPO} ${CHRONO_DIR}
+RUN cd ${CHRONO_DIR}/contrib/build-scripts/vsg/ && \
+    sudo bash buildVSG.sh /opt/vsg
+RUN cd ${CHRONO_DIR}/contrib/build-scripts/urdf/ && \
+    sudo bash buildURDF.sh /opt/urdf
+RUN mkdir ${CHRONO_DIR}/build && \
     cd ${CHRONO_DIR}/build && \
+    . ${ROS_WORKSPACE_DIR}/install/setup.sh && \
     cmake ../ -G Ninja \
     -DCMAKE_BUILD_TYPE=Release \
     -DBUILD_DEMOS=OFF \
@@ -90,6 +81,13 @@ RUN git clone --recursive -b ${CHRONO_BRANCH} ${CHRONO_REPO} ${CHRONO_DIR} && \
 # chown the chrono dir so that we can edit it
 RUN chown -R ${USERNAME}:${USERNAME} ${CHRONO_DIR} ${ROS_WORKSPACE_DIR}
 
+# Remove OptiX
+# Due to licensing, we don't want to include this in the final image
+ARG REMOVE_OPTIX="false"
+RUN if [ "${REMOVE_OPTIX}" = "true" ]; then \
+        rm -rf /opt/optix/*; \
+    fi
+
 # Update shell config
 RUN echo ". ${ROS_WORKSPACE_DIR}/install/setup.sh" >> ${USERSHELLPROFILE} && \
     echo "export PYTHONPATH=\$PYTHONPATH:${CHRONO_INSTALL_DIR}/share/chrono/python" >> ${USERSHELLPROFILE} && \
diff --git a/docker/snippets/optix.dockerfile b/docker/snippets/optix.dockerfile
new file mode 100644
index 00000000..d6ee9f1b
--- /dev/null
+++ b/docker/snippets/optix.dockerfile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: MIT
+# This snippet installs OptiX in /opt/optix
+# NOTE: Requires OPTIX_SCRIPT to be set and for a file to exist at that path
+
+# OptiX
+ARG OPTIX_SCRIPT
+COPY ${OPTIX_SCRIPT} /tmp/optix.sh
+RUN chmod +x /tmp/optix.sh && \
+    mkdir /opt/optix && \
+    /tmp/optix.sh --prefix=/opt/optix --skip-license && \
+    rm /tmp/optix.sh
diff --git a/docs/README.md b/docs/README.md
index 0b78f591..24a7286d 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -58,7 +58,7 @@ WARN[0000] The "DISPLAY" variable is not set. Defaulting to a blank string.
 
 ### Launch the simulation
 
-The first time you start up the chrono service, it will need to build the image. This may take a while.
+The first time you start up the chrono service, it will need to build/pull the image. This may take a while.
 
 ```bash
 $ atk dev -ua -s chrono --optionals gpus vnc
@@ -80,7 +80,7 @@ Initialized ChROSInterface: chrono_ros_node.
 
 ### Build and run the autonomy stack
 
-The first time you start up the dev service, it will need to build the image. This may take a while.
+The first time you start up the dev service, it will need to build/pull the image. This may take a while.
 
 > [!NOTE]
 > The very first time you run `colcon build`, you may need to install the `bluespace_ai_xsens_ros_mti_driver` library. To do that, run the following:
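With OptiX split into its own snippet, `OPTIX_SCRIPT` is consumed only by `optix.dockerfile`, while `REMOVE_OPTIX` (defined in `chrono-build.dockerfile`) lets CI strip the library before an image is published. A rough sketch of wiring the two build args together in a direct build — the installer filename is hypothetical (it depends on the version downloaded from NVIDIA), builds are normally driven through `atk`/compose rather than `docker build`, and this assumes the dockerfile's `dockerfile-x` syntax directive is picked up:

```bash
# Stage the (hypothetically named) OptiX installer where builds expect it.
cp ~/Downloads/NVIDIA-OptiX-SDK-7.7.0-linux64-x86_64.sh docker/data/

# Build the chrono image; REMOVE_OPTIX=true empties /opt/optix in the final image.
docker build -f docker/chrono.dockerfile \
  --build-arg OPTIX_SCRIPT=docker/data/NVIDIA-OptiX-SDK-7.7.0-linux64-x86_64.sh \
  --build-arg REMOVE_OPTIX=true \
  -t uwsbel/art:chrono .
```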
diff --git a/docs/design/dockerfiles.md b/docs/design/dockerfiles.md
index 216d2df7..9ac04696 100644
--- a/docs/design/dockerfiles.md
+++ b/docs/design/dockerfiles.md
@@ -15,7 +15,9 @@ docker/
 │   ├── common.dockerfile
 │   └── final.dockerfile
 ├── snippets/
-│   ├── chrono.dockerfile
+│   ├── agx.dockerfile
+│   ├── chrono-build.dockerfile
+│   ├── optix.dockerfile
 │   ├── ros.dockerfile
 │   └── rosdep.dockerfile
 ├── chrono.dockerfile
@@ -29,7 +31,7 @@ docker/
 ## `docker/data/`
 
 This folder holds data files that may be used by dockerfile snippets. For example,
-the [`docker/snippets/chrono.dockerfile`](../../docker/snippets/chrono.dockerfile) requires the OptiX build script; this file should go here.
+the [`docker/snippets/optix.dockerfile`](../../docker/snippets/optix.dockerfile) requires the OptiX build script; this file should go here.
 
 ## `docker/common/`
 
@@ -104,7 +106,7 @@ set the `CMD` to be `${USERSHELLPATH}`.
 This folder contains dockerfile "snippets", or small scripts that are included in
 service dockerfiles to build specific packages, such as Chrono or ROS.
 
-### `docker/snippets/chrono.dockerfile`
+### `docker/snippets/chrono-build.dockerfile`
 
 This file builds Chrono from source. It currently builds a non-configurable list of
 chrono modules that is listed below:
@@ -119,10 +121,6 @@ chrono modules that is listed below:
 Furthermore, it also builds [`chrono_ros_interfaces`](https://github.com/projectchrono/chrono_ros_interfaces). This is required to build `Chrono::ROS`.
 
-**OPTIX_SCRIPT**: The location _on the host_ that the optix script is located at. This
-script can be found on NVIDIA's OptiX downloads page. For more information, see the
-[FAQs](./../misc/faq.md#optix-install).
-
 **ROS_DISTRO**: The ROS distro to use.
 
 **ROS_WORKSPACE_DIR** _(Default: `${USERHOME}/ros_workspace`)_. The directory to build
@@ -146,6 +144,8 @@ as this is _not_ a volume.
 The user profile is updated to add the python binary directory to `PYTHONPATH` and the
 lib directory is appended to `LD_LIBRARY_PATH`.
 
+**REMOVE_OPTIX** _(Default: `false`)_: Whether to remove the OptiX library after building Chrono. This should be `true` if you plan to push the image to a public repository.
+
 ### `docker/snippets/ros.dockerfile`
 
 To decrease image size and allow easy customization, ROS is installed separately (as
@@ -156,6 +156,14 @@ snippet will install ROS here.
 **ROS_INSTALL_PREFIX** _(Default: `/opt/ros/${ROS_DISTRO}`)_: The install prefix that ROS is installed to. This should be the folder location of the `setup.bash` file. By default, if installed through `apt` it will be `/opt/ros/${ROS_DISTRO}`. If it's pre-installed for tegra images, it's at `/opt/ros/${ROS_DISTRO}/install`.
 
+### `docker/snippets/optix.dockerfile`
+
+This snippet installs OptiX in the image. It expects the OptiX install script to be present on the host.
+
+**OPTIX_SCRIPT**: The location _on the host_ of the OptiX install script. This
+script can be found on NVIDIA's OptiX downloads page. For more information, see the
+[FAQs](./../misc/faq.md#optix-install).
+
 ### `docker/snippets/rosdep.dockerfile`
 
 `rosdep` is a useful tool in ROS that parses nested packages, looks inside each
@@ -209,3 +217,9 @@ Below is some additional information for people interested in the underlying wor
 ### `dockerfile-x`
 
 In order to be more extensible and general purpose, the dockerfiles mentioned below were built around `dockerfile-x`. [`dockerfile-x`](https://github.com/devthefuture-org/dockerfile-x) is a docker plugin that supports importing of other dockerfiles through the `INCLUDE` docker build action. Using `INCLUDE`, we can construct service dockerfiles that mix and match different [snippets](#dockersnippets) that we implement.
+
+### Docker Hub
+
+To expedite startup time, multi-platform builds are available on [uwsbel's Docker Hub profile](https://hub.docker.com/repository/docker/uwsbel/art/general). These are built automatically with [GitHub Actions](https://github.com/features/actions) (see the [workflow file](./../../.github/workflows/build-docker-images.yml)).
+
+Leveraging `docker buildx bake`, images can be built on one architecture for other architectures. This means a basic GitHub runner can produce images usable across multiple operating systems and hardware (e.g. macOS, arm, x86), so building the images locally is no longer required when spinning them up. Furthermore, because the workflow runs on every push to `master`, updated images are built and pushed to Docker Hub automatically; retrieving them is as easy as `atk dev -c pull -s <service>`.
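Since the images live on Docker Hub, the typical workflow is to pull rather than build. A sketch, assuming the `uwsbel/art` repository naming implied by the compose file (`${DOCKERHUB_USERNAME}/${COMPOSE_PROJECT_NAME}:<service>`):

```bash
# Pull the pre-built image for a service through atk...
atk dev -c pull -s dev

# ...or grab a specific tag directly with docker.
docker pull uwsbel/art:dev
```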
diff --git a/docs/misc/faq.md b/docs/misc/faq.md
index b0e410f2..785c7fb1 100644
--- a/docs/misc/faq.md
+++ b/docs/misc/faq.md
@@ -41,3 +41,16 @@ USER_GID=1001
 ## OptiX Install
 
 Chrono currently builds against OptiX 7.7. In order to install OptiX in the container, you need to install the OptiX build script that you download from [their website](https://developer.nvidia.com/designworks/optix/downloads/legacy). Then place the script in the `docker/data/` directory. Files in this folder are ignored by git, so no worries there.
+
+If you are pulling the image from [Docker Hub](https://hub.docker.com/repository/docker/uwsbel/art/general), you will also need to run the following; the image on Docker Hub doesn't include the OptiX library since it's under a non-permissive license. These commands install the OptiX library at the location Chrono expects and then save the updated image.
+
+```bash
+$ atk dev -ua -s chrono
+WARNING | logger.set_verbosity :: Verbosity has been set to WARNING
+[+] Running 1/1
+ ✔ Container art-chrono  Started
+art@art-chrono:~/art/sim$ cd ../docker/data/
+art@art-chrono:~/art/docker/data$ sudo bash <optix script>.sh --prefix=/opt/optix --skip-license
+art@art-chrono:~/art/docker/data$ exit
+$ container_id=$(atk dev -c ps -s chrono --compose-arg='-q'); docker commit $container_id $(docker inspect -f '{{.Config.Image}}' $container_id)
+```
diff --git a/docs/usage/how_to_run.md b/docs/usage/how_to_run.md
index 997a7e3a..eb7ed8fe 100644
--- a/docs/usage/how_to_run.md
+++ b/docs/usage/how_to_run.md
@@ -18,9 +18,10 @@ This is a quick review of some `atk` concepts and how to do some basic operation
 
 ### Building the services
 
-Explicitly building the services is not actually required. When you run the `up` command for the first time, the images will also be built.
+> [!TIP]
+> Explicitly building the services is not actually required. We have pre-built images on [Docker Hub](https://hub.docker.com/repository/docker/uwsbel/art/general) which will be pulled when you use the `up` command.
 
-At the moment, there are three services: `vnc`, `chrono`, and `dev`. We can build all of these in one go:
+Let's say you want to build three services: `vnc`, `chrono`, and `dev`. We can build all of them in one go:
 
 ```bash
 $ atk dev -b -s vnc chrono dev