Skip to content

Commit

Permalink
Merge branch 'og-develop' into feat/osc
Browse files Browse the repository at this point in the history
  • Loading branch information
cremebrule authored Nov 22, 2023
2 parents a219a8e + 3a95122 commit a476db4
Show file tree
Hide file tree
Showing 46 changed files with 5,577 additions and 267 deletions.
9 changes: 8 additions & 1 deletion .github/workflows/build-push-containers.yml
Original file line number Diff line number Diff line change
Expand Up @@ -89,21 +89,28 @@ jobs:
type=semver,pattern={{version}}
-
name: Build and push dev image
id: build-dev
uses: docker/build-push-action@v5
with:
context: .
push: true
tags: ${{ steps.meta-dev.outputs.tags }}
labels: ${{ steps.meta-dev.outputs.labels }}
file: docker/dev.Dockerfile
cache-from: type=gha
cache-to: type=gha,mode=max

- name: Update prod image Dockerfile with dev image tag
run: |
sed -i "s/omnigibson-dev:latest/omnigibson-dev@${{ steps.build-dev.outputs.digest }}/g" docker/prod.Dockerfile && cat docker/prod.Dockerfile
-
name: Build and push prod image
uses: docker/build-push-action@v5
with:
context: .
push: true
tags: ${{ steps.meta-prod.outputs.tags }}
labels: ${{ steps.meta-prod.outputs.labels }}
file: docker/prod.Dockerfile
cache-from: type=gha
cache-to: type=gha,mode=max
cache-to: type=gha,mode=max
5 changes: 5 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,11 @@

-------

## Need support? Join our Discord!
<a href="https://discord.gg/bccR5vGFEx"><img src="https://discordapp.com/api/guilds/1166422812160966707/widget.png?style=banner3"></a>

-------

## Latest Updates
- [08/04/23] **v0.2.0**: More assets! 600 pre-sampled tasks, 7 new scenes, and many new objects 📈 [[release notes]](https://github.com/StanfordVL/OmniGibson/releases/tag/v0.2.0)

Expand Down
16 changes: 9 additions & 7 deletions docker/dev.Dockerfile
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
FROM nvcr.io/nvidia/isaac-sim:2023.1.0
FROM nvcr.io/nvidia/isaac-sim:2023.1.0-hotfix.1

# Set up all the prerequisites.
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
Expand Down Expand Up @@ -38,22 +38,24 @@ ENV MAKEFLAGS="-j `nproc`"
RUN micromamba run -n omnigibson micromamba install -c conda-forge boost && \
micromamba run -n omnigibson pip install pyplusplus && \
git clone https://github.com/ompl/ompl.git /ompl && \
mkdir -p /ompl/build/Release
mkdir -p /ompl/build/Release && \
sed -i "s/find_program(PYPY/# find_program(PYPY/g" /ompl/CMakeModules/Findpypy.cmake

# Build and install OMPL
RUN cd /ompl/build/Release && \
# Build and install OMPL
RUN micromamba run -n omnigibson /bin/bash --login -c 'source /isaac-sim/setup_conda_env.sh && (which python > /root/PYTHON_EXEC) && (echo $PYTHONPATH > /root/PYTHONPATH)' && \
cd /ompl/build/Release && \
micromamba run -n omnigibson cmake ../.. \
-DCMAKE_INSTALL_PREFIX="$CONDA_PREFIX" \
-DBOOST_ROOT="$CONDA_PREFIX" \
-DPYTHON_EXEC=/micromamba/envs/omnigibson/bin/python3.10 \
-DPYTHONPATH=/micromamba/envs/omnigibson/lib/python3.10/site-packages && \
-DPYTHON_EXEC=$(cat /root/PYTHON_EXEC) \
-DPYTHONPATH=$(cat /root/PYTHONPATH) && \
micromamba run -n omnigibson make -j 4 update_bindings && \
micromamba run -n omnigibson make -j 4 && \
cd py-bindings && \
micromamba run -n omnigibson make install

# Test OMPL
RUN micromamba run -n omnigibson python -c "import ompl"
RUN micromamba run -n omnigibson python -c "from ompl import base"

ENTRYPOINT ["micromamba", "run", "-n", "omnigibson"]

Expand Down
2 changes: 1 addition & 1 deletion docker/prod.Dockerfile
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
FROM stanfordvl/omnigibson-dev
FROM stanfordvl/omnigibson-dev:latest

# Copy over omnigibson source
ADD . /omnigibson-src
Expand Down
71 changes: 71 additions & 0 deletions docker/sbatch_example.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,71 @@
#!/usr/bin/env bash
#SBATCH --account=cvgl
#SBATCH --partition=svl --qos=normal
#SBATCH --nodes=1
#SBATCH --cpus-per-task=8
#SBATCH --mem=30G
#SBATCH --gres=gpu:2080ti:1

# Run OmniGibson unit tests inside an enroot container on a SLURM node.
# Isaac Sim caches are keyed by GPU UUID so jobs sharing a node don't collide.

IMAGE_PATH="/cvgl2/u/cgokmen/omnigibson.sqsh"
# UUID of the first visible GPU (requires nvidia-smi on the node).
GPU_ID=$(nvidia-smi -L | grep -oP '(?<=GPU-)[a-fA-F0-9\-]+' | head -n 1)
ISAAC_CACHE_PATH="/scr-ssd/${SLURM_JOB_USER}/isaac_cache_${GPU_ID}"

# Define env kwargs to pass into the container.
declare -A ENVS=(
  [NVIDIA_DRIVER_CAPABILITIES]=all
  [NVIDIA_VISIBLE_DEVICES]=0
  [DISPLAY]=""
  [OMNIGIBSON_HEADLESS]=1
)
# Collect --env flags in an array so values survive word-splitting intact.
ENV_KWARGS=()
for env_var in "${!ENVS[@]}"; do
  ENV_KWARGS+=(--env "${env_var}=${ENVS[${env_var}]}")
done

# Define mounts to create (maps local directory to container directory).
declare -A MOUNTS=(
  [/scr-ssd/og-data-0-2-1]=/data
  [${ISAAC_CACHE_PATH}/isaac-sim/kit/cache/Kit]=/isaac-sim/kit/cache/Kit
  [${ISAAC_CACHE_PATH}/isaac-sim/cache/ov]=/root/.cache/ov
  [${ISAAC_CACHE_PATH}/isaac-sim/cache/pip]=/root/.cache/pip
  [${ISAAC_CACHE_PATH}/isaac-sim/cache/glcache]=/root/.cache/nvidia/GLCache
  [${ISAAC_CACHE_PATH}/isaac-sim/cache/computecache]=/root/.nv/ComputeCache
  [${ISAAC_CACHE_PATH}/isaac-sim/logs]=/root/.nvidia-omniverse/logs
  [${ISAAC_CACHE_PATH}/isaac-sim/config]=/root/.nvidia-omniverse/config
  [${ISAAC_CACHE_PATH}/isaac-sim/data]=/root/.local/share/ov/data
  [${ISAAC_CACHE_PATH}/isaac-sim/documents]=/root/Documents
  # Feel free to include lines like the below to mount a workspace or a custom OG version
  # [/cvgl2/u/cgokmen/OmniGibson]=/omnigibson-src
  # [/cvgl2/u/cgokmen/my-project]=/my-project
)

MOUNT_KWARGS=()
for mount in "${!MOUNTS[@]}"; do
  # Verify mount path in local directory exists; otherwise, create it.
  if [ ! -e "${mount}" ]; then
    mkdir -p -- "${mount}"
  fi
  # Collect --mount flags in an array (quoted, so spaces in paths are safe).
  MOUNT_KWARGS+=(--mount "${mount}:${MOUNTS[${mount}]}")
done

# (Re)create the container from the squashed image.
CONTAINER_NAME="omnigibson_${GPU_ID}"
enroot create --force --name "${CONTAINER_NAME}" "${IMAGE_PATH}"

# The last line here is the command you want to run inside the container.
# Here I'm running some unit tests. Capture the exit status so that the
# cleanup step below doesn't mask a test failure from SLURM.
enroot start \
  --root \
  --rw \
  "${ENV_KWARGS[@]}" \
  "${MOUNT_KWARGS[@]}" \
  "${CONTAINER_NAME}" \
  micromamba run -n omnigibson /bin/bash --login -c "source /isaac-sim/setup_conda_env.sh && pytest tests/test_object_states.py"
status=$?

# Clean up the image if possible, then report the test result.
enroot remove -f "${CONTAINER_NAME}"
exit "${status}"
123 changes: 123 additions & 0 deletions docs/getting_started/slurm.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,123 @@
---
icon: material/server-network
---

# 🔌 **Running on a SLURM cluster**

_This documentation is a work in progress._

OmniGibson can be run on a SLURM cluster using the _enroot_ container software, which is a replacement
for Docker that allows containers to be run as the current user rather than as root. _enroot_ needs
to be installed on your SLURM cluster by an administrator.

With enroot installed, you can follow the below steps to run OmniGibson on SLURM:

1. Download the dataset to a location that is accessible by cluster nodes. To do this, you can use
the download_dataset.py script inside OmniGibson's scripts directory, and move it to the right spot
later. In the below example, /cvgl/ is a networked drive that is accessible by the cluster nodes.
**For Stanford users, this step is already done for SVL and Viscam nodes**
```{.shell .annotate}
OMNIGIBSON_NO_OMNIVERSE=1 python scripts/download_dataset.py
mv omnigibson/data /cvgl/group/Gibson/og-data-0-2-1
```

2. (Optional) Distribute the dataset to the individual nodes.
This will make load times much better than reading from a network drive.
To do this, run the below command on your SLURM head node (replace `svl` with your partition
name and `cvgl` with your account name, as well as the paths with the respective network
and local paths). Confirm via `squeue -u $USER` that all jobs have finished. **This step is already done for SVL and Viscam nodes**
```{.shell .annotate}
sinfo -p svl -o "%N,%n" -h | \
sed s/,.*//g | \
xargs -L1 -I{} \
sbatch \
--account=cvgl --partition=svl --nodelist={} --mem=8G --cpus-per-task=4 \
--wrap 'cp -R /cvgl/group/Gibson/og-data-0-2-1 /scr-ssd/og-data-0-2-1'
```

3. Download your desired image to a location that is accessible by the cluster nodes. (Replace the path with your own path, and feel free to replace the `action-primitives` tag with your desired branch tag, e.g. `latest`.) You have the option to mount code (meaning you don't need the container to come with all the code you want to run, just the right dependencies / environment setup)
```{.shell .annotate}
enroot import --output /cvgl2/u/cgokmen/omnigibson.sqsh docker://stanfordvl/omnigibson:action-primitives
```

4. (Optional) If you intend to mount code onto the container, make it available at a location that is accessible by the cluster nodes. You can mount arbitrary code, and you can also mount a custom version of OmniGibson (for the latter, you need to make sure you mount your copy of OmniGibson at /omnigibson-src inside the container). For example:
```{.shell .annotate}
git clone https://github.com/StanfordVL/OmniGibson.git /cvgl2/u/cgokmen/OmniGibson
```

5. Create your launch script. You can start with a copy of the script below. If you want to launch multiple workers, increase the job array option. You should keep the setting at a minimum of 1 GPU per node, but feel free to edit other settings. You can mount any additional code as you'd like, and you can change the entrypoint such that the container runs your mounted code upon launch. See the mounts section for an example. A copy of this script can be found in docker/sbatch_example.sh
```{.shell .annotate}
#!/usr/bin/env bash
#SBATCH --account=cvgl
#SBATCH --partition=svl --qos=normal
#SBATCH --nodes=1
#SBATCH --cpus-per-task=8
#SBATCH --mem=30G
#SBATCH --gres=gpu:2080ti:1
IMAGE_PATH="/cvgl2/u/cgokmen/omnigibson.sqsh"
GPU_ID=$(nvidia-smi -L | grep -oP '(?<=GPU-)[a-fA-F0-9\-]+' | head -n 1)
ISAAC_CACHE_PATH="/scr-ssd/${SLURM_JOB_USER}/isaac_cache_${GPU_ID}"
# Define env kwargs to pass
declare -A ENVS=(
[NVIDIA_DRIVER_CAPABILITIES]=all
[NVIDIA_VISIBLE_DEVICES]=0
[DISPLAY]=""
[OMNIGIBSON_HEADLESS]=1
)
for env_var in "${!ENVS[@]}"; do
# Add to env kwargs we'll pass to enroot command later
ENV_KWARGS="${ENV_KWARGS} --env ${env_var}=${ENVS[${env_var}]}"
done
# Define mounts to create (maps local directory to container directory)
declare -A MOUNTS=(
[/scr-ssd/og-data-0-2-1]=/data
[${ISAAC_CACHE_PATH}/isaac-sim/kit/cache/Kit]=/isaac-sim/kit/cache/Kit
[${ISAAC_CACHE_PATH}/isaac-sim/cache/ov]=/root/.cache/ov
[${ISAAC_CACHE_PATH}/isaac-sim/cache/pip]=/root/.cache/pip
[${ISAAC_CACHE_PATH}/isaac-sim/cache/glcache]=/root/.cache/nvidia/GLCache
[${ISAAC_CACHE_PATH}/isaac-sim/cache/computecache]=/root/.nv/ComputeCache
[${ISAAC_CACHE_PATH}/isaac-sim/logs]=/root/.nvidia-omniverse/logs
[${ISAAC_CACHE_PATH}/isaac-sim/config]=/root/.nvidia-omniverse/config
[${ISAAC_CACHE_PATH}/isaac-sim/data]=/root/.local/share/ov/data
[${ISAAC_CACHE_PATH}/isaac-sim/documents]=/root/Documents
# Feel free to include lines like the below to mount a workspace or a custom OG version
# [/cvgl2/u/cgokmen/OmniGibson]=/omnigibson-src
# [/cvgl2/u/cgokmen/my-project]=/my-project
)
MOUNT_KWARGS=""
for mount in "${!MOUNTS[@]}"; do
# Verify mount path in local directory exists, otherwise, create it
if [ ! -e "$mount" ]; then
mkdir -p ${mount}
fi
# Add to mount kwargs we'll pass to enroot command later
MOUNT_KWARGS="${MOUNT_KWARGS} --mount ${mount}:${MOUNTS[${mount}]}"
done
# Create the image if it doesn't already exist
CONTAINER_NAME=omnigibson_${GPU_ID}
enroot create --force --name ${CONTAINER_NAME} ${IMAGE_PATH}
# Remove leading space in string
ENV_KWARGS="${ENV_KWARGS:1}"
MOUNT_KWARGS="${MOUNT_KWARGS:1}"
# The last line here is the command you want to run inside the container.
# Here I'm running some unit tests.
enroot start \
--root \
--rw \
${ENV_KWARGS} \
${MOUNT_KWARGS} \
${CONTAINER_NAME} \
micromamba run -n omnigibson /bin/bash --login -c "source /isaac-sim/setup_conda_env.sh && pytest tests/test_object_states.py"
# Clean up the image if possible.
enroot remove -f ${CONTAINER_NAME}
```

6. Launch your job using `sbatch your_script.sh` - and profit!
98 changes: 98 additions & 0 deletions omnigibson/action_primitives/action_primitive_set_base.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,98 @@
import inspect
from abc import ABCMeta, abstractmethod
from enum import IntEnum
from typing import List

from future.utils import with_metaclass
from omnigibson import Environment

from omnigibson.robots import BaseRobot
from omnigibson.scenes.interactive_traversable_scene import InteractiveTraversableScene
from omnigibson.tasks.task_base import BaseTask

REGISTERED_PRIMITIVE_SETS = {}

class ActionPrimitiveError(ValueError):
    """Raised when a single attempt at executing an action primitive fails."""

    class Reason(IntEnum):
        """Stage of primitive execution at which the failure occurred."""

        # A precondition was not satisfied, e.g. PLACE was called without an
        # object currently in hand.
        PRE_CONDITION_ERROR = 0

        # Sampling failed: e.g. no position to place an object was found, or
        # no pose near the object could be found to navigate to.
        SAMPLING_ERROR = 1

        # Planning for the primitive failed, possibly because no path exists.
        PLANNING_ERROR = 2

        # Planning succeeded, but something went wrong during execution.
        EXECUTION_ERROR = 3

        # Execution completed, but a post-condition check found an error.
        POST_CONDITION_ERROR = 4

    def __init__(self, reason: Reason, message, metadata=None):
        """
        Args:
            reason: stage of the primitive at which the failure occurred.
            message: human-readable description of the failure.
            metadata: optional dict carrying extra debugging context.
        """
        self.reason = reason
        # Store a dict even when no metadata was given; the formatted message
        # below deliberately shows the raw argument (possibly None).
        self.metadata = {} if metadata is None else metadata
        super().__init__(f"{reason.name}: {message}. Additional info: {metadata}")


class ActionPrimitiveErrorGroup(ValueError):
    """Aggregates the failures from every attempt of an action primitive.

    Raised once all retries of a primitive have failed; wraps each
    per-attempt ActionPrimitiveError and renders them in one message.
    """

    def __init__(self, exceptions: List[ActionPrimitiveError]) -> None:
        # Freeze the attempts so the group is immutable after construction.
        self._exceptions = tuple(exceptions)
        parts = (f"Attempt {idx}: {exc}" for idx, exc in enumerate(exceptions))
        super().__init__(
            "An error occurred during each attempt of this action.\n\n" + "\n\n".join(parts)
        )

    @property
    def exceptions(self):
        """Tuple of the individual per-attempt exceptions, in order."""
        return self._exceptions


# Native Python 3 metaclass syntax replaces future.utils.with_metaclass — a
# Python 2 compatibility shim from the third-party `future` package. The file
# already uses f-strings, so it is Python 3 only and the shim is unnecessary.
class BaseActionPrimitiveSet(metaclass=ABCMeta):
    """Abstract base class for a set of action primitives over an environment.

    Concrete subclasses are auto-registered in REGISTERED_PRIMITIVE_SETS
    (see __init_subclass__) so they can be referenced by name in configs.
    """

    def __init_subclass__(cls, **kwargs):
        """
        Registers all subclasses as part of this registry. This is useful to decouple internal codebase from external
        user additions. This way, users can add their custom primitive set by simply extending this class,
        and it will automatically be registered internally. This allows users to then specify their primitive set
        directly in string-form in e.g., their config files, without having to manually set the str-to-class mapping
        in our code.
        """
        # Only concrete (fully implemented) subclasses get registered.
        if not inspect.isabstract(cls):
            REGISTERED_PRIMITIVE_SETS[cls.__name__] = cls

    def __init__(self, env):
        # Environment instance the primitives will plan and act in.
        self.env: Environment = env

    @property
    def robot(self):
        # Currently returns the first robot in the environment, but can be scaled to multiple robots
        # by creating multiple action generators and passing in a robot index etc.
        return self.env.robots[0]

    @abstractmethod
    def get_action_space(self):
        """Get the higher-level action space as an OpenAI Gym Space object."""
        pass

    @abstractmethod
    def apply(self, action):
        """
        Apply a primitive action.

        Given a higher-level action in the same format as the action space (e.g. as a number),
        generates a sequence of lower level actions (or raise ActionPrimitiveError). The action
        will get resolved and passed into apply_ref.
        """
        pass

    @abstractmethod
    def apply_ref(self, action, *args):
        """
        Apply a primitive action by reference.

        Given a higher-level action from the corresponding action set enum and any necessary arguments,
        generates a sequence of lower level actions (or raise ActionPrimitiveError)
        """
        pass
Loading

0 comments on commit a476db4

Please sign in to comment.