diff --git a/.github/workflows/build-push-containers.yml b/.github/workflows/build-push-containers.yml
index aa27c728b..286c94d36 100644
--- a/.github/workflows/build-push-containers.yml
+++ b/.github/workflows/build-push-containers.yml
@@ -10,28 +10,67 @@ on:
jobs:
docker:
- runs-on: [self-hosted, linux, gpu]
+ runs-on: ubuntu-latest
steps:
+ -
+ name: Check disk space
+ run: df . -h
+ -
+ name: Free disk space
+ run: |
+ sudo docker rmi $(docker image ls -aq) >/dev/null 2>&1 || true
+ sudo rm -rf \
+ /usr/share/dotnet /usr/local/lib/android /opt/ghc \
+ /usr/local/share/powershell /usr/share/swift /usr/local/.ghcup \
+ /usr/lib/jvm || true
+ echo "some directories deleted"
+ sudo apt install aptitude -y >/dev/null 2>&1
+ sudo aptitude purge aria2 ansible azure-cli shellcheck rpm xorriso zsync \
+ esl-erlang firefox gfortran-8 gfortran-9 google-chrome-stable \
+ google-cloud-sdk imagemagick \
+ libmagickcore-dev libmagickwand-dev libmagic-dev ant ant-optional kubectl \
+ mercurial apt-transport-https mono-complete libmysqlclient \
+ unixodbc-dev yarn chrpath libssl-dev libxft-dev \
+ libfreetype6 libfreetype6-dev libfontconfig1 libfontconfig1-dev \
+ snmp pollinate libpq-dev postgresql-client powershell ruby-full \
+ sphinxsearch subversion mongodb-org azure-cli microsoft-edge-stable \
+ -y -f >/dev/null 2>&1
+ sudo aptitude purge google-cloud-sdk -f -y >/dev/null 2>&1
+ sudo aptitude purge microsoft-edge-stable -f -y >/dev/null 2>&1 || true
+ sudo apt purge microsoft-edge-stable -f -y >/dev/null 2>&1 || true
+ sudo aptitude purge '~n ^mysql' -f -y >/dev/null 2>&1
+ sudo aptitude purge '~n ^php' -f -y >/dev/null 2>&1
+ sudo aptitude purge '~n ^dotnet' -f -y >/dev/null 2>&1
+ sudo apt-get autoremove -y >/dev/null 2>&1
+ sudo apt-get autoclean -y >/dev/null 2>&1
+ echo "some packages purged"
+ -
+ name: Check disk space
+ run: |
+ df . -h
+ -
+ name: Checkout
+ uses: actions/checkout@v4
-
name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v2
+ uses: docker/setup-buildx-action@v3
-
name: Login to NVCR
- uses: docker/login-action@v2
+ uses: docker/login-action@v3
with:
registry: nvcr.io
username: ${{ secrets.NVCR_USERNAME }}
password: ${{ secrets.NVCR_PASSWORD }}
-
name: Login to Docker Hub
- uses: docker/login-action@v2
+ uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_PASSWORD }}
-
name: Metadata for dev Image
id: meta-dev
- uses: docker/metadata-action@v4
+ uses: docker/metadata-action@v5
with:
images: |
stanfordvl/omnigibson-dev
@@ -41,7 +80,7 @@ jobs:
-
name: Metadata for prod Image
id: meta-prod
- uses: docker/metadata-action@v4
+ uses: docker/metadata-action@v5
with:
images: |
stanfordvl/omnigibson
@@ -50,18 +89,25 @@ jobs:
type=semver,pattern={{version}}
-
name: Build and push dev image
- uses: docker/build-push-action@v4
+ id: build-dev
+ uses: docker/build-push-action@v5
with:
+ context: .
push: true
tags: ${{ steps.meta-dev.outputs.tags }}
labels: ${{ steps.meta-dev.outputs.labels }}
file: docker/dev.Dockerfile
cache-from: type=gha
cache-to: type=gha,mode=max
+
+ - name: Update prod image Dockerfile with dev image tag
+ run: |
+ sed -i "s/omnigibson-dev:latest/omnigibson-dev@${{ steps.build-dev.outputs.digest }}/g" docker/prod.Dockerfile && cat docker/prod.Dockerfile
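+        # For example, if the dev build produced a (hypothetical) digest sha256:abc123...,
+        # the sed above rewrites the prod Dockerfile's first line from
+        #   FROM stanfordvl/omnigibson-dev:latest
+        # to
+        #   FROM stanfordvl/omnigibson-dev@sha256:abc123...
+        # so the prod image is built against the exact dev image pushed in the previous step.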
-
name: Build and push prod image
- uses: docker/build-push-action@v4
+ uses: docker/build-push-action@v5
with:
+ context: .
push: true
tags: ${{ steps.meta-prod.outputs.tags }}
labels: ${{ steps.meta-prod.outputs.labels }}
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
deleted file mode 100644
index ad99e03df..000000000
--- a/.github/workflows/docs.yml
+++ /dev/null
@@ -1,77 +0,0 @@
-name: Build & deploy docs
-
-on:
- workflow_dispatch:
- pull_request:
- branches:
- - og-develop
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event_name == 'pull_request' && github.head_ref || github.sha }}
- cancel-in-progress: true
-
-jobs:
- docs:
- runs-on: [linux]
- container:
- image: stanfordvl/omnigibson-dev:latest
- options: --gpus=all --privileged --user=root
- env:
- DISPLAY: ""
- OMNIGIBSON_HEADLESS: 1
- volumes:
- - /scr/omni-data/datasets:/data
- - /usr/share/vulkan/icd.d/nvidia_icd.json:/etc/vulkan/icd.d/nvidia_icd.json
- - /usr/share/vulkan/icd.d/nvidia_layers.json:/etc/vulkan/implicit_layer.d/nvidia_layers.json
- - /usr/share/glvnd/egl_vendor.d/10_nvidia.json:/usr/share/glvnd/egl_vendor.d/10_nvidia.json
- - /scr/omni-data/isaac-sim/cache/ov:/root/.cache/ov:rw
- - /scr/omni-data/isaac-sim/cache/pip:/root/.cache/pip:rw
- - /scr/omni-data/isaac-sim/cache/glcache:/root/.cache/nvidia/GLCache:rw
- - /scr/omni-data/isaac-sim/cache/computecache:/root/.nv/ComputeCache:rw
- - /scr/omni-data/isaac-sim/logs:/root/.nvidia-omniverse/logs:rw
- - /scr/omni-data/isaac-sim/config:/root/.nvidia-omniverse/config:rw
- - /scr/omni-data/isaac-sim/data:/root/.local/share/ov/data:rw
- - /scr/omni-data/isaac-sim/documents:/root/Documents:rw
-
- defaults:
- run:
- shell: micromamba run -n omnigibson /bin/bash -leo pipefail {0}
-
- steps:
- - name: Fix home
- run: echo "HOME=/root" >> $GITHUB_ENV
-
- - name: Checkout source
- uses: actions/checkout@v3
- with:
- submodules: true
- path: omnigibson-src
-
- - name: Install
- working-directory: omnigibson-src
- run: pip install -e .
-
- - name: Install dev requirements
- working-directory: omnigibson-src
- run: pip install -r requirements-dev.txt
-
- - name: Build docs
- working-directory: omnigibson-src
- run: source scripts/build_docs.sh
-
- - name: Copy benchmark index page
- working-directory: omnigibson-src
- run: mkdir site/benchmark && cp scripts/benchmark* site/benchmark/ && cd site/benchmark && mv benchmark.html index.html
-
- - name: Fetch benchmark file from gh-pages branch
- working-directory: omnigibson-src
- run: |
- git fetch origin gh-pages:gh-pages --force
- git show gh-pages:benchmark/data.js > ./site/benchmark/data.js
- git diff
-
- - name: Deploy to gh-pages
- uses: peaceiris/actions-gh-pages@v3
- with:
- github_token: ${{ secrets.GITHUB_TOKEN }}
- publish_dir: ./omnigibson-src/site
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index f3ce0b784..c35794bf5 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -8,26 +8,7 @@ concurrency:
jobs:
test:
- runs-on: [self-hosted, linux, gpu]
- container:
- image: stanfordvl/omnigibson-dev:latest
- options: --gpus=all --privileged --user=root
- env:
- DISPLAY: ""
- OMNIGIBSON_HEADLESS: 1
- volumes:
- - /scr/omni-data/datasets:/data
- - /usr/share/vulkan/icd.d/nvidia_icd.json:/etc/vulkan/icd.d/nvidia_icd.json
- - /usr/share/vulkan/icd.d/nvidia_layers.json:/etc/vulkan/implicit_layer.d/nvidia_layers.json
- - /usr/share/glvnd/egl_vendor.d/10_nvidia.json:/usr/share/glvnd/egl_vendor.d/10_nvidia.json
- - /scr/omni-data/isaac-sim/cache/ov:/root/.cache/ov:rw
- - /scr/omni-data/isaac-sim/cache/pip:/root/.cache/pip:rw
- - /scr/omni-data/isaac-sim/cache/glcache:/root/.cache/nvidia/GLCache:rw
- - /scr/omni-data/isaac-sim/cache/computecache:/root/.nv/ComputeCache:rw
- - /scr/omni-data/isaac-sim/logs:/root/.nvidia-omniverse/logs:rw
- - /scr/omni-data/isaac-sim/config:/root/.nvidia-omniverse/config:rw
- - /scr/omni-data/isaac-sim/data:/root/.local/share/ov/data:rw
- - /scr/omni-data/isaac-sim/documents:/root/Documents:rw
+ runs-on: [self-hosted, linux, gpu, dataset-enabled]
defaults:
run:
diff --git a/README.md b/README.md
index 3a5d623b5..75706983a 100644
--- a/README.md
+++ b/README.md
@@ -6,6 +6,11 @@
-------
+## Need support? Join our Discord!
+
+
+-------
+
## Latest Updates
- [08/04/23] **v0.2.0**: More assets! 600 pre-sampled tasks, 7 new scenes, and many new objects 📈 [[release notes]](https://github.com/StanfordVL/OmniGibson/releases/tag/v0.2.0)
@@ -22,7 +27,7 @@
* 🤖 Mobile Manipulator Robots with Modular ⚙️ Controllers
* 🌎 OpenAI Gym Interface
-Check out [**`OmniGibson`**'s documentation](https://stanfordvl.github.io/OmniGibson/getting_started/installation.html) to get started!
+Check out [**`OmniGibson`**'s documentation](https://behavior.stanford.edu/omnigibson/getting_started/installation.html) to get started!
### Citation
If you use **`OmniGibson`** or its assets and models, please cite:
diff --git a/docker/dev.Dockerfile b/docker/dev.Dockerfile
index 113889b7d..5625223f8 100644
--- a/docker/dev.Dockerfile
+++ b/docker/dev.Dockerfile
@@ -1,4 +1,4 @@
-FROM nvcr.io/nvidia/isaac-sim:2022.2.0
+FROM nvcr.io/nvidia/isaac-sim:2023.1.0
# Set up all the prerequisites.
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
@@ -19,7 +19,7 @@ ENV OMNIGIBSON_KEY_PATH /data/omnigibson.key
# Install Mamba (light conda alternative)
RUN curl -Ls https://micro.mamba.pm/api/micromamba/linux-64/latest | tar -xvj -C / bin/micromamba
ENV MAMBA_ROOT_PREFIX /micromamba
-RUN micromamba create -n omnigibson -c conda-forge python=3.7
+RUN micromamba create -n omnigibson -c conda-forge python=3.10
RUN micromamba shell init --shell=bash --prefix=/micromamba
# Make sure isaac gets properly sourced every time omnigibson gets called
@@ -38,22 +38,24 @@ ENV MAKEFLAGS="-j `nproc`"
RUN micromamba run -n omnigibson micromamba install -c conda-forge boost && \
micromamba run -n omnigibson pip install pyplusplus && \
git clone https://github.com/ompl/ompl.git /ompl && \
- mkdir -p /ompl/build/Release
+ mkdir -p /ompl/build/Release && \
+ sed -i "s/find_program(PYPY/# find_program(PYPY/g" /ompl/CMakeModules/Findpypy.cmake
-# Build and install OMPL
-RUN cd /ompl/build/Release && \
+# Build and install OMPL
+RUN micromamba run -n omnigibson /bin/bash --login -c 'source /isaac-sim/setup_conda_env.sh && (which python > /root/PYTHON_EXEC) && (echo $PYTHONPATH > /root/PYTHONPATH)' && \
+ cd /ompl/build/Release && \
micromamba run -n omnigibson cmake ../.. \
-DCMAKE_INSTALL_PREFIX="$CONDA_PREFIX" \
-DBOOST_ROOT="$CONDA_PREFIX" \
- -DPYTHON_EXEC=/micromamba/envs/omnigibson/bin/python3.7 \
- -DPYTHONPATH=/micromamba/envs/omnigibson/lib/python3.7/site-packages && \
+ -DPYTHON_EXEC=$(cat /root/PYTHON_EXEC) \
+ -DPYTHONPATH=$(cat /root/PYTHONPATH) && \
micromamba run -n omnigibson make -j 4 update_bindings && \
micromamba run -n omnigibson make -j 4 && \
cd py-bindings && \
micromamba run -n omnigibson make install
# Test OMPL
-RUN micromamba run -n omnigibson python -c "import ompl"
+RUN micromamba run -n omnigibson python -c "from ompl import base"
ENTRYPOINT ["micromamba", "run", "-n", "omnigibson"]
diff --git a/docker/prod.Dockerfile b/docker/prod.Dockerfile
index a0eaeef21..9feb45495 100644
--- a/docker/prod.Dockerfile
+++ b/docker/prod.Dockerfile
@@ -1,4 +1,4 @@
-FROM stanfordvl/omnigibson-dev
+FROM stanfordvl/omnigibson-dev:latest
# Copy over omnigibson source
ADD . /omnigibson-src
diff --git a/docker/run_docker.sh b/docker/run_docker.sh
index 146ae218c..0d596cc67 100755
--- a/docker/run_docker.sh
+++ b/docker/run_docker.sh
@@ -26,55 +26,6 @@ do
esac
done
-ICD_PATH_1="/usr/share/vulkan/icd.d/nvidia_icd.json"
-ICD_PATH_2="/etc/vulkan/icd.d/nvidia_icd.json"
-LAYERS_PATH_1="/usr/share/vulkan/icd.d/nvidia_layers.json"
-LAYERS_PATH_2="/usr/share/vulkan/implicit_layer.d/nvidia_layers.json"
-LAYERS_PATH_3="/etc/vulkan/implicit_layer.d/nvidia_layers.json"
-EGL_VENDOR_PATH="/usr/share/glvnd/egl_vendor.d/10_nvidia.json"
-
-# Find the ICD file
-if [ -e "$ICD_PATH_1" ]; then
- ICD_PATH=$ICD_PATH_1
-elif [ -e "$ICD_PATH_2" ]; then
- ICD_PATH=$ICD_PATH_2
-else
- echo "Missing nvidia_icd.json file.";
- echo "Typical paths:";
- echo "- /usr/share/vulkan/icd.d/nvidia_icd.json or";
- echo "- /etc/vulkan/icd.d/nvidia_icd.json";
- echo "You can google nvidia_icd.json for your distro to find the correct path.";
- echo "Consider updating your driver to 525 if you cannot find the file.";
- echo "To continue update the ICD_PATH_1 at the top of the run_docker.sh file and retry";
- exit;
-fi
-
-# Find the layers file
-if [ -e "$LAYERS_PATH_1" ]; then
- LAYERS_PATH=$LAYERS_PATH_1
-elif [ -e "$LAYERS_PATH_2" ]; then
- LAYERS_PATH=$LAYERS_PATH_2
-elif [ -e "$LAYERS_PATH_3" ]; then
- LAYERS_PATH=$LAYERS_PATH_3
-else
- echo "Missing nvidia_layers.json file."
- echo "Typical paths:";
- echo "- /usr/share/vulkan/icd.d/nvidia_layers.json";
- echo "- /usr/share/vulkan/implicit_layer.d/nvidia_layers.json";
- echo "- /etc/vulkan/implicit_layer.d/nvidia_layers.json";
- echo "You can google nvidia_layers.json for your distro to find the correct path.";
- echo "Consider updating your driver to 525 if you cannot find the file.";
- echo "To continue update the LAYERS_PATH_1 at the top of the run_docker.sh file and retry";
- exit;
-fi
-
-if [ ! -e "$EGL_VENDOR_PATH" ]; then
- echo "Missing ${EGL_VENDOR_PATH} file."
- echo "(default path: /usr/share/vulkan/icd.d/nvidia_icd.json)";
- echo "To continue update the EGL_VENDOR_PATH at the top of the run_docker.sh file and retry";
- exit;
-fi
-
# Move directories from their legacy paths.
if [ -e "${DATA_PATH}/og_dataset" ]; then
mv "${DATA_PATH}/og_dataset" "${DATA_PATH}/datasets/og_dataset"
@@ -117,9 +68,6 @@ docker run \
-e DISPLAY=${DOCKER_DISPLAY} \
-e OMNIGIBSON_HEADLESS=${OMNIGIBSON_HEADLESS} \
-v $DATA_PATH/datasets:/data \
- -v ${ICD_PATH}:/etc/vulkan/icd.d/nvidia_icd.json \
- -v ${LAYERS_PATH}:/etc/vulkan/implicit_layer.d/nvidia_layers.json \
- -v ${EGL_VENDOR_PATH}:/usr/share/glvnd/egl_vendor.d/10_nvidia.json \
-v $DATA_PATH/isaac-sim/cache/kit:/isaac-sim/kit/cache/Kit:rw \
-v $DATA_PATH/isaac-sim/cache/ov:/root/.cache/ov:rw \
-v $DATA_PATH/isaac-sim/cache/pip:/root/.cache/pip:rw \
diff --git a/docker/sbatch_example.sh b/docker/sbatch_example.sh
new file mode 100644
index 000000000..5e07dd65a
--- /dev/null
+++ b/docker/sbatch_example.sh
@@ -0,0 +1,71 @@
+#!/usr/bin/env bash
+#SBATCH --account=cvgl
+#SBATCH --partition=svl --qos=normal
+#SBATCH --nodes=1
+#SBATCH --cpus-per-task=8
+#SBATCH --mem=30G
+#SBATCH --gres=gpu:2080ti:1
+
+IMAGE_PATH="/cvgl2/u/cgokmen/omnigibson.sqsh"
+GPU_ID=$(nvidia-smi -L | grep -oP '(?<=GPU-)[a-fA-F0-9\-]+' | head -n 1)
+ISAAC_CACHE_PATH="/scr-ssd/${SLURM_JOB_USER}/isaac_cache_${GPU_ID}"
+
+# Define env kwargs to pass
+declare -A ENVS=(
+ [NVIDIA_DRIVER_CAPABILITIES]=all
+ [NVIDIA_VISIBLE_DEVICES]=0
+ [DISPLAY]=""
+ [OMNIGIBSON_HEADLESS]=1
+)
+for env_var in "${!ENVS[@]}"; do
+ # Add to env kwargs we'll pass to enroot command later
+ ENV_KWARGS="${ENV_KWARGS} --env ${env_var}=${ENVS[${env_var}]}"
+done
+
+# Define mounts to create (maps local directory to container directory)
+declare -A MOUNTS=(
+ [/scr-ssd/og-data-0-2-1]=/data
+ [${ISAAC_CACHE_PATH}/isaac-sim/kit/cache/Kit]=/isaac-sim/kit/cache/Kit
+ [${ISAAC_CACHE_PATH}/isaac-sim/cache/ov]=/root/.cache/ov
+ [${ISAAC_CACHE_PATH}/isaac-sim/cache/pip]=/root/.cache/pip
+ [${ISAAC_CACHE_PATH}/isaac-sim/cache/glcache]=/root/.cache/nvidia/GLCache
+ [${ISAAC_CACHE_PATH}/isaac-sim/cache/computecache]=/root/.nv/ComputeCache
+ [${ISAAC_CACHE_PATH}/isaac-sim/logs]=/root/.nvidia-omniverse/logs
+ [${ISAAC_CACHE_PATH}/isaac-sim/config]=/root/.nvidia-omniverse/config
+ [${ISAAC_CACHE_PATH}/isaac-sim/data]=/root/.local/share/ov/data
+ [${ISAAC_CACHE_PATH}/isaac-sim/documents]=/root/Documents
+ # Feel free to include lines like the below to mount a workspace or a custom OG version
+ # [/cvgl2/u/cgokmen/OmniGibson]=/omnigibson-src
+ # [/cvgl2/u/cgokmen/my-project]=/my-project
+)
+
+MOUNT_KWARGS=""
+for mount in "${!MOUNTS[@]}"; do
+ # Verify mount path in local directory exists, otherwise, create it
+ if [ ! -e "$mount" ]; then
+ mkdir -p ${mount}
+ fi
+ # Add to mount kwargs we'll pass to enroot command later
+ MOUNT_KWARGS="${MOUNT_KWARGS} --mount ${mount}:${MOUNTS[${mount}]}"
+done
+
+# Create the image if it doesn't already exist
+CONTAINER_NAME=omnigibson_${GPU_ID}
+enroot create --force --name ${CONTAINER_NAME} ${IMAGE_PATH}
+
+# Remove leading space in string
+ENV_KWARGS="${ENV_KWARGS:1}"
+MOUNT_KWARGS="${MOUNT_KWARGS:1}"
+
+# The last line here is the command you want to run inside the container.
+# Here I'm running some unit tests.
+enroot start \
+ --root \
+ --rw \
+ ${ENV_KWARGS} \
+ ${MOUNT_KWARGS} \
+ ${CONTAINER_NAME} \
+ micromamba run -n omnigibson /bin/bash --login -c "source /isaac-sim/setup_conda_env.sh && pytest tests/test_object_states.py"
+
+# Clean up the image if possible.
+enroot remove -f ${CONTAINER_NAME}
\ No newline at end of file
diff --git a/docs/getting_started/building_blocks.md b/docs/getting_started/building_blocks.md
index c3fce4b21..d64c57bd8 100644
--- a/docs/getting_started/building_blocks.md
+++ b/docs/getting_started/building_blocks.md
@@ -11,7 +11,7 @@ icon: octicons/package-16
??? question annotate "Why macros?"
- Macros enforce global behavior that is consistent within an individual python process but can differ between processes. This is useful because globally enabling all of **`OmniGibson`**'s features can cause unecessary slowdowns, and so configuring the macros for your specific use case can optimize performance.
+ Macros enforce global behavior that is consistent within an individual python process but can differ between processes. This is useful because globally enabling all of **`OmniGibson`**'s features can cause unnecessary slowdowns, and so configuring the macros for your specific use case can optimize performance.
For example, Omniverse provides a so-called `flatcache` feature which provides significant performance boosts, but cannot be used when fluids or soft bodies are present. So, we ideally should always have `gm.USE_FLATCACHE=True` unless we have fluids or soft bodies in our environment.
@@ -407,7 +407,7 @@ python -m omnigibson.examples.object_states.particle_source_sink_demo
This demo loads in a sink, which is enabled with both the ParticleSource and ParticleSink states. The sink's particle source is located at the faucet spout and spawns a continuous stream of water particles, which is then destroyed ("sunk") by the sink's particle sink located at the drain.
-??? note "Difference bewteen `ParticleApplier/Removers` and `ParticleSource/Sinks`"
+??? note "Difference between `ParticleApplier/Removers` and `ParticleSource/Sinks`"
The key difference between `ParticleApplier/Removers` and `ParticleSource/Sinks` is that `Applier/Removers`
requires contact (if using `ParticleProjectionMethod.ADJACENCY`) or overlap
(if using `ParticleProjectionMethod.PROJECTION`) in order to spawn / remove particles, and generally only spawn
diff --git a/docs/getting_started/slurm.md b/docs/getting_started/slurm.md
new file mode 100644
index 000000000..93963bec0
--- /dev/null
+++ b/docs/getting_started/slurm.md
@@ -0,0 +1,123 @@
+---
+icon: material/server-network
+---
+
+# 🔌 **Running on a SLURM cluster**
+
+_This documentation is a work in progress._
+
+OmniGibson can be run on a SLURM cluster using the _enroot_ container software, which is a replacement
+for Docker that allows containers to be run as the current user rather than as root. _enroot_ needs
+to be installed on your SLURM cluster by an administrator.
+
+With enroot installed, you can follow the below steps to run OmniGibson on SLURM:
+
+1. Download the dataset to a location that is accessible by the cluster nodes. To do this, you can use
+the download_dataset.py script inside OmniGibson's scripts directory and then move the downloaded data to
+the right location. In the below example, /cvgl/ is a networked drive that is accessible by the cluster nodes.
+**For Stanford users, this step is already done for SVL and Viscam nodes**
+```{.shell .annotate}
+OMNIGIBSON_NO_OMNIVERSE=1 python scripts/download_dataset.py
+mv omnigibson/data /cvgl/group/Gibson/og-data-0-2-1
+```
+
+2. (Optional) Distribute the dataset to the individual nodes.
+This will make load times much better than reading from a network drive.
+To do this, run the below command on your SLURM head node (replace `svl` with your partition
+name and `cvgl` with your account name, as well as the paths with the respective network
+and local paths). Confirm via `squeue -u $USER` that all jobs have finished. **This step is already done for SVL and Viscam nodes**
+```{.shell .annotate}
+sinfo -p svl -o "%N,%n" -h | \
+ sed s/,.*//g | \
+ xargs -L1 -I{} \
+ sbatch \
+ --account=cvgl --partition=svl --nodelist={} --mem=8G --cpus-per-task=4 \
+ --wrap 'cp -R /cvgl/group/Gibson/og-data-0-2-1 /scr-ssd/og-data-0-2-1'
+```
+
+3. Download your desired image to a location that is accessible by the cluster nodes. (Replace the path with your own path, and feel free to replace the `action-primitives` tag with your desired branch tag.) You also have the option to mount code into the container (see the next step), meaning the image only needs to provide the right dependencies and environment setup rather than all of the code you want to run.
+```{.shell .annotate}
+enroot import --output /cvgl2/u/cgokmen/omnigibson.sqsh docker://stanfordvl/omnigibson:action-primitives
+```
+
+4. (Optional) If you intend to mount code onto the container, make it available at a location that is accessible by the cluster nodes. You can mount arbitrary code, and you can also mount a custom version of OmniGibson (for the latter, you need to make sure you mount your copy of OmniGibson at /omnigibson-src inside the container). For example:
+```{.shell .annotate}
+git clone https://github.com/StanfordVL/OmniGibson.git /cvgl2/u/cgokmen/OmniGibson
+```
+
+5. Create your launch script. You can start with a copy of the script below. If you want to launch multiple workers, increase the job array option. You should request at least 1 GPU per node, but feel free to edit the other settings. You can mount any additional code you'd like, and you can change the entrypoint so that the container runs your mounted code upon launch; see the mounts section of the script for an example. A copy of this script can be found in docker/sbatch_example.sh.
+```{.shell .annotate}
+#!/usr/bin/env bash
+#SBATCH --account=cvgl
+#SBATCH --partition=svl --qos=normal
+#SBATCH --nodes=1
+#SBATCH --cpus-per-task=8
+#SBATCH --mem=30G
+#SBATCH --gres=gpu:2080ti:1
+
+IMAGE_PATH="/cvgl2/u/cgokmen/omnigibson.sqsh"
+GPU_ID=$(nvidia-smi -L | grep -oP '(?<=GPU-)[a-fA-F0-9\-]+' | head -n 1)
+ISAAC_CACHE_PATH="/scr-ssd/${SLURM_JOB_USER}/isaac_cache_${GPU_ID}"
+
+# Define env kwargs to pass
+declare -A ENVS=(
+ [NVIDIA_DRIVER_CAPABILITIES]=all
+ [NVIDIA_VISIBLE_DEVICES]=0
+ [DISPLAY]=""
+ [OMNIGIBSON_HEADLESS]=1
+)
+for env_var in "${!ENVS[@]}"; do
+ # Add to env kwargs we'll pass to enroot command later
+ ENV_KWARGS="${ENV_KWARGS} --env ${env_var}=${ENVS[${env_var}]}"
+done
+
+# Define mounts to create (maps local directory to container directory)
+declare -A MOUNTS=(
+ [/scr-ssd/og-data-0-2-1]=/data
+ [${ISAAC_CACHE_PATH}/isaac-sim/kit/cache/Kit]=/isaac-sim/kit/cache/Kit
+ [${ISAAC_CACHE_PATH}/isaac-sim/cache/ov]=/root/.cache/ov
+ [${ISAAC_CACHE_PATH}/isaac-sim/cache/pip]=/root/.cache/pip
+ [${ISAAC_CACHE_PATH}/isaac-sim/cache/glcache]=/root/.cache/nvidia/GLCache
+ [${ISAAC_CACHE_PATH}/isaac-sim/cache/computecache]=/root/.nv/ComputeCache
+ [${ISAAC_CACHE_PATH}/isaac-sim/logs]=/root/.nvidia-omniverse/logs
+ [${ISAAC_CACHE_PATH}/isaac-sim/config]=/root/.nvidia-omniverse/config
+ [${ISAAC_CACHE_PATH}/isaac-sim/data]=/root/.local/share/ov/data
+ [${ISAAC_CACHE_PATH}/isaac-sim/documents]=/root/Documents
+ # Feel free to include lines like the below to mount a workspace or a custom OG version
+ # [/cvgl2/u/cgokmen/OmniGibson]=/omnigibson-src
+ # [/cvgl2/u/cgokmen/my-project]=/my-project
+)
+
+MOUNT_KWARGS=""
+for mount in "${!MOUNTS[@]}"; do
+ # Verify mount path in local directory exists, otherwise, create it
+ if [ ! -e "$mount" ]; then
+ mkdir -p ${mount}
+ fi
+ # Add to mount kwargs we'll pass to enroot command later
+ MOUNT_KWARGS="${MOUNT_KWARGS} --mount ${mount}:${MOUNTS[${mount}]}"
+done
+
+# Create the image if it doesn't already exist
+CONTAINER_NAME=omnigibson_${GPU_ID}
+enroot create --force --name ${CONTAINER_NAME} ${IMAGE_PATH}
+
+# Remove leading space in string
+ENV_KWARGS="${ENV_KWARGS:1}"
+MOUNT_KWARGS="${MOUNT_KWARGS:1}"
+
+# The last line here is the command you want to run inside the container.
+# Here I'm running some unit tests.
+enroot start \
+ --root \
+ --rw \
+ ${ENV_KWARGS} \
+ ${MOUNT_KWARGS} \
+ ${CONTAINER_NAME} \
+    micromamba run -n omnigibson /bin/bash --login -c "source /isaac-sim/setup_conda_env.sh && pytest tests/test_object_states.py"
+
+# Clean up the image if possible.
+enroot remove -f ${CONTAINER_NAME}
+```
+
+6. Launch your job using `sbatch your_script.sh` - and profit!
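+
+For example, once the script above is saved as `your_script.sh`, a typical workflow (standard SLURM commands, shown as a sketch) looks like:
+```{.shell .annotate}
+sbatch your_script.sh      # submit the job
+squeue -u $USER            # confirm it is queued / running
+tail -f slurm-<jobid>.out  # follow the output (SLURM's default output file in the submission directory)
+```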
\ No newline at end of file
diff --git a/omnigibson/action_primitives/action_primitive_set_base.py b/omnigibson/action_primitives/action_primitive_set_base.py
new file mode 100644
index 000000000..9d842514a
--- /dev/null
+++ b/omnigibson/action_primitives/action_primitive_set_base.py
@@ -0,0 +1,98 @@
+import inspect
+from abc import ABCMeta, abstractmethod
+from enum import IntEnum
+from typing import List
+
+from future.utils import with_metaclass
+from omnigibson import Environment
+
+from omnigibson.robots import BaseRobot
+from omnigibson.scenes.interactive_traversable_scene import InteractiveTraversableScene
+from omnigibson.tasks.task_base import BaseTask
+
+REGISTERED_PRIMITIVE_SETS = {}
+
+class ActionPrimitiveError(ValueError):
+ class Reason(IntEnum):
+ # A primitive could not be executed because a precondition was not satisfied, e.g. PLACE was called without an
+ # object currently in hand.
+ PRE_CONDITION_ERROR = 0
+
+ # A sampling error occurred: e.g. a position to place an object could not be found, or the robot could not
+ # find a pose near the object to navigate to.
+ SAMPLING_ERROR = 1
+
+ # The planning for a primitive failed possibly due to not being able to find a path.
+ PLANNING_ERROR = 2
+
+ # The planning for a primitive was successfully completed, but an error occurred during execution.
+ EXECUTION_ERROR = 3
+
+ # The execution of the primitive happened correctly, but while checking post-conditions, an error was found.
+ POST_CONDITION_ERROR = 4
+
+ def __init__(self, reason: Reason, message, metadata=None):
+ self.reason = reason
+ self.metadata = metadata if metadata is not None else {}
+ super().__init__(f"{reason.name}: {message}. Additional info: {metadata}")
+
+
+class ActionPrimitiveErrorGroup(ValueError):
+ def __init__(self, exceptions: List[ActionPrimitiveError]) -> None:
+ self._exceptions = tuple(exceptions)
+ submessages = [f"Attempt {i}: {e}" for i, e in enumerate(exceptions)]
+ submessages = "\n\n".join(submessages)
+ message = "An error occurred during each attempt of this action.\n\n" + submessages
+ super().__init__(message)
+
+ @property
+ def exceptions(self):
+ return self._exceptions
+
+
+class BaseActionPrimitiveSet(with_metaclass(ABCMeta, object)):
+ def __init_subclass__(cls, **kwargs):
+ """
+ Registers all subclasses as part of this registry. This is useful to decouple internal codebase from external
+ user additions. This way, users can add their custom primitive set by simply extending this class,
+ and it will automatically be registered internally. This allows users to then specify their primitive set
+        directly in string form in, e.g., their config files, without having to manually set the str-to-class mapping
+ in our code.
+ """
+ if not inspect.isabstract(cls):
+ REGISTERED_PRIMITIVE_SETS[cls.__name__] = cls
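+        # For example, a user-defined subclass (hypothetical name)
+        #     class MyPrimitiveSet(BaseActionPrimitiveSet): ...
+        # would automatically become available as REGISTERED_PRIMITIVE_SETS["MyPrimitiveSet"].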
+
+ def __init__(self, env):
+ self.env : Environment = env
+
+ @property
+ def robot(self):
+ # Currently returns the first robot in the environment, but can be scaled to multiple robots
+ # by creating multiple action generators and passing in a robot index etc.
+ return self.env.robots[0]
+
+ @abstractmethod
+ def get_action_space(self):
+ """Get the higher-level action space as an OpenAI Gym Space object."""
+ pass
+
+ @abstractmethod
+ def apply(self, action):
+ """
+ Apply a primitive action.
+
+ Given a higher-level action in the same format as the action space (e.g. as a number),
+ generates a sequence of lower level actions (or raise ActionPrimitiveError). The action
+ will get resolved and passed into apply_ref.
+ """
+ pass
+
+ @abstractmethod
+ def apply_ref(self, action, *args):
+ """
+ Apply a primitive action by reference.
+
+ Given a higher-level action from the corresponding action set enum and any necessary arguments,
+ generates a sequence of lower level actions (or raise ActionPrimitiveError)
+ """
+ pass
diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py
new file mode 100644
index 000000000..e7a8d111c
--- /dev/null
+++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py
@@ -0,0 +1,1834 @@
+"""
+WARNING!
+The StarterSemanticActionPrimitive is a work-in-progress and is only provided as an example.
+It currently only works with Fetch and Tiago with their JointControllers set to delta mode.
+See provided tiago_primitives.yaml config file for an example. See examples/action_primitives for
+runnable examples.
+"""
+from functools import cached_property
+import inspect
+import logging
+import random
+from aenum import IntEnum, auto
+from math import ceil
+import cv2
+from matplotlib import pyplot as plt
+
+import gym
+import numpy as np
+from scipy.spatial.transform import Rotation, Slerp
+
+import omnigibson as og
+from omnigibson import object_states
+from omnigibson.action_primitives.action_primitive_set_base import ActionPrimitiveError, ActionPrimitiveErrorGroup, BaseActionPrimitiveSet
+from omnigibson.controllers import JointController, DifferentialDriveController
+from omnigibson.macros import create_module_macros
+from omnigibson.utils.object_state_utils import sample_cuboid_for_predicate
+from omnigibson.objects.object_base import BaseObject
+from omnigibson.robots import BaseRobot, Fetch, Tiago
+from omnigibson.tasks.behavior_task import BehaviorTask
+from omnigibson.utils.motion_planning_utils import (
+ plan_base_motion,
+ plan_arm_motion,
+ plan_arm_motion_ik,
+ set_base_and_detect_collision,
+ detect_robot_collision_in_sim
+)
+
+import omnigibson.utils.transform_utils as T
+from omnigibson.utils.control_utils import IKSolver
+from omnigibson.utils.grasping_planning_utils import (
+ get_grasp_poses_for_object_sticky,
+ get_grasp_position_for_open
+)
+from omnigibson.controllers.controller_base import ControlType
+from omnigibson.utils.control_utils import FKSolver
+
+from omni.usd.commands import CopyPrimCommand, CreatePrimCommand
+from omni.isaac.core.utils.prims import get_prim_at_path
+from omnigibson.utils.ui_utils import create_module_logger
+from pxr import Gf
+
+from omnigibson.objects.usd_object import USDObject
+
+m = create_module_macros(module_path=__file__)
+
+m.DEFAULT_BODY_OFFSET_FROM_FLOOR = 0.05
+
+m.KP_LIN_VEL = 0.3
+m.KP_ANGLE_VEL = 0.2
+
+m.MAX_STEPS_FOR_SETTLING = 500
+
+m.MAX_CARTESIAN_HAND_STEP = 0.002
+m.MAX_STEPS_FOR_HAND_MOVE_JOINT = 500
+m.MAX_STEPS_FOR_HAND_MOVE_IK = 1000
+m.MAX_STEPS_FOR_GRASP_OR_RELEASE = 30
+m.MAX_STEPS_FOR_WAYPOINT_NAVIGATION = 500
+m.MAX_ATTEMPTS_FOR_OPEN_CLOSE = 20
+
+m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_WITH_OBJECT_AND_PREDICATE = 20
+m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_NEAR_OBJECT = 200
+m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_IN_ROOM = 60
+m.PREDICATE_SAMPLING_Z_OFFSET = 0.02
+
+m.GRASP_APPROACH_DISTANCE = 0.2
+m.OPEN_GRASP_APPROACH_DISTANCE = 0.4
+
+m.DEFAULT_DIST_THRESHOLD = 0.05
+m.DEFAULT_ANGLE_THRESHOLD = 0.05
+m.LOW_PRECISION_DIST_THRESHOLD = 0.1
+m.LOW_PRECISION_ANGLE_THRESHOLD = 0.2
+
+m.TIAGO_TORSO_FIXED = False
+m.JOINT_POS_DIFF_THRESHOLD = 0.005
+m.JOINT_CONTROL_MIN_ACTION = 0.0
+m.MAX_ALLOWED_JOINT_ERROR_FOR_LINEAR_MOTION = np.deg2rad(45)
+
+log = create_module_logger(module_name=__name__)
+
+
+def indented_print(msg, *args, **kwargs):
+ log.debug(" " * len(inspect.stack()) + str(msg), *args, **kwargs)
+
+class RobotCopy:
+ """A data structure for storing information about a robot copy, used for collision checking in planning."""
+ def __init__(self):
+ self.prims = {}
+ self.meshes = {}
+ self.relative_poses = {}
+ self.links_relative_poses = {}
+ self.reset_pose = {
+ "original": ([0, 0, -5.0], [0, 0, 0, 1]),
+ "simplified": ([5, 0, -5.0], [0, 0, 0, 1]),
+ }
+
+class PlanningContext(object):
+ """
+ A context manager that sets up a robot copy for collision checking in planning.
+ """
+ def __init__(self, robot, robot_copy, robot_copy_type="original"):
+ self.robot = robot
+ self.robot_copy = robot_copy
+ self.robot_copy_type = robot_copy_type if robot_copy_type in robot_copy.prims.keys() else "original"
+ self.disabled_collision_pairs_dict = {}
+
+ def __enter__(self):
+ self._assemble_robot_copy()
+ self._construct_disabled_collision_pairs()
+ return self
+
+ def __exit__(self, *args):
+ self._set_prim_pose(self.robot_copy.prims[self.robot_copy_type], self.robot_copy.reset_pose[self.robot_copy_type])
+
+ def _assemble_robot_copy(self):
+ if m.TIAGO_TORSO_FIXED:
+ fk_descriptor = "left_fixed"
+ else:
+ fk_descriptor = "combined" if "combined" in self.robot.robot_arm_descriptor_yamls else self.robot.default_arm
+ self.fk_solver = FKSolver(
+ robot_description_path=self.robot.robot_arm_descriptor_yamls[fk_descriptor],
+ robot_urdf_path=self.robot.urdf_path,
+ )
+
+ # TODO: Remove the need for this after refactoring the FK / descriptors / etc.
+ arm_links = self.robot.manipulation_link_names
+
+ if m.TIAGO_TORSO_FIXED:
+ assert self.arm == "left", "Fixed torso mode only supports left arm!"
+ joint_control_idx = self.robot.arm_control_idx["left"]
+ joint_pos = np.array(self.robot.get_joint_positions()[joint_control_idx])
+ else:
+ joint_combined_idx = np.concatenate([self.robot.trunk_control_idx, self.robot.arm_control_idx[fk_descriptor]])
+ joint_pos = np.array(self.robot.get_joint_positions()[joint_combined_idx])
+ link_poses = self.fk_solver.get_link_poses(joint_pos, arm_links)
+
+ # Set position of robot copy root prim
+ self._set_prim_pose(self.robot_copy.prims[self.robot_copy_type], self.robot.get_position_orientation())
+
+ # Assemble robot meshes
+ for link_name, meshes in self.robot_copy.meshes[self.robot_copy_type].items():
+ for mesh_name, copy_mesh in meshes.items():
+ # Skip grasping frame (this is necessary for Tiago, but should be cleaned up in the future)
+ if "grasping_frame" in link_name:
+ continue
+ # Set poses of meshes relative to the robot to construct the robot
+ link_pose = link_poses[link_name] if link_name in arm_links else self.robot_copy.links_relative_poses[self.robot_copy_type][link_name]
+ mesh_copy_pose = T.pose_transform(*link_pose, *self.robot_copy.relative_poses[self.robot_copy_type][link_name][mesh_name])
+ self._set_prim_pose(copy_mesh, mesh_copy_pose)
+
+ def _set_prim_pose(self, prim, pose):
+ translation = Gf.Vec3d(*np.array(pose[0], dtype=float))
+ prim.GetAttribute("xformOp:translate").Set(translation)
+ orientation = np.array(pose[1], dtype=float)[[3, 0, 1, 2]]
+ prim.GetAttribute("xformOp:orient").Set(Gf.Quatd(*orientation))
+
+ def _construct_disabled_collision_pairs(self):
+ robot_meshes_copy = self.robot_copy.meshes[self.robot_copy_type]
+
+ # Filter out collision pairs of meshes part of the same link
+ for meshes in robot_meshes_copy.values():
+ for mesh in meshes.values():
+ self.disabled_collision_pairs_dict[mesh.GetPrimPath().pathString] = [m.GetPrimPath().pathString for m in meshes.values()]
+
+ # Filter out all self-collisions
+ if self.robot_copy_type == "simplified":
+ all_meshes = [mesh.GetPrimPath().pathString for link in robot_meshes_copy.keys() for mesh in robot_meshes_copy[link].values()]
+ for link in robot_meshes_copy.keys():
+ for mesh in robot_meshes_copy[link].values():
+ self.disabled_collision_pairs_dict[mesh.GetPrimPath().pathString] += all_meshes
+ # Filter out collision pairs of meshes part of disabled collision pairs
+ else:
+ for pair in self.robot.disabled_collision_pairs:
+ link_1 = pair[0]
+ link_2 = pair[1]
+ if link_1 in robot_meshes_copy.keys() and link_2 in robot_meshes_copy.keys():
+ for mesh in robot_meshes_copy[link_1].values():
+ self.disabled_collision_pairs_dict[mesh.GetPrimPath().pathString] += [m.GetPrimPath().pathString for m in robot_meshes_copy[link_2].values()]
+
+ for mesh in robot_meshes_copy[link_2].values():
+ self.disabled_collision_pairs_dict[mesh.GetPrimPath().pathString] += [m.GetPrimPath().pathString for m in robot_meshes_copy[link_1].values()]
+
+ # Filter out colliders all robot copy meshes should ignore
+ disabled_colliders = []
+
+ # Disable original robot colliders so copy can't collide with it
+ disabled_colliders += [link.prim_path for link in self.robot.links.values()]
+ filter_categories = ["floors"]
+ for obj in og.sim.scene.objects:
+ if obj.category in filter_categories:
+ disabled_colliders += [link.prim_path for link in obj.links.values()]
+
+ # Disable object in hand
+ obj_in_hand = self.robot._ag_obj_in_hand[self.robot.default_arm]
+ if obj_in_hand is not None:
+ disabled_colliders += [link.prim_path for link in obj_in_hand.links.values()]
+
+ for colliders in self.disabled_collision_pairs_dict.values():
+ colliders += disabled_colliders
+
+class StarterSemanticActionPrimitiveSet(IntEnum):
+ _init_ = 'value __doc__'
+ GRASP = auto(), "Grasp an object"
+ PLACE_ON_TOP = auto(), "Place the currently grasped object on top of another object"
+ PLACE_INSIDE = auto(), "Place the currently grasped object inside another object"
+ OPEN = auto(), "Open an object"
+ CLOSE = auto(), "Close an object"
+ NAVIGATE_TO = auto(), "Navigate to an object (mostly for debugging purposes - other primitives also navigate first)"
+ RELEASE = auto(), "Release an object, letting it fall to the ground. You can then grasp it again, as a way of reorienting your grasp of the object."
+ TOGGLE_ON = auto(), "Toggle an object on"
+ TOGGLE_OFF = auto(), "Toggle an object off"
+
+class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
+ def __init__(self, env, add_context=False, enable_head_tracking=True, always_track_eef=False, task_relevant_objects_only=False):
+ """
+ Initializes a StarterSemanticActionPrimitives generator.
+
+ Args:
+ env (Environment): The environment that the primitives will run on.
+ add_context (bool): Whether to add text context to the return value. Defaults to False.
+ enable_head_tracking (bool): Whether to enable head tracking. Defaults to True.
+ always_track_eef (bool, optional): Whether to always track the end effector, as opposed
+ to switching between target object and end effector based on context. Defaults to False.
+ task_relevant_objects_only (bool): Whether to only consider objects relevant to the task
+ when computing the action space. Defaults to False.
+ """
+ log.warning(
+ "The StarterSemanticActionPrimitive is a work-in-progress and is only provided as an example. "
+ "It currently only works with Fetch and Tiago with their JointControllers set to delta mode."
+ )
+ super().__init__(env)
+ self.controller_functions = {
+ StarterSemanticActionPrimitiveSet.GRASP: self._grasp,
+ StarterSemanticActionPrimitiveSet.PLACE_ON_TOP: self._place_on_top,
+ StarterSemanticActionPrimitiveSet.PLACE_INSIDE: self._place_inside,
+ StarterSemanticActionPrimitiveSet.OPEN: self._open,
+ StarterSemanticActionPrimitiveSet.CLOSE: self._close,
+ StarterSemanticActionPrimitiveSet.NAVIGATE_TO: self._navigate_to_obj,
+ StarterSemanticActionPrimitiveSet.RELEASE: self._execute_release,
+ StarterSemanticActionPrimitiveSet.TOGGLE_ON: self._toggle_on,
+ StarterSemanticActionPrimitiveSet.TOGGLE_OFF: self._toggle_off,
+ }
+ # Validate the robot
+ assert isinstance(self.robot, (Fetch, Tiago)), "StarterSemanticActionPrimitives only works with Fetch and Tiago."
+ assert isinstance(self.robot.controllers["base"], (JointController, DifferentialDriveController)), \
+ "StarterSemanticActionPrimitives only works with a JointController or DifferentialDriveController at the robot base."
+ self._base_controller_is_joint = isinstance(self.robot.controllers["base"], JointController)
+ if self._base_controller_is_joint:
+ assert self.robot.controllers["base"].control_type == ControlType.VELOCITY, \
+ "StarterSemanticActionPrimitives only works with a base JointController with velocity mode."
+ assert not self.robot.controllers["base"].use_delta_commands, \
+ "StarterSemanticActionPrimitives only works with a base JointController with absolute mode."
+ assert self.robot.controllers["base"].command_dim == 3, \
+ "StarterSemanticActionPrimitives only works with a base JointController with 3 dof (x, y, theta)."
+
+ self.arm = self.robot.default_arm
+ self.robot_model = self.robot.model_name
+ self.robot_base_mass = self.robot._links["base_link"].mass
+ self.add_context = add_context
+
+ self._task_relevant_objects_only = task_relevant_objects_only
+
+ self._enable_head_tracking = enable_head_tracking
+ self._always_track_eef = always_track_eef
+ self._tracking_object = None
+
+ self.robot_copy = self._load_robot_copy()
+
+ def _postprocess_action(self, action):
+ """Postprocesses action by applying head tracking and adding context if necessary."""
+ action = self._overwrite_head_action(action)
+
+ if not self.add_context:
+ return action
+
+ stack = inspect.stack()
+ action_type = "manip:"
+ context_function = stack[1].function
+
+ for frame_info in stack[1:]:
+ function_name = frame_info.function
+ # TODO: Make this stop at apply_ref
+ if function_name in ["_grasp", "_place_on_top", "_place_or_top", "_open_or_close"]:
+ break
+ if "nav" in function_name:
+ action_type = "nav"
+
+ context = action_type + context_function
+ return action, context
+
+ def _load_robot_copy(self):
+ """Loads a copy of the robot that can be manipulated into arbitrary configurations for collision checking in planning."""
+ robot_copy = RobotCopy()
+
+ robots_to_copy = {
+ "original": {
+ "robot": self.robot,
+ "copy_path": "/World/robot_copy"
+ }
+ }
+ if hasattr(self.robot, 'simplified_mesh_usd_path'):
+ simplified_robot = {
+ "robot": USDObject("simplified_copy", self.robot.simplified_mesh_usd_path),
+ "copy_path": "/World/simplified_robot_copy"
+ }
+ robots_to_copy['simplified'] = simplified_robot
+
+ for robot_type, rc in robots_to_copy.items():
+ copy_robot = None
+ copy_robot_meshes = {}
+ copy_robot_meshes_relative_poses = {}
+ copy_robot_links_relative_poses = {}
+
+ # Create prim under which robot meshes are nested and set position
+ CreatePrimCommand("Xform", rc['copy_path']).do()
+ copy_robot = get_prim_at_path(rc['copy_path'])
+ reset_pose = robot_copy.reset_pose[robot_type]
+ translation = Gf.Vec3d(*np.array(reset_pose[0], dtype=float))
+ copy_robot.GetAttribute("xformOp:translate").Set(translation)
+ orientation = np.array(reset_pose[1], dtype=float)[[3, 0, 1, 2]]
+ copy_robot.GetAttribute("xformOp:orient").Set(Gf.Quatd(*orientation))
+
+ robot_to_copy = None
+ if robot_type == "simplified":
+ robot_to_copy = rc['robot']
+ og.sim.import_object(robot_to_copy)
+ else:
+ robot_to_copy = rc['robot']
+
+ # Copy robot meshes
+ for link in robot_to_copy.links.values():
+ link_name = link.prim_path.split("/")[-1]
+ for mesh_name, mesh in link.collision_meshes.items():
+ split_path = mesh.prim_path.split("/")
+ # Do not copy grasping frame (this is necessary for Tiago, but should be cleaned up in the future)
+ if "grasping_frame" in link_name:
+ continue
+
+ copy_mesh_path = rc['copy_path'] + "/" + link_name
+ copy_mesh_path += f"_{split_path[-1]}" if split_path[-1] != "collisions" else ""
+ CopyPrimCommand(mesh.prim_path, path_to=copy_mesh_path).do()
+ copy_mesh = get_prim_at_path(copy_mesh_path)
+ relative_pose = T.relative_pose_transform(*mesh.get_position_orientation(), *link.get_position_orientation())
+ relative_pose = (relative_pose[0], np.array([0, 0, 0, 1]))
+ if link_name not in copy_robot_meshes.keys():
+ copy_robot_meshes[link_name] = {mesh_name: copy_mesh}
+ copy_robot_meshes_relative_poses[link_name] = {mesh_name: relative_pose}
+ else:
+ copy_robot_meshes[link_name][mesh_name] = copy_mesh
+ copy_robot_meshes_relative_poses[link_name][mesh_name] = relative_pose
+
+ copy_robot_links_relative_poses[link_name] = T.relative_pose_transform(*link.get_position_orientation(), *self.robot.get_position_orientation())
+
+ if robot_type == "simplified":
+ og.sim.remove_object(robot_to_copy)
+
+ robot_copy.prims[robot_type] = copy_robot
+ robot_copy.meshes[robot_type] = copy_robot_meshes
+ robot_copy.relative_poses[robot_type] = copy_robot_meshes_relative_poses
+ robot_copy.links_relative_poses[robot_type] = copy_robot_links_relative_poses
+
+ og.sim.step()
+ return robot_copy
+
+ def get_action_space(self):
+ # TODO: Figure out how to implement what happens when the set of objects in scene changes.
+ if self._task_relevant_objects_only:
+ assert isinstance(self.env.task, BehaviorTask), "Activity relevant objects can only be used for BEHAVIOR tasks"
+ self.addressable_objects = sorted(set(self.env.task.object_scope.values()), key=lambda obj: obj.name)
+ else:
+ self.addressable_objects = sorted(set(self.env.scene.objects_by_name.values()), key=lambda obj: obj.name)
+
+ # Filter out the robots.
+ self.addressable_objects = [obj for obj in self.addressable_objects if not isinstance(obj, BaseRobot)]
+
+ self.num_objects = len(self.addressable_objects)
+ return gym.spaces.Tuple(
+ [gym.spaces.Discrete(self.num_objects), gym.spaces.Discrete(len(StarterSemanticActionPrimitiveSet))]
+ )
+
+ def get_action_from_primitive_and_object(self, primitive: StarterSemanticActionPrimitiveSet, obj: BaseObject):
+ assert obj in self.addressable_objects
+ primitive_int = int(primitive)
+ return primitive_int, self.addressable_objects.index(obj)
+
+ def _get_obj_in_hand(self):
+ """
+ Get object in the robot's hand
+
+ Returns:
+ StatefulObject or None: Object if robot is holding something or None if it is not
+ """
+ obj_in_hand = self.robot._ag_obj_in_hand[self.arm] # TODO(MP): Expose this interface.
+ return obj_in_hand
+
+ def apply(self, action):
+ # Decompose the tuple
+ action_idx, obj_idx = action
+
+ # Find the target object.
+ target_obj = self.addressable_objects[obj_idx]
+
+ # Find the appropriate action generator.
+ action = StarterSemanticActionPrimitiveSet(action_idx)
+ return self.apply_ref(action, target_obj)
+
+ def apply_ref(self, prim, *args, attempts=3):
+ """
+ Yields action for robot to execute the primitive with the given arguments.
+
+ Args:
+ prim (StarterSemanticActionPrimitiveSet): Primitive to execute
+ args: Arguments for the primitive
+ attempts (int): Number of attempts to make before raising an error
+
+ Yields:
+            np.array or None: Action array for one step for the robot to execute the primitive or None if primitive completed
+
+ Raises:
+ ActionPrimitiveError: If primitive fails to execute
+ """
+ assert attempts > 0, "Must make at least one attempt"
+ ctrl = self.controller_functions[prim]
+
+ errors = []
+ for _ in range(attempts):
+ # Attempt
+ success = False
+ try:
+ yield from ctrl(*args)
+ success = True
+ except ActionPrimitiveError as e:
+ errors.append(e)
+
+ try:
+ # If we're not holding anything, release the hand so it doesn't stick to anything else.
+ if not self._get_obj_in_hand():
+ yield from self._execute_release()
+ except ActionPrimitiveError:
+ pass
+
+ try:
+ # Make sure we retract the arm after every step
+ yield from self._reset_hand()
+ except ActionPrimitiveError:
+ pass
+
+ try:
+ # Settle before returning.
+ yield from self._settle_robot()
+ except ActionPrimitiveError:
+ pass
+
+ # Stop on success
+ if success:
+ return
+
+ raise ActionPrimitiveErrorGroup(errors)
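+
+    # A minimal usage sketch (the object name here is hypothetical; assumes a compatible robot and scene are loaded):
+    #     primitives = StarterSemanticActionPrimitives(env)
+    #     apple = env.scene.object_registry("name", "apple")
+    #     for action in primitives.apply_ref(StarterSemanticActionPrimitiveSet.GRASP, apple):
+    #         env.step(action)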
+
+ def _open(self, obj):
+ yield from self._open_or_close(obj, True)
+
+ def _close(self, obj):
+ yield from self._open_or_close(obj, False)
+
+ def _open_or_close(self, obj, should_open):
+ # Update the tracking to track the eef.
+ self._tracking_object = self.robot
+
+ if self._get_obj_in_hand():
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
+ "Cannot open or close an object while holding an object",
+ {"object in hand": self._get_obj_in_hand().name},
+ )
+
+ # Open the hand first
+ yield from self._execute_release()
+
+ for _ in range(m.MAX_ATTEMPTS_FOR_OPEN_CLOSE):
+ try:
+ # TODO: This needs to be fixed. Many assumptions (None relevant joint, 3 waypoints, etc.)
+ if should_open:
+ grasp_data = get_grasp_position_for_open(self.robot, obj, should_open, None)
+ else:
+ grasp_data = get_grasp_position_for_open(self.robot, obj, should_open, None, num_waypoints=3)
+
+ if grasp_data is None:
+ # We were trying to do something but didn't have the data.
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.SAMPLING_ERROR,
+ "Could not sample grasp position for target object",
+ {"target object": obj.name},
+ )
+
+ relevant_joint, grasp_pose, target_poses, object_direction, grasp_required, pos_change = grasp_data
+ if abs(pos_change) < 0.1:
+ indented_print("Yaw change is small and done,", pos_change)
+ return
+
+ # Prepare data for the approach later.
+ approach_pos = grasp_pose[0] + object_direction * m.OPEN_GRASP_APPROACH_DISTANCE
+ approach_pose = (approach_pos, grasp_pose[1])
+
+ # If the grasp pose is too far, navigate
+ yield from self._navigate_if_needed(obj, pose_on_obj=grasp_pose)
+
+ yield from self._move_hand(grasp_pose, stop_if_stuck=True)
+
+ # We can pre-grasp in sticky grasping mode only for opening
+ if should_open:
+ yield from self._execute_grasp()
+
+ # Since the grasp pose is slightly off the object, we want to move towards the object, around 5cm.
+ # It's okay if we can't go all the way because we run into the object.
+ yield from self._navigate_if_needed(obj, pose_on_obj=approach_pose)
+
+ if should_open:
+ yield from self._move_hand_linearly_cartesian(approach_pose, ignore_failure=False, stop_on_contact=True, stop_if_stuck=True)
+ else:
+ yield from self._move_hand_linearly_cartesian(approach_pose, ignore_failure=False, stop_if_stuck=True)
+
+ # Step once to update
+ empty_action = self._empty_action()
+ yield self._postprocess_action(empty_action)
+
+ for i, target_pose in enumerate(target_poses):
+ yield from self._move_hand_linearly_cartesian(target_pose, ignore_failure=False, stop_if_stuck=True)
+
+ # Moving to target pose often fails. This might leave the robot's motors with torques that
+ # try to get to a far-away position thus applying large torques, but unable to move due to
+ # the sticky grasp joint. Thus if we release the joint, the robot might suddenly launch in an
+ # arbitrary direction. To avoid this, we command the hand to apply torques with its current
+ # position as its target. This prevents the hand from jerking into some other position when we do a release.
+ yield from self._move_hand_linearly_cartesian(
+ self.robot.eef_links[self.arm].get_position_orientation(),
+ ignore_failure=True,
+ stop_if_stuck=True
+ )
+
+ if should_open:
+ yield from self._execute_release()
+ yield from self._move_base_backward()
+
+ except ActionPrimitiveError as e:
+ indented_print(e)
+ if should_open:
+ yield from self._execute_release()
+ yield from self._move_base_backward()
+ else:
+ yield from self._move_hand_backward()
+
+ if obj.states[object_states.Open].get_value() != should_open:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.POST_CONDITION_ERROR,
+ "Despite executing the planned trajectory, the object did not open or close as expected. Maybe try again",
+ {"target object": obj.name, "is it currently open": obj.states[object_states.Open].get_value()},
+ )
+
+ # TODO: Figure out how to generalize out of this "backing out" behavior.
+ def _move_base_backward(self, steps=5, speed=0.2):
+ """
+        Yields action for the robot to move its base backwards for a fixed number of steps.
+
+ Args:
+ steps (int): steps to move base
+ speed (float): base speed
+
+ Returns:
+            np.array or None: Action array for one step for the robot to move its base, or None when the backward motion is complete
+ """
+ for _ in range(steps):
+ action = self._empty_action()
+ action[self.robot.controller_action_idx["gripper_{}".format(self.arm)]] = 1.0
+ action[self.robot.base_control_idx[0]] = -speed
+ yield self._postprocess_action(action)
+
+ def _move_hand_backward(self, steps=5, speed=0.2):
+ """
+        Yields action for the robot to move its hand backwards.
+
+ Args:
+ steps (int): steps to move eef
+ speed (float): eef speed
+
+ Returns:
+            np.array or None: Action array for one step for the robot to move its hand, or None when the backward motion is complete
+ """
+ for _ in range(steps):
+ action = self._empty_action()
+ action[self.robot.controller_action_idx["gripper_{}".format(self.arm)]] = 1.0
+ action[self.robot.controller_action_idx["arm_{}".format(self.arm)][0]] = -speed
+ yield self._postprocess_action(action)
+
+ def _move_hand_upward(self, steps=5, speed=0.1):
+ """
+ Yields action for the robot to move hand upward.
+
+ Args:
+ steps (int): steps to move eef
+ speed (float): eef speed
+
+ Returns:
+            np.array or None: Action array for one step for the robot to move its hand, or None when the upward motion is complete
+ """
+ # TODO: Combine these movement functions.
+ for _ in range(steps):
+ action = self._empty_action()
+ action[self.robot.controller_action_idx["gripper_{}".format(self.arm)]] = 1.0
+ action[self.robot.controller_action_idx["arm_{}".format(self.arm)][2]] = speed
+ yield self._postprocess_action(action)
+
+ def _grasp(self, obj):
+ """
+ Yields action for the robot to navigate to object if needed, then to grasp it
+
+ Args:
+            obj (StatefulObject): Object for robot to grasp
+
+ Returns:
+ np.array or None: Action array for one step for the robot to grasp or None if grasp completed
+ """
+ # Update the tracking to track the object.
+ self._tracking_object = obj
+
+ # Don't do anything if the object is already grasped.
+ obj_in_hand = self._get_obj_in_hand()
+ if obj_in_hand is not None:
+ if obj_in_hand == obj:
+ return
+ else:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
+ "Cannot grasp when your hand is already full",
+ {"target object": obj.name, "object currently in hand": obj_in_hand.name},
+ )
+
+ # Open the hand first
+ yield from self._execute_release()
+
+ # Allow grasping from suboptimal extents if we've tried enough times.
+ grasp_poses = get_grasp_poses_for_object_sticky(obj)
+ grasp_pose, object_direction = random.choice(grasp_poses)
+
+ # Prepare data for the approach later.
+ approach_pos = grasp_pose[0] + object_direction * m.GRASP_APPROACH_DISTANCE
+ approach_pose = (approach_pos, grasp_pose[1])
+
+ # If the grasp pose is too far, navigate.
+ yield from self._navigate_if_needed(obj, pose_on_obj=grasp_pose)
+ yield from self._move_hand(grasp_pose)
+
+ # We can pre-grasp in sticky grasping mode.
+ yield from self._execute_grasp()
+
+ # Since the grasp pose is slightly off the object, we want to move towards the object, around 5cm.
+ # It's okay if we can't go all the way because we run into the object.
+ indented_print("Performing grasp approach")
+ yield from self._move_hand_linearly_cartesian(approach_pose, stop_on_contact=True)
+
+ # Step once to update
+ empty_action = self._empty_action()
+ yield self._postprocess_action(empty_action)
+
+ if self._get_obj_in_hand() is None:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.POST_CONDITION_ERROR,
+ "Grasp completed, but no object detected in hand after executing grasp",
+ {"target object": obj.name},
+ )
+
+ yield from self._reset_hand()
+
+ if self._get_obj_in_hand() != obj:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.POST_CONDITION_ERROR,
+ "An unexpected object was detected in hand after executing grasp. Consider releasing it",
+ {"expected object": obj.name, "actual object": self._get_obj_in_hand().name},
+ )
+
+ def _place_on_top(self, obj):
+ """
+ Yields action for the robot to navigate to the object if needed, then to place an object on it
+
+ Args:
+ obj (StatefulObject): Object for robot to place the object in its hand on
+
+ Returns:
+ np.array or None: Action array for one step for the robot to place or None if place completed
+ """
+ yield from self._place_with_predicate(obj, object_states.OnTop)
+
+ def _place_inside(self, obj):
+ """
+ Yields action for the robot to navigate to the object if needed, then to place an object in it
+
+ Args:
+            obj (StatefulObject): Object for robot to place the object in its hand inside
+
+ Returns:
+ np.array or None: Action array for one step for the robot to place or None if place completed
+ """
+ yield from self._place_with_predicate(obj, object_states.Inside)
+
+ def _toggle_on(self, obj):
+ yield from self._toggle(obj, True)
+
+ def _toggle_off(self, obj):
+ yield from self._toggle(obj, False)
+
+ def _toggle(self, obj, value):
+ if obj.states[object_states.ToggledOn].get_value() == value:
+ return
+
+ # Put the hand in the toggle marker.
+ toggle_state = obj.states[object_states.ToggledOn]
+ toggle_position = toggle_state.get_link_position()
+ yield from self._navigate_if_needed(obj, toggle_position)
+
+ hand_orientation = self.robot.eef_links[self.arm].get_orientation() # Just keep the current hand orientation.
+ desired_hand_pose = (toggle_position, hand_orientation)
+
+ yield from self._move_hand(desired_hand_pose)
+
+ if obj.states[object_states.ToggledOn].get_value() != value:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.POST_CONDITION_ERROR,
+ "The object did not toggle as expected - maybe try again",
+ {"target object": obj.name, "is it currently toggled on": obj.states[object_states.ToggledOn].get_value()}
+ )
+
+ def _place_with_predicate(self, obj, predicate):
+ """
+ Yields action for the robot to navigate to the object if needed, then to place it
+
+ Args:
+ obj (StatefulObject): Object for robot to place the object in its hand on
+ predicate (object_states.OnTop or object_states.Inside): Determines whether to place on top or inside
+
+ Returns:
+ np.array or None: Action array for one step for the robot to place or None if place completed
+ """
+ # Update the tracking to track the object.
+ self._tracking_object = obj
+
+ obj_in_hand = self._get_obj_in_hand()
+ if obj_in_hand is None:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, "You need to be grasping an object first to place it somewhere."
+ )
+
+ # Sample location to place object
+ obj_pose = self._sample_pose_with_object_and_predicate(predicate, obj_in_hand, obj)
+ hand_pose = self._get_hand_pose_for_object_pose(obj_pose)
+
+ yield from self._navigate_if_needed(obj, pose_on_obj=hand_pose)
+ yield from self._move_hand(hand_pose)
+ yield from self._execute_release()
+
+ if self._get_obj_in_hand() is not None:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.EXECUTION_ERROR,
+ "Could not release object - the object is still in your hand",
+ {"object": self._get_obj_in_hand().name}
+ )
+
+ if not obj_in_hand.states[predicate].get_value(obj):
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.EXECUTION_ERROR,
+ "Failed to place object at the desired place (probably dropped). The object was still released, so you need to grasp it again to continue",
+ {"dropped object": obj_in_hand.name, "target object": obj.name}
+ )
+
+ yield from self._move_hand_upward()
+
+ def _convert_cartesian_to_joint_space(self, target_pose):
+ """
+ Gets joint positions for the arm so eef is at the target pose
+
+ Args:
+ target_pose (Iterable of array): Position and orientation arrays in an iterable for pose for the eef
+
+        Returns:
+            np.array: Joint positions for the arm so the eef reaches the target pose (raises if the pose is unreachable)
+ """
+ relative_target_pose = self._get_pose_in_robot_frame(target_pose)
+ joint_pos = self._ik_solver_cartesian_to_joint_space(relative_target_pose)
+ if joint_pos is None:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PLANNING_ERROR,
+ "Could not find joint positions for target pose. You cannot reach it. Try again for a new pose"
+ )
+ return joint_pos
+
+ def _target_in_reach_of_robot(self, target_pose):
+ """
+ Determines whether the eef for the robot can reach the target pose in the world frame
+
+ Args:
+ target_pose (Iterable of array): Position and orientation arrays in an iterable for the pose for the eef
+
+ Returns:
+ bool: Whether eef can reach the target pose
+ """
+ relative_target_pose = self._get_pose_in_robot_frame(target_pose)
+ return self._target_in_reach_of_robot_relative(relative_target_pose)
+
+ def _target_in_reach_of_robot_relative(self, relative_target_pose):
+ """
+ Determines whether eef for the robot can reach the target pose where the target pose is in the robot frame
+
+ Args:
+            relative_target_pose (Iterable of array): Position and orientation arrays in an iterable for the eef pose in the robot frame
+
+ Returns:
+            bool: Whether the eef can reach the target pose
+ """
+ return self._ik_solver_cartesian_to_joint_space(relative_target_pose) is not None
+
+ @cached_property
+ def _manipulation_control_idx(self):
+ """The appropriate manipulation control idx for the current settings."""
+ if isinstance(self.robot, Tiago):
+ if m.TIAGO_TORSO_FIXED:
+ assert self.arm == "left", "Fixed torso mode only supports left arm!"
+ return self.robot.arm_control_idx["left"]
+ else:
+ return np.concatenate([self.robot.trunk_control_idx, self.robot.arm_control_idx[self.arm]])
+
+ # Otherwise just return the default arm control idx
+ return self.robot.arm_control_idx[self.arm]
+
+ @cached_property
+ def _manipulation_descriptor_path(self):
+ """The appropriate manipulation descriptor for the current settings."""
+ if isinstance(self.robot, Tiago) and m.TIAGO_TORSO_FIXED:
+ assert self.arm == "left", "Fixed torso mode only supports left arm!"
+ return self.robot.robot_arm_descriptor_yamls["left_fixed"]
+
+        # Otherwise just return the default arm descriptor
+ return self.robot.robot_arm_descriptor_yamls[self.arm]
+
+ def _ik_solver_cartesian_to_joint_space(self, relative_target_pose):
+ """
+ Get joint positions for the arm so eef is at the target pose where the target pose is in the robot frame
+
+ Args:
+ relative_target_pose (Iterable of array): Position and orientation arrays in an iterable for pose in the robot frame
+
+        Returns:
+            np.array or None: Joint positions to reach the target pose, or None if no IK solution is found
+ """
+ ik_solver = IKSolver(
+ robot_description_path=self._manipulation_descriptor_path,
+ robot_urdf_path=self.robot.urdf_path,
+ default_joint_pos=self.robot.default_joint_pos[self._manipulation_control_idx],
+ eef_name=self.robot.eef_link_names[self.arm],
+ )
+ # Grab the joint positions in order to reach the desired pose target
+ joint_pos = ik_solver.solve(
+ target_pos=relative_target_pose[0],
+ target_quat=relative_target_pose[1],
+ max_iterations=100,
+ )
+
+ return joint_pos
+
+ def _move_hand(self, target_pose, stop_if_stuck=False):
+ """
+ Yields action for the robot to move hand so the eef is in the target pose using the planner
+
+ Args:
+            target_pose (Iterable of array): Position and orientation arrays in an iterable for pose
+            stop_if_stuck (bool): Whether to raise an error if the hand stops making progress
+
+        Returns:
+            np.array or None: Action array for one step for the robot to move hand or None if it is at the target pose
+ """
+ yield from self._settle_robot()
+ controller_config = self.robot._controller_config["arm_" + self.arm]
+ if controller_config["name"] == "InverseKinematicsController":
+ target_pose_relative = self._get_pose_in_robot_frame(target_pose)
+ yield from self._move_hand_ik(target_pose_relative, stop_if_stuck=stop_if_stuck)
+ else:
+ joint_pos = self._convert_cartesian_to_joint_space(target_pose)
+ yield from self._move_hand_joint(joint_pos)
+
+ def _move_hand_joint(self, joint_pos):
+ """
+ Yields action for the robot to move arm to reach the specified joint positions using the planner
+
+ Args:
+ joint_pos (np.array): Joint positions for the arm
+
+ Returns:
+            np.array or None: Action array for one step for the robot to move arm or None if it is at the joint positions
+ """
+ with PlanningContext(self.robot, self.robot_copy, "original") as context:
+ plan = plan_arm_motion(
+ robot=self.robot,
+ end_conf=joint_pos,
+ context=context,
+ torso_fixed=m.TIAGO_TORSO_FIXED,
+ )
+
+ # plan = self._add_linearly_interpolated_waypoints(plan, 0.1)
+ if plan is None:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PLANNING_ERROR,
+ "There is no accessible path from where you are to the desired joint position. Try again"
+ )
+
+ # Follow the plan to navigate.
+ indented_print("Plan has %d steps", len(plan))
+ for i, joint_pos in enumerate(plan):
+ indented_print("Executing grasp plan step %d/%d", i + 1, len(plan))
+ yield from self._move_hand_direct_joint(joint_pos, ignore_failure=True)
+
+ def _move_hand_ik(self, eef_pose, stop_if_stuck=False):
+ """
+ Yields action for the robot to move arm to reach the specified eef positions using the planner
+
+ Args:
+ eef_pose (np.array): End Effector pose for the arm
+
+ Returns:
+            np.array or None: Action array for one step for the robot to move arm or None if it is at the joint positions
+ """
+ eef_pos = eef_pose[0]
+ eef_ori = T.quat2axisangle(eef_pose[1])
+ end_conf = np.append(eef_pos, eef_ori)
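+        # The IK planner works on 6D eef configurations: a 3D position followed by a 3D axis-angle orientation.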
+
+ with PlanningContext(self.robot, self.robot_copy, "original") as context:
+ plan = plan_arm_motion_ik(
+ robot=self.robot,
+ end_conf=end_conf,
+ context=context,
+ torso_fixed=m.TIAGO_TORSO_FIXED,
+ )
+
+ # plan = self._add_linearly_interpolated_waypoints(plan, 0.1)
+ if plan is None:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PLANNING_ERROR,
+ "There is no accessible path from where you are to the desired joint position. Try again"
+ )
+
+ # Follow the plan to navigate.
+ indented_print("Plan has %d steps", len(plan))
+ for i, target_pose in enumerate(plan):
+ target_pos = target_pose[:3]
+ target_quat = T.axisangle2quat(target_pose[3:])
+ indented_print("Executing grasp plan step %d/%d", i + 1, len(plan))
+ yield from self._move_hand_direct_ik((target_pos, target_quat), ignore_failure=True, in_world_frame=False, stop_if_stuck=stop_if_stuck)
+
+ def _add_linearly_interpolated_waypoints(self, plan, max_inter_dist):
+ """
+ Adds waypoints to the plan so the distance between values in the plan never exceeds the max_inter_dist.
+
+ Args:
+ plan (Array of arrays): Planned path
+ max_inter_dist (float): Maximum distance between values in the plan
+
+ Returns:
+ Array of arrays: Planned path with additional waypoints
+ """
+ plan = np.array(plan)
+ interpolated_plan = []
+ for i in range(len(plan) - 1):
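+            # Use the largest per-joint change over this segment to decide how many interpolation points it needs.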
+            max_diff = np.max(np.abs(plan[i+1] - plan[i]))
+ num_intervals = ceil(max_diff / max_inter_dist)
+ interpolated_plan += np.linspace(plan[i], plan[i+1], num_intervals, endpoint=False).tolist()
+ interpolated_plan.append(plan[-1].tolist())
+ return interpolated_plan
+
+ def _move_hand_direct_joint(self, joint_pos, stop_on_contact=False, ignore_failure=False):
+ """
+ Yields action for the robot to move its arm to reach the specified joint positions by directly actuating with no planner
+
+ Args:
+ joint_pos (np.array): Array of joint positions for the arm
+ stop_on_contact (boolean): Determines whether to stop move once an object is hit
+ ignore_failure (boolean): Determines whether to throw error for not reaching final joint positions
+
+ Returns:
+ np.array or None: Action array for one step for the robot to move arm or None if its at the joint positions
+ """
+ controller_name = f"arm_{self.arm}"
+ use_delta = self.robot._controllers[controller_name].use_delta_commands
+
+ action = self._empty_action()
+ controller_name = "arm_{}".format(self.arm)
+
+ action[self.robot.controller_action_idx[controller_name]] = joint_pos
+ prev_eef_pos = np.zeros(3)
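+        # Track the eef position between steps so we can detect when the hand has stopped making progress.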
+
+ for _ in range(m.MAX_STEPS_FOR_HAND_MOVE_JOINT):
+ current_joint_pos = self.robot.get_joint_positions()[self._manipulation_control_idx]
+            diff_joint_pos = np.array(joint_pos) - np.array(current_joint_pos)
+ if np.max(np.abs(diff_joint_pos)) < m.JOINT_POS_DIFF_THRESHOLD:
+ return
+ if stop_on_contact and detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=False):
+ return
+ if np.max(np.abs(self.robot.get_eef_position(self.arm) - prev_eef_pos)) < 0.0001:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.EXECUTION_ERROR,
+ f"Hand got stuck during execution."
+ )
+
+ if use_delta:
+ # Convert actions to delta.
+ action[self.robot.controller_action_idx[controller_name]] = diff_joint_pos
+
+ prev_eef_pos = self.robot.get_eef_position(self.arm)
+ yield self._postprocess_action(action)
+
+ if not ignore_failure:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.EXECUTION_ERROR,
+ "Your hand was obstructed from moving to the desired joint position"
+ )
+
+ def _move_hand_direct_ik(self, target_pose, stop_on_contact=False, ignore_failure=False, pos_thresh=0.04, ori_thresh=0.4, in_world_frame=True, stop_if_stuck=False):
+ """
+ Moves the hand to a target pose using inverse kinematics.
+
+ Args:
+ target_pose (tuple): A tuple of two elements, representing the target pose of the hand as a position and a quaternion.
+ stop_on_contact (bool, optional): Whether to stop the movement if the hand collides with an object. Defaults to False.
+ ignore_failure (bool, optional): Whether to raise an exception if the movement fails. Defaults to False.
+ pos_thresh (float, optional): The position threshold for considering the target pose reached. Defaults to 0.04.
+ ori_thresh (float, optional): The orientation threshold for considering the target pose reached. Defaults to 0.4.
+ in_world_frame (bool, optional): Whether the target pose is given in the world frame. Defaults to True.
+ stop_if_stuck (bool, optional): Whether to stop the movement if the hand is stuck. Defaults to False.
+
+ Yields:
+ numpy.ndarray: The action to be executed by the robot controller.
+
+ Raises:
+ ActionPrimitiveError: If the movement fails and ignore_failure is False.
+ """
+ # make sure controller is InverseKinematicsController and in expected mode
+ controller_config = self.robot._controller_config["arm_" + self.arm]
+ assert controller_config["name"] == "InverseKinematicsController", "Controller must be InverseKinematicsController"
+ assert controller_config["mode"] == "pose_absolute_ori", "Controller must be in pose_delta_ori mode"
+ if in_world_frame:
+ target_pose = self._get_pose_in_robot_frame(target_pose)
+ target_pos = target_pose[0]
+ target_orn = target_pose[1]
+ target_orn_axisangle = T.quat2axisangle(target_pose[1])
+ action = self._empty_action()
+ control_idx = self.robot.controller_action_idx["arm_" + self.arm]
+ prev_pos = prev_orn = None
+
+ for i in range(m.MAX_STEPS_FOR_HAND_MOVE_IK):
+ current_pose = self._get_pose_in_robot_frame((self.robot.get_eef_position(), self.robot.get_eef_orientation()))
+ current_pos = current_pose[0]
+ current_orn = current_pose[1]
+
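+            # Position error is the straight-line distance; orientation error is the angle of the relative rotation.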
+ delta_pos = target_pos - current_pos
+ target_pos_diff = np.linalg.norm(delta_pos)
+ target_orn_diff = (Rotation.from_quat(target_orn) * Rotation.from_quat(current_orn).inv()).magnitude()
+ reached_goal = target_pos_diff < pos_thresh and target_orn_diff < ori_thresh
+ if reached_goal:
+ return
+
+ if stop_on_contact and detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=False):
+ return
+
+ # if i > 0 and stop_if_stuck and detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=False):
+ if i > 0 and stop_if_stuck:
+ pos_diff = np.linalg.norm(prev_pos - current_pos)
+ orn_diff = (Rotation.from_quat(prev_orn) * Rotation.from_quat(current_orn).inv()).magnitude()
+ if pos_diff < 0.0003 and orn_diff < 0.01:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.EXECUTION_ERROR,
+ f"Hand is stuck"
+ )
+
+ prev_pos = current_pos
+ prev_orn = current_orn
+
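+            # In pose_absolute_ori mode the position command is a delta while the orientation command is an absolute axis-angle target.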
+ action[control_idx] = np.concatenate([delta_pos, target_orn_axisangle])
+ yield self._postprocess_action(action)
+
+ if not ignore_failure:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.EXECUTION_ERROR,
+ "Your hand was obstructed from moving to the desired joint position"
+ )
+
+ def _move_hand_linearly_cartesian(self, target_pose, stop_on_contact=False, ignore_failure=False, stop_if_stuck=False):
+ """
+ Yields action for the robot to move its arm to reach the specified target pose by moving the eef along a line in cartesian
+ space from its current pose
+
+ Args:
+ target_pose (Iterable of array): Position and orientation arrays in an iterable for pose
+ stop_on_contact (boolean): Determines whether to stop move once an object is hit
+ ignore_failure (boolean): Determines whether to throw error for not reaching final joint positions
+
+ Returns:
+            np.array or None: Action array for one step for the robot to move arm or None if it is at the target pose
+ """
+ # To make sure that this happens in a roughly linear fashion, we will divide the trajectory
+ # into 1cm-long pieces
+ start_pos, start_orn = self.robot.eef_links[self.arm].get_position_orientation()
+ travel_distance = np.linalg.norm(target_pose[0] - start_pos)
+ num_poses = np.max([2, int(travel_distance / m.MAX_CARTESIAN_HAND_STEP) + 1])
+ pos_waypoints = np.linspace(start_pos, target_pose[0], num_poses)
+
+ # Also interpolate the rotations
+ combined_rotation = Rotation.from_quat(np.array([start_orn, target_pose[1]]))
+ slerp = Slerp([0, 1], combined_rotation)
+ orn_waypoints = slerp(np.linspace(0, 1, num_poses))
+ quat_waypoints = [x.as_quat() for x in orn_waypoints]
+
+ controller_config = self.robot._controller_config["arm_" + self.arm]
+ if controller_config["name"] == "InverseKinematicsController":
+ waypoints = list(zip(pos_waypoints, quat_waypoints))
+
+ for i, waypoint in enumerate(waypoints):
+ if i < len(waypoints) - 1:
+ yield from self._move_hand_direct_ik(waypoint, stop_on_contact=stop_on_contact, ignore_failure=ignore_failure, stop_if_stuck=stop_if_stuck)
+ else:
+ yield from self._move_hand_direct_ik(
+ waypoints[-1],
+ pos_thresh=0.01, ori_thresh=0.1,
+ stop_on_contact=stop_on_contact,
+ ignore_failure=ignore_failure,
+ stop_if_stuck=stop_if_stuck
+ )
+
+ # Also decide if we can stop early.
+ current_pos, current_orn = self.robot.eef_links[self.arm].get_position_orientation()
+ pos_diff = np.linalg.norm(np.array(current_pos) - np.array(target_pose[0]))
+ orn_diff = (Rotation.from_quat(current_orn) * Rotation.from_quat(target_pose[1]).inv()).magnitude()
+ if pos_diff < 0.005 and orn_diff < np.deg2rad(0.1):
+ return
+
+ if stop_on_contact and detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=False):
+ return
+
+ if not ignore_failure:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.EXECUTION_ERROR,
+ "Your hand was obstructed from moving to the desired world position"
+ )
+ else:
+ # Use joint positions
+ joint_space_data = [self._convert_cartesian_to_joint_space(waypoint) for waypoint in zip(pos_waypoints, quat_waypoints)]
+ joints = list(self.robot.joints.values())
+
+ for joint_pos in joint_space_data:
+ # Check if the movement can be done roughly linearly.
+ current_joint_positions = self.robot.get_joint_positions()[self._manipulation_control_idx]
+
+ failed_joints = []
+ for joint_idx, target_joint_pos, current_joint_pos in zip(self._manipulation_control_idx, joint_pos, current_joint_positions):
+ if np.abs(target_joint_pos - current_joint_pos) > m.MAX_ALLOWED_JOINT_ERROR_FOR_LINEAR_MOTION:
+ failed_joints.append(joints[joint_idx].joint_name)
+
+ if failed_joints:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.EXECUTION_ERROR,
+ "You cannot reach the target position in a straight line - it requires rotating your arm which might cause collisions. You might need to get closer and retry",
+ {"failed joints": failed_joints}
+ )
+
+ # Otherwise, move the joint
+ yield from self._move_hand_direct_joint(joint_pos, stop_on_contact=stop_on_contact, ignore_failure=ignore_failure)
+
+ # Also decide if we can stop early.
+ current_pos, current_orn = self.robot.eef_links[self.arm].get_position_orientation()
+ pos_diff = np.linalg.norm(np.array(current_pos) - np.array(target_pose[0]))
+ orn_diff = (Rotation.from_quat(current_orn) * Rotation.from_quat(target_pose[1]).inv()).magnitude()
+ if pos_diff < 0.001 and orn_diff < np.deg2rad(0.1):
+ return
+
+ if stop_on_contact and detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=False):
+ return
+
+ if not ignore_failure:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.EXECUTION_ERROR,
+ "Your hand was obstructed from moving to the desired world position"
+ )
+
+ def _execute_grasp(self):
+ """
+ Yields action for the robot to grasp
+
+ Returns:
+            np.array or None: Action array for one step for the robot to grasp or None if it is done grasping
+ """
+ for _ in range(m.MAX_STEPS_FOR_GRASP_OR_RELEASE):
+ action = self._empty_action()
+ controller_name = "gripper_{}".format(self.arm)
+ action[self.robot.controller_action_idx[controller_name]] = -1.0
+ yield self._postprocess_action(action)
+
+ def _execute_release(self):
+ """
+ Yields action for the robot to release its grasp
+
+ Returns:
+            np.array or None: Action array for one step for the robot to release or None if it is done releasing
+ """
+ for _ in range(m.MAX_STEPS_FOR_GRASP_OR_RELEASE):
+ action = self._empty_action()
+ controller_name = "gripper_{}".format(self.arm)
+ action[self.robot.controller_action_idx[controller_name]] = 1.0
+ yield self._postprocess_action(action)
+
+ if self._get_obj_in_hand() is not None:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.EXECUTION_ERROR,
+ "An object was still detected in your hand after executing release",
+ {"object in hand": self._get_obj_in_hand().name},
+ )
+
+ def _overwrite_head_action(self, action):
+ """
+ Overwrites camera control actions to track an object of interest.
+ If self._always_track_eef is true, always tracks the end effector of the robot.
+ Otherwise, tracks the object of interest or the end effector as specified by the primitive.
+
+        Args:
+            action (array): Action array to overwrite
+
+        Returns:
+            array: Action array with the camera control entries overwritten to track the target
+        """
+ if self._always_track_eef:
+ target_obj_pose = (self.robot.get_eef_position(), self.robot.get_eef_orientation())
+ else:
+ if self._tracking_object is None:
+ return action
+
+ if self._tracking_object == self.robot:
+ target_obj_pose = (self.robot.get_eef_position(), self.robot.get_eef_orientation())
+ else:
+ target_obj_pose = self._tracking_object.get_position_orientation()
+
+ assert self.robot_model == "Tiago", "Tracking object with camera is currently only supported for Tiago"
+
+ head_q = self._get_head_goal_q(target_obj_pose)
+ head_idx = self.robot.controller_action_idx["camera"]
+
+ config = self.robot._controller_config["camera"]
+ assert config["name"] == "JointController", "Camera controller must be JointController"
+ assert config["motor_type"] == "position", "Camera controller must be in position control mode"
+ use_delta = config["use_delta_commands"]
+
+ if use_delta:
+ cur_head_q = self.robot.get_joint_positions()[self.robot.camera_control_idx]
+ head_action = head_q - cur_head_q
+ else:
+ head_action = head_q
+ action[head_idx] = head_action
+ return action
+
+ def _get_head_goal_q(self, target_obj_pose):
+ """
+        Get goal joint positions for the head to look at an object of interest.
+        If the object cannot be brought into view, return the default (reset) head joint positions.
+ """
+
+ # get current head joint positions
+ head1_joint = self.robot.joints["head_1_joint"]
+ head2_joint = self.robot.joints["head_2_joint"]
+ head1_joint_limits = [head1_joint.lower_limit, head1_joint.upper_limit]
+ head2_joint_limits = [head2_joint.lower_limit, head2_joint.upper_limit]
+ head1_joint_goal = head1_joint.get_state()[0][0]
+ head2_joint_goal = head2_joint.get_state()[0][0]
+
+ # grab robot and object poses
+ robot_pose = self.robot.get_position_orientation()
+ # obj_pose = obj.get_position_orientation()
+ obj_in_base = T.relative_pose_transform(*target_obj_pose, *robot_pose)
+
+ # compute angle between base and object in xy plane (parallel to floor)
+ theta = np.arctan2(obj_in_base[0][1], obj_in_base[0][0])
+
+ # if it is possible to get object in view, compute both head joint positions
+ if head1_joint_limits[0] < theta < head1_joint_limits[1]:
+ head1_joint_goal = theta
+
+ # compute angle between base and object in xz plane (perpendicular to floor)
+ head2_pose = self.robot.links["head_2_link"].get_position_orientation()
+ head2_in_base = T.relative_pose_transform(*head2_pose, *robot_pose)
+
+ phi = np.arctan2(obj_in_base[0][2] - head2_in_base[0][2], obj_in_base[0][0])
+ if head2_joint_limits[0] < phi < head2_joint_limits[1]:
+ head2_joint_goal = phi
+
+        # if not possible to look at the object, fall back to the default head joint positions
+ else:
+ default_head_pos = self._get_reset_joint_pos()[self.robot.controller_action_idx["camera"]]
+ head1_joint_goal = default_head_pos[0]
+ head2_joint_goal = default_head_pos[1]
+
+ return [head1_joint_goal, head2_joint_goal]
+
+ def _empty_action(self):
+ """
+ Get a no-op action that allows us to run simulation without changing robot configuration.
+
+ Returns:
+            np.array: Action array for one step for the robot to do nothing
+ """
+ action = np.zeros(self.robot.action_dim)
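+        # For absolute position controllers a "no-op" means commanding the current joint positions, not zeros.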
+ for name, controller in self.robot._controllers.items():
+ joint_idx = controller.dof_idx
+ action_idx = self.robot.controller_action_idx[name]
+ if controller.control_type == ControlType.POSITION and len(joint_idx) == len(action_idx) and not controller.use_delta_commands:
+ action[action_idx] = self.robot.get_joint_positions()[joint_idx]
+ elif self.robot._controller_config[name]["name"] == "InverseKinematicsController":
+ # overwrite the goal orientation, since it is in absolute frame.
+ assert self.robot._controller_config["arm_" + self.arm]["mode"] == "pose_absolute_ori", "Controller must be in pose_delta_ori mode"
+ current_quat = self.robot.get_relative_eef_orientation()
+ current_ori = T.quat2axisangle(current_quat)
+ control_idx = self.robot.controller_action_idx["arm_" + self.arm]
+ action[control_idx[3:]] = current_ori
+
+ return action
+
+ def _reset_hand(self):
+ """
+ Yields action to move the hand to the position optimal for executing subsequent action primitives
+
+ Returns:
+ np.array or None: Action array for one step for the robot to reset its hand or None if it is done resetting
+ """
+ controller_config = self.robot._controller_config["arm_" + self.arm]
+ if controller_config["name"] == "InverseKinematicsController":
+ indented_print("Resetting hand")
+ reset_eef_pose = self._get_reset_eef_pose()
+ try:
+ yield from self._move_hand_ik(reset_eef_pose)
+ except ActionPrimitiveError:
+ indented_print("Could not do a planned reset of the hand - probably obj_in_hand collides with body")
+ yield from self._move_hand_direct_ik(reset_eef_pose, ignore_failure=True, in_world_frame=False)
+ else:
+ indented_print("Resetting hand")
+ reset_pose = self._get_reset_joint_pos()[self._manipulation_control_idx]
+ try:
+ yield from self._move_hand_joint(reset_pose)
+ except ActionPrimitiveError:
+ indented_print("Could not do a planned reset of the hand - probably obj_in_hand collides with body")
+ yield from self._move_hand_direct_joint(reset_pose, ignore_failure=True)
+
+ def _get_reset_eef_pose(self):
+ # TODO: Add support for Fetch
+ if self.robot_model == "Tiago":
+ return np.array([0.28493954, 0.37450749, 1.1512334]), np.array([-0.21533823, 0.05361032, -0.08631776, 0.97123871])
+ else:
+ raise NotImplementedError
+
+ def _get_reset_joint_pos(self):
+ reset_pose_fetch = np.array(
+ [
+ 0.0,
+ 0.0, # wheels
+ 0.0, # trunk
+ 0.0,
+ -1.0,
+ 0.0, # head
+ -1.0,
+ 1.53448,
+ 2.2,
+ 0.0,
+ 1.36904,
+ 1.90996, # arm
+ 0.05,
+ 0.05, # gripper
+ ]
+ )
+
+ reset_pose_tiago = np.array([
+ -1.78029833e-04,
+ 3.20231302e-05,
+ -1.85759447e-07,
+ 0.0,
+ -0.2,
+ 0.0,
+ 0.1,
+ -6.10000000e-01,
+ -1.10000000e+00,
+ 0.00000000e+00,
+ -1.10000000e+00,
+ 1.47000000e+00,
+ 0.00000000e+00,
+ 8.70000000e-01,
+ 2.71000000e+00,
+ 1.50000000e+00,
+ 1.71000000e+00,
+ -1.50000000e+00,
+ -1.57000000e+00,
+ 4.50000000e-01,
+ 1.39000000e+00,
+ 0.00000000e+00,
+ 0.00000000e+00,
+ 4.50000000e-02,
+ 4.50000000e-02,
+ 4.50000000e-02,
+ 4.50000000e-02
+ ])
+ return reset_pose_tiago if self.robot_model == "Tiago" else reset_pose_fetch
+
+ def _navigate_to_pose(self, pose_2d):
+ """
+ Yields the action to navigate robot to the specified 2d pose
+
+ Args:
+ pose_2d (Iterable): (x, y, yaw) 2d pose
+
+ Returns:
+ np.array or None: Action array for one step for the robot to navigate or None if it is done navigating
+ """
+ with PlanningContext(self.robot, self.robot_copy, "simplified") as context:
+ plan = plan_base_motion(
+ robot=self.robot,
+ end_conf=pose_2d,
+ context=context,
+ )
+
+ if plan is None:
+ # TODO: Would be great to produce a more informative error.
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PLANNING_ERROR,
+ "Could not make a navigation plan to get to the target position"
+ )
+
+ # self._draw_plan(plan)
+ # Follow the plan to navigate.
+ indented_print("Plan has %d steps", len(plan))
+ for i, pose_2d in enumerate(plan):
+ indented_print("Executing navigation plan step %d/%d", i + 1, len(plan))
+            low_precision = i < len(plan) - 1
+ yield from self._navigate_to_pose_direct(pose_2d, low_precision=low_precision)
+
+ def _draw_plan(self, plan):
+ SEARCHED = []
+ trav_map = self.env.scene._trav_map
+ for q in plan:
+ # The below code is useful for plotting the RRT tree.
+ SEARCHED.append(np.flip(trav_map.world_to_map((q[0], q[1]))))
+
+ fig = plt.figure()
+ plt.imshow(trav_map.floor_map[0])
+ plt.scatter(*zip(*SEARCHED), 5)
+ fig.canvas.draw()
+
+ # Convert the canvas to image
+ img = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="")
+ img = img.reshape(fig.canvas.get_width_height()[::-1] + (3,))
+ plt.close(fig)
+
+ # Convert to BGR for cv2-based viewing.
+ img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
+
+ cv2.imshow("SceneGraph", img)
+ cv2.waitKey(1)
+
+ def _navigate_if_needed(self, obj, pose_on_obj=None, **kwargs):
+ """
+        Yields action to navigate the robot to be in range of the object if it is not in range
+
+ Args:
+ obj (StatefulObject): Object for the robot to be in range of
+ pose_on_obj (Iterable): (pos, quat) Pose
+
+ Returns:
+ np.array or None: Action array for one step for the robot to navigate or None if it is done navigating
+ """
+ if pose_on_obj is not None:
+ if self._target_in_reach_of_robot(pose_on_obj):
+ # No need to navigate.
+ return
+ elif self._target_in_reach_of_robot(obj.get_position_orientation()):
+ return
+
+ yield from self._navigate_to_obj(obj, pose_on_obj=pose_on_obj, **kwargs)
+
+ def _navigate_to_obj(self, obj, pose_on_obj=None, **kwargs):
+ """
+ Yields action to navigate the robot to be in range of the pose
+
+ Args:
+ obj (StatefulObject): object to be in range of
+ pose_on_obj (Iterable): (pos, quat) pose
+
+ Returns:
+ np.array or None: Action array for one step for the robot to navigate in range or None if it is done navigating
+ """
+ pose = self._sample_pose_near_object(obj, pose_on_obj=pose_on_obj, **kwargs)
+ yield from self._navigate_to_pose(pose)
+
+ def _navigate_to_pose_direct(self, pose_2d, low_precision=False):
+ """
+ Yields action to navigate the robot to the 2d pose without planning
+
+ Args:
+ pose_2d (Iterable): (x, y, yaw) 2d pose
+            low_precision (bool): Determines whether to navigate to the pose within a large range (low precision) or small range (high precision)
+
+ Returns:
+ np.array or None: Action array for one step for the robot to navigate or None if it is done navigating
+ """
+ dist_threshold = m.LOW_PRECISION_DIST_THRESHOLD if low_precision else m.DEFAULT_DIST_THRESHOLD
+ angle_threshold = m.LOW_PRECISION_ANGLE_THRESHOLD if low_precision else m.DEFAULT_ANGLE_THRESHOLD
+
+ end_pose = self._get_robot_pose_from_2d_pose(pose_2d)
+ body_target_pose = self._get_pose_in_robot_frame(end_pose)
+
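+        # Each iteration: if the heading error to the goal is large, rotate in place toward it first; otherwise drive
+        # toward it (x/y velocity for a joint-controlled base, forward velocity for a differential base).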
+ for _ in range(m.MAX_STEPS_FOR_WAYPOINT_NAVIGATION):
+ if np.linalg.norm(body_target_pose[0][:2]) < dist_threshold:
+ break
+
+ diff_pos = end_pose[0] - self.robot.get_position()
+ intermediate_pose = (end_pose[0], T.euler2quat([0, 0, np.arctan2(diff_pos[1], diff_pos[0])]))
+ body_intermediate_pose = self._get_pose_in_robot_frame(intermediate_pose)
+ diff_yaw = T.quat2euler(body_intermediate_pose[1])[2]
+ if abs(diff_yaw) > m.DEFAULT_ANGLE_THRESHOLD:
+ yield from self._rotate_in_place(intermediate_pose, angle_threshold=m.DEFAULT_ANGLE_THRESHOLD)
+ else:
+ action = self._empty_action()
+ if self._base_controller_is_joint:
+ direction_vec = body_target_pose[0][:2] / np.linalg.norm(body_target_pose[0][:2]) * m.KP_LIN_VEL
+ base_action = [direction_vec[0], direction_vec[1], 0.0]
+ action[self.robot.controller_action_idx["base"]] = base_action
+ else:
+ base_action = [m.KP_LIN_VEL, 0.0]
+ action[self.robot.controller_action_idx["base"]] = base_action
+ yield self._postprocess_action(action)
+
+ body_target_pose = self._get_pose_in_robot_frame(end_pose)
+ else:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.EXECUTION_ERROR,
+ "Could not navigate to the target position",
+ {"target pose": end_pose},
+ )
+
+ # Rotate in place to final orientation once at location
+ yield from self._rotate_in_place(end_pose, angle_threshold=angle_threshold)
+
+ def _rotate_in_place(self, end_pose, angle_threshold = m.DEFAULT_ANGLE_THRESHOLD):
+ """
+ Yields action to rotate the robot to the 2d end pose
+
+ Args:
+ end_pose (Iterable): (x, y, yaw) 2d pose
+ angle_threshold (float): The angle difference between the robot's current and end pose that determines when the robot is done rotating
+
+ Returns:
+ np.array or None: Action array for one step for the robot to rotate or None if it is done rotating
+ """
+ body_target_pose = self._get_pose_in_robot_frame(end_pose)
+ diff_yaw = T.quat2euler(body_target_pose[1])[2]
+
+ for _ in range(m.MAX_STEPS_FOR_WAYPOINT_NAVIGATION):
+ if abs(diff_yaw) < angle_threshold:
+ break
+
+ action = self._empty_action()
+
+ direction = -1.0 if diff_yaw < 0.0 else 1.0
+ ang_vel = m.KP_ANGLE_VEL * direction
+
+ base_action = [0.0, 0.0, ang_vel] if self._base_controller_is_joint else [0.0, ang_vel]
+ action[self.robot.controller_action_idx["base"]] = base_action
+ yield self._postprocess_action(action)
+
+ body_target_pose = self._get_pose_in_robot_frame(end_pose)
+ diff_yaw = T.quat2euler(body_target_pose[1])[2]
+ else:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.EXECUTION_ERROR,
+ "Could not rotate in place to the desired orientation",
+ {"target pose": end_pose},
+ )
+
+ empty_action = self._empty_action()
+ yield self._postprocess_action(empty_action)
+
+ def _sample_pose_near_object(self, obj, pose_on_obj=None, **kwargs):
+ """
+        Returns a 2d pose for the robot within range of the object and where the robot is not in collision with anything
+
+ Args:
+ obj (StatefulObject): Object to sample a 2d pose near
+ pose_on_obj (Iterable of arrays or None): The pose to sample near
+
+        Returns:
+            3-array: (x, y, yaw) 2d pose in the world frame
+ """
+ with PlanningContext(self.robot, self.robot_copy, "simplified") as context:
+ for _ in range(m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_NEAR_OBJECT):
+ if pose_on_obj is None:
+ pos_on_obj = self._sample_position_on_aabb_side(obj)
+ pose_on_obj = [pos_on_obj, np.array([0, 0, 0, 1])]
+
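+                # Sample a candidate base pose within 5 m of the target point, yawed so the target falls inside the arm's workspace.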
+ distance = np.random.uniform(0.0, 5.0)
+ yaw = np.random.uniform(-np.pi, np.pi)
+ avg_arm_workspace_range = np.mean(self.robot.arm_workspace_range[self.arm])
+ pose_2d = np.array(
+ [pose_on_obj[0][0] + distance * np.cos(yaw), pose_on_obj[0][1] + distance * np.sin(yaw), yaw + np.pi - avg_arm_workspace_range]
+ )
+ # Check room
+ obj_rooms = obj.in_rooms if obj.in_rooms else [self.env.scene._seg_map.get_room_instance_by_point(pose_on_obj[0][:2])]
+ if self.env.scene._seg_map.get_room_instance_by_point(pose_2d[:2]) not in obj_rooms:
+ indented_print("Candidate position is in the wrong room.")
+ continue
+
+ if not self._test_pose(pose_2d, context, pose_on_obj=pose_on_obj, **kwargs):
+ continue
+
+ return pose_2d
+
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.SAMPLING_ERROR, "Could not find valid position near object.",
+ {"target object": obj.name, "target pos": obj.get_position(), "pose on target": pose_on_obj}
+ )
+
+ @staticmethod
+ def _sample_position_on_aabb_side(target_obj):
+ """
+ Returns a position on one of the axis-aligned bounding box (AABB) side faces of the target object.
+
+ Args:
+ target_obj (StatefulObject): Object to sample a position on
+
+ Returns:
+ 3-array: (x,y,z) Position in the world frame
+ """
+ aabb_center, aabb_extent = target_obj.aabb_center, target_obj.aabb_extent
+ # We want to sample only from the side-facing faces.
+ face_normal_axis = random.choice([0, 1])
+ face_normal_direction = random.choice([-1, 1])
+ face_center = aabb_center + np.eye(3)[face_normal_axis] * aabb_extent * face_normal_direction
+ face_lateral_axis = 0 if face_normal_axis == 1 else 1
+ face_lateral_half_extent = np.eye(3)[face_lateral_axis] * aabb_extent / 2
+ face_vertical_half_extent = np.eye(3)[2] * aabb_extent / 2
+ face_min = face_center - face_vertical_half_extent - face_lateral_half_extent
+ face_max = face_center + face_vertical_half_extent + face_lateral_half_extent
+ return np.random.uniform(face_min, face_max)
+
+ # def _sample_pose_in_room(self, room: str):
+ # """
+ # Returns a pose for the robot within in the room where the robot is not in collision with anything
+
+ # Args:
+ # room (str): Name of room
+
+ # Returns:
+ # 2-tuple:
+ # - 3-array: (x,y,z) Position in the world frame
+ # - 4-array: (x,y,z,w) Quaternion orientation in the world frame
+ # """
+ # # TODO(MP): Bias the sampling near the agent.
+ # for _ in range(m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_IN_ROOM):
+ # _, pos = self.env.scene.get_random_point_by_room_instance(room)
+ # yaw = np.random.uniform(-np.pi, np.pi)
+ # pose = (pos[0], pos[1], yaw)
+ # if self._test_pose(pose):
+ # return pose
+
+ # raise ActionPrimitiveError(
+ # ActionPrimitiveError.Reason.SAMPLING_ERROR,
+ # "Could not find valid position in the given room to travel to",
+ # {"room": room}
+ # )
+
+ def _sample_pose_with_object_and_predicate(self, predicate, held_obj, target_obj, near_poses=None, near_poses_threshold=None):
+ """
+ Returns a pose for the held object relative to the target object that satisfies the predicate
+
+ Args:
+ predicate (object_states.OnTop or object_states.Inside): Relation between held object and the target object
+ held_obj (StatefulObject): Object held by the robot
+ target_obj (StatefulObject): Object to sample a pose relative to
+ near_poses (Iterable of arrays): Poses in the world frame to sample near
+ near_poses_threshold (float): The distance threshold to check if the sampled pose is near the poses in near_poses
+
+ Returns:
+ 2-tuple:
+ - 3-array: (x,y,z) Position in the world frame
+ - 4-array: (x,y,z,w) Quaternion orientation in the world frame
+ """
+ pred_map = {object_states.OnTop: "onTop", object_states.Inside: "inside"}
+
+ for _ in range(m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_WITH_OBJECT_AND_PREDICATE):
+ _, _, bb_extents, bb_center_in_base = held_obj.get_base_aligned_bbox()
+ sampling_results = sample_cuboid_for_predicate(pred_map[predicate], target_obj, bb_extents)
+ if sampling_results[0][0] is None:
+ continue
+ sampled_bb_center = sampling_results[0][0] + np.array([0, 0, m.PREDICATE_SAMPLING_Z_OFFSET])
+ sampled_bb_orn = sampling_results[0][2]
+
+ # Get the object pose by subtracting the offset
+ sampled_obj_pose = T.pose2mat((sampled_bb_center, sampled_bb_orn)) @ T.pose_inv(T.pose2mat((bb_center_in_base, [0, 0, 0, 1])))
+
+ # Check that the pose is near one of the poses in the near_poses list if provided.
+ if near_poses:
+                sampled_pos = sampled_obj_pose[:3, 3]
+ if not np.any(np.linalg.norm(near_poses - sampled_pos, axis=1) < near_poses_threshold):
+ continue
+
+ # Return the pose
+ return T.mat2pose(sampled_obj_pose)
+
+ # If we get here, sampling failed.
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.SAMPLING_ERROR,
+ "Could not find a position to put this object in the desired relation to the target object",
+ {"target object": target_obj.name, "object in hand": held_obj.name, "relation": pred_map[predicate]},
+ )
+
+ # TODO: Why do we need to pass in the context here?
+ def _test_pose(self, pose_2d, context, pose_on_obj=None):
+ """
+ Determines whether the robot can reach the pose on the object and is not in collision at the specified 2d pose
+
+ Args:
+ pose_2d (Iterable): (x, y, yaw) 2d pose
+ context (Context): Planning context reference
+ pose_on_obj (Iterable of arrays): Pose on the object in the world frame
+
+ Returns:
+ bool: True if the robot is in a valid pose, False otherwise
+ """
+ pose = self._get_robot_pose_from_2d_pose(pose_2d)
+ if pose_on_obj is not None:
+ relative_pose = T.relative_pose_transform(*pose_on_obj, *pose)
+ if not self._target_in_reach_of_robot_relative(relative_pose):
+ return False
+
+ if set_base_and_detect_collision(context, pose):
+ indented_print("Candidate position failed collision test.")
+ return False
+ return True
+
+ @staticmethod
+ def _get_robot_pose_from_2d_pose(pose_2d):
+ """
+ Gets 3d pose from 2d pose
+
+ Args:
+ pose_2d (Iterable): (x, y, yaw) 2d pose
+
+ Returns:
+ 2-tuple:
+ - 3-array: (x,y,z) Position in the world frame
+ - 4-array: (x,y,z,w) Quaternion orientation in the world frame
+ """
+ pos = np.array([pose_2d[0], pose_2d[1], m.DEFAULT_BODY_OFFSET_FROM_FLOOR])
+ orn = T.euler2quat([0, 0, pose_2d[2]])
+ return pos, orn
+
+ def _get_pose_in_robot_frame(self, pose):
+ """
+ Converts the pose in the world frame to the robot frame
+
+        Args:
+            pose (Iterable of arrays): Position and orientation arrays in an iterable for the pose in the world frame
+
+        Returns:
+            2-tuple:
+                - 3-array: (x,y,z) Position in the robot frame
+                - 4-array: (x,y,z,w) Quaternion orientation in the robot frame
+        """
+ body_pose = self.robot.get_position_orientation()
+ return T.relative_pose_transform(*pose, *body_pose)
+
+ def _get_hand_pose_for_object_pose(self, desired_pose):
+ """
+ Gets the pose of the hand for the desired object pose
+
+ Args:
+ desired_pose (Iterable of arrays): Pose of the object in the world frame
+
+ Returns:
+ 2-tuple:
+ - 3-array: (x,y,z) Position of the hand in the world frame
+ - 4-array: (x,y,z,w) Quaternion orientation of the hand in the world frame
+ """
+ obj_in_hand = self._get_obj_in_hand()
+
+ assert obj_in_hand is not None
+
+ # Get the object pose & the robot hand pose
+ obj_in_world = obj_in_hand.get_position_orientation()
+ hand_in_world = self.robot.eef_links[self.arm].get_position_orientation()
+
+ # Get the hand pose relative to the obj pose
+ hand_in_obj = T.relative_pose_transform(*hand_in_world, *obj_in_world)
+
+ # Now apply desired obj pose.
+ desired_hand_pose = T.pose_transform(*desired_pose, *hand_in_obj)
+
+ return desired_hand_pose
+
+ # Function that is particularly useful for Fetch, where it gives time for the base of robot to settle due to its uneven base.
+ def _settle_robot(self):
+ """
+ Yields a no op action for a few steps to allow the robot and physics to settle
+
+ Returns:
+ np.array or None: Action array for one step for the robot to do nothing
+ """
+ for _ in range(30):
+ empty_action = self._empty_action()
+ yield self._postprocess_action(empty_action)
+
+ for _ in range(m.MAX_STEPS_FOR_SETTLING):
+ if np.linalg.norm(self.robot.get_linear_velocity()) < 0.01:
+ break
+ empty_action = self._empty_action()
+ yield self._postprocess_action(empty_action)
\ No newline at end of file
diff --git a/omnigibson/action_primitives/symbolic_semantic_action_primitives.py b/omnigibson/action_primitives/symbolic_semantic_action_primitives.py
new file mode 100644
index 000000000..8e7442845
--- /dev/null
+++ b/omnigibson/action_primitives/symbolic_semantic_action_primitives.py
@@ -0,0 +1,562 @@
+"""
+WARNING!
+A set of action primitives that work without executing low-level physics but instead teleporting
+objects directly into their post-condition states. Useful for learning high-level methods.
+"""
+
+from aenum import IntEnum, auto
+
+import numpy as np
+from omnigibson.robots.robot_base import BaseRobot
+from omnigibson.systems.system_base import REGISTERED_SYSTEMS
+from omnigibson.transition_rules import REGISTERED_RULES, TransitionRuleAPI
+
+from omnigibson import object_states
+from omnigibson.action_primitives.action_primitive_set_base import ActionPrimitiveError, ActionPrimitiveErrorGroup
+from omnigibson.action_primitives.starter_semantic_action_primitives import StarterSemanticActionPrimitives
+from omnigibson.objects import DatasetObject
+
+
+class SymbolicSemanticActionPrimitiveSet(IntEnum):
+ _init_ = 'value __doc__'
+ GRASP = auto(), "Grasp an object"
+ PLACE_ON_TOP = auto(), "Place the currently grasped object on top of another object"
+ PLACE_INSIDE = auto(), "Place the currently grasped object inside another object"
+ OPEN = auto(), "Open an object"
+ CLOSE = auto(), "Close an object"
+ TOGGLE_ON = auto(), "Toggle an object on"
+ TOGGLE_OFF = auto(), "Toggle an object off"
+ SOAK_UNDER = auto(), "Soak the currently grasped object under a fluid source."
+ SOAK_INSIDE = auto(), "Soak the currently grasped object inside the fluid within a container."
+ WIPE = auto(), "Wipe the given object with the currently grasped object."
+ CUT = auto(), "Cut (slice or dice) the given object with the currently grasped object."
+ PLACE_NEAR_HEATING_ELEMENT = auto(), "Place the currently grasped object near the heating element of another object."
+ NAVIGATE_TO = auto(), "Navigate to an object"
+ RELEASE = auto(), "Release an object, letting it fall to the ground. You can then grasp it again, as a way of reorienting your grasp of the object."
+
+class SymbolicSemanticActionPrimitives(StarterSemanticActionPrimitives):
+ def __init__(self, env):
+ super().__init__(env)
+ self.controller_functions = {
+ SymbolicSemanticActionPrimitiveSet.GRASP: self._grasp,
+ SymbolicSemanticActionPrimitiveSet.PLACE_ON_TOP: self._place_on_top,
+ SymbolicSemanticActionPrimitiveSet.PLACE_INSIDE: self._place_inside,
+ SymbolicSemanticActionPrimitiveSet.OPEN: self._open,
+ SymbolicSemanticActionPrimitiveSet.CLOSE: self._close,
+ SymbolicSemanticActionPrimitiveSet.TOGGLE_ON: self._toggle_on,
+ SymbolicSemanticActionPrimitiveSet.TOGGLE_OFF: self._toggle_off,
+ SymbolicSemanticActionPrimitiveSet.SOAK_UNDER: self._soak_under,
+ SymbolicSemanticActionPrimitiveSet.SOAK_INSIDE: self._soak_inside,
+ SymbolicSemanticActionPrimitiveSet.WIPE: self._wipe,
+ SymbolicSemanticActionPrimitiveSet.CUT: self._cut,
+ SymbolicSemanticActionPrimitiveSet.PLACE_NEAR_HEATING_ELEMENT: self._place_near_heating_element,
+ SymbolicSemanticActionPrimitiveSet.NAVIGATE_TO: self._navigate_to_obj,
+ SymbolicSemanticActionPrimitiveSet.RELEASE: self._release,
+ }
+
+ def apply_ref(self, prim, *args, attempts=3):
+ """
+ Yields action for robot to execute the primitive with the given arguments.
+
+ Args:
+ prim (SymbolicSemanticActionPrimitiveSet): Primitive to execute
+ args: Arguments for the primitive
+ attempts (int): Number of attempts to make before raising an error
+
+ Returns:
+            np.array or None: Action array for one step for the robot to execute the primitive or None if primitive completed
+
+ Raises:
+ ActionPrimitiveError: If primitive fails to execute
+ """
+ assert attempts > 0, "Must make at least one attempt"
+ ctrl = self.controller_functions[prim]
+
+ if any(isinstance(arg, BaseRobot) for arg in args):
+ raise ActionPrimitiveErrorGroup([
+ ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
+ "Cannot call a symbolic semantic action primitive with a robot as an argument."
+ )
+ ])
+
+ errors = []
+ for _ in range(attempts):
+ # Attempt
+ success = False
+ try:
+ yield from ctrl(*args)
+ success = True
+ except ActionPrimitiveError as e:
+ errors.append(e)
+
+ try:
+ # Settle before returning.
+ yield from self._settle_robot()
+ except ActionPrimitiveError:
+ pass
+
+ # Stop on success
+ if success:
+ return
+
+ raise ActionPrimitiveErrorGroup(errors)
+
+ def _open_or_close(self, obj, should_open):
+ if self._get_obj_in_hand():
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
+ "Cannot open or close an object while holding an object",
+ {"object in hand": self._get_obj_in_hand().name},
+ )
+
+ if object_states.Open not in obj.states:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
+ "The target object is not openable.",
+ {"target object": obj.name}
+ )
+
+        # Don't do anything if the object is already in the desired open/closed state.
+ if should_open == obj.states[object_states.Open].get_value():
+ return
+
+ # Set the value
+ obj.states[object_states.Open].set_value(should_open)
+
+ # Settle
+ yield from self._settle_robot()
+
+ if obj.states[object_states.Open].get_value() != should_open:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.POST_CONDITION_ERROR,
+ "The object did not open or close as expected. Maybe try again",
+ {"target object": obj.name, "is it currently open": obj.states[object_states.Open].get_value()},
+ )
+
+ def _grasp(self, obj: DatasetObject):
+ """
+ Yields action for the robot to navigate to object if needed, then to grasp it
+
+ Args:
+            obj (DatasetObject): Object for robot to grasp
+
+ Returns:
+ np.array or None: Action array for one step for the robot to grasp or None if grasp completed
+ """
+ # Don't do anything if the object is already grasped.
+ obj_in_hand = self._get_obj_in_hand()
+ if obj_in_hand is not None:
+ if obj_in_hand == obj:
+ return
+ else:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
+ "Cannot grasp when your hand is already full",
+ {"target object": obj.name, "object currently in hand": obj_in_hand.name},
+ )
+
+ # Get close
+ # yield from self._navigate_if_needed(obj)
+
+ # Perform forced assisted grasp
+ obj.set_position(self.robot.get_eef_position(self.arm))
+ self.robot._establish_grasp(self.arm, (obj, obj.root_link), obj.get_position())
+
+ # Execute for a moment
+ yield from self._settle_robot()
+
+ # Verify
+ if self._get_obj_in_hand() is None:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.POST_CONDITION_ERROR,
+ "Grasp completed, but no object detected in hand after executing grasp",
+ {"target object": obj.name},
+ )
+
+ if self._get_obj_in_hand() != obj:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.POST_CONDITION_ERROR,
+ "An unexpected object was detected in hand after executing grasp. Consider releasing it",
+ {"expected object": obj.name, "actual object": self._get_obj_in_hand().name},
+ )
+
+ def _release(self):
+ if not self._get_obj_in_hand():
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
+ "Cannot release an object if you're not already holding an object",
+ )
+
+ self.robot.release_grasp_immediately()
+ yield from self._settle_robot()
+
+ def _toggle(self, obj, value):
+ if self._get_obj_in_hand():
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
+ "Cannot toggle an object while holding an object",
+ {"object in hand": self._get_obj_in_hand()},
+ )
+
+ if object_states.ToggledOn not in obj.states:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
+ "The target object is not toggleable.",
+ {"target object": obj.name}
+ )
+
+ if obj.states[object_states.ToggledOn].get_value() == value:
+ return
+
+ # Call the setter
+ obj.states[object_states.ToggledOn].set_value(value)
+
+ # Yield some actions
+ yield from self._settle_robot()
+
+ # Check that it actually happened
+ if obj.states[object_states.ToggledOn].get_value() != value:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.POST_CONDITION_ERROR,
+ "The object did not toggle as expected - maybe try again",
+ {"target object": obj.name, "is it currently toggled on": obj.states[object_states.ToggledOn].get_value()}
+ )
+
+ def _place_with_predicate(self, obj, predicate, near_poses=None, near_poses_threshold=None):
+ """
+ Yields action for the robot to navigate to the object if needed, then to place it
+
+ Args:
+ obj (StatefulObject): Object for robot to place the object in its hand on
+            predicate (object_states.OnTop or object_states.Inside): Determines whether to place on top or inside
+            near_poses (Iterable of arrays): Poses in the world frame to sample near
+            near_poses_threshold (float): The distance threshold to check if the sampled pose is near the poses in near_poses
+
+ Returns:
+ np.array or None: Action array for one step for the robot to place or None if place completed
+ """
+ obj_in_hand = self._get_obj_in_hand()
+ if obj_in_hand is None:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, "You need to be grasping an object first to place it somewhere."
+ )
+
+ # Find a spot to put it
+ obj_pose = self._sample_pose_with_object_and_predicate(predicate, obj_in_hand, obj, near_poses=near_poses, near_poses_threshold=near_poses_threshold)
+
+ # Get close, release the object.
+ # yield from self._navigate_if_needed(obj, pose_on_obj=obj_pose)
+ yield from self._release()
+
+ # Actually move the object to the spot and step a bit to settle it.
+ obj_in_hand.set_position_orientation(*obj_pose)
+ yield from self._settle_robot()
+
+ if not obj_in_hand.states[predicate].get_value(obj):
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.EXECUTION_ERROR,
+ "Failed to place object at the desired place (probably dropped). The object was still released, so you need to grasp it again to continue",
+ {"dropped object": obj_in_hand.name, "target object": obj.name}
+ )
+
+ def _soak_under(self, obj):
+ # Check that our current object is a particle remover
+ obj_in_hand = self._get_obj_in_hand()
+ if obj_in_hand is None:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, "You need to be grasping a soakable object first."
+ )
+
+ # Check that the target object is a particle source
+ if object_states.ParticleSource not in obj.states:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
+ "The target object is not a particle source, so you can not soak anything under it.",
+ {"target object": obj.name}
+ )
+
+ # Check if the target object has any particles in it
+ producing_systems = {ps for ps in REGISTERED_SYSTEMS.values() if obj.states[object_states.ParticleSource].check_conditions_for_system(ps)}
+ if not producing_systems:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
+ "The target object currently is not producing any particles - try toggling it on.",
+ {"target object": obj.name}
+ )
+
+ # Check that the current object can remove those particles
+ if object_states.Saturated not in obj_in_hand.states:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
+ "The currently grasped object cannot soak particles.",
+ {"object in hand": obj_in_hand.name}
+ )
+
+ supported_systems = {
+ x for x in producing_systems if obj_in_hand.states[object_states.ParticleRemover].supports_system(x)
+ }
+ if not supported_systems:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
+ "The target object only contains particles that this object cannot soak.",
+ {
+ "target object": obj.name,
+ "cleaning tool": obj_in_hand.name,
+ "particles the target object is producing": sorted(x.name for x in producing_systems),
+ "particles the grasped object can remove": sorted([x for x in obj_in_hand.states[object_states.ParticleRemover].conditions.keys()])
+ }
+ )
+
+ currently_removable_systems = {
+ x for x in supported_systems if obj_in_hand.states[object_states.ParticleRemover].check_conditions_for_system(x)
+ }
+ if not currently_removable_systems:
+ # TODO: This needs to be far more descriptive.
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
+ "The target object is covered by some particles that this object can normally soak, but needs to be in a different state to do so (e.g. toggled on, soaked by another fluid first, etc.).",
+ {
+ "target object": obj.name,
+ "cleaning tool": obj_in_hand.name,
+ "particles the target object is producing": sorted(x.name for x in producing_systems),
+ }
+ )
+
+ # If so, remove the particles.
+ for system in currently_removable_systems:
+ obj_in_hand.states[object_states.Saturated].set_value(system, True)
+
+
+ def _soak_inside(self, obj):
+ # Check that our current object is a particle remover
+ obj_in_hand = self._get_obj_in_hand()
+ if obj_in_hand is None:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, "You need to be grasping a soakable object first."
+ )
+
+ # Check that the target object is fillable
+ if object_states.Contains not in obj.states:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
+ "The target object is not fillable by particles, so you can not soak anything in it.",
+ {"target object": obj.name}
+ )
+
+ # Check if the target object has any particles in it
+ contained_systems = {ps for ps in REGISTERED_SYSTEMS.values() if obj.states[object_states.Contains].get_value(ps.states)}
+ if not contained_systems:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
+ "The target object currently does not contain any particles.",
+ {"target object": obj.name}
+ )
+
+ # Check that the current object can remove those particles
+ if object_states.Saturated not in obj_in_hand.states:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
+ "The currently grasped object cannot soak particles.",
+ {"object in hand": obj_in_hand.name}
+ )
+
+ supported_systems = {
+ x for x in contained_systems if obj_in_hand.states[object_states.ParticleRemover].supports_system(x)
+ }
+ if not supported_systems:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
+ "The target object only contains particles that this object cannot soak.",
+ {
+ "target object": obj.name,
+ "cleaning tool": obj_in_hand.name,
+ "particles the target object contains": sorted(x.name for x in contained_systems),
+ "particles the grasped object can remove": sorted([x for x in obj_in_hand.states[object_states.ParticleRemover].conditions.keys()])
+ }
+ )
+
+ currently_removable_systems = {
+ x for x in supported_systems if obj_in_hand.states[object_states.ParticleRemover].check_conditions_for_system(x)
+ }
+ if not currently_removable_systems:
+ # TODO: This needs to be far more descriptive.
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
+ "The target object is covered by some particles that this object can normally soak, but needs to be in a different state to do so (e.g. toggled on, soaked by another fluid first, etc.).",
+ {
+ "target object": obj.name,
+ "cleaning tool": obj_in_hand.name,
+ "particles the target object contains": sorted(x.name for x in contained_systems),
+ }
+ )
+
+ # If so, remove the particles.
+ for system in currently_removable_systems:
+ obj_in_hand.states[object_states.Saturated].set_value(system, True)
+
+
+ def _wipe(self, obj):
+ # Check that our current object is a particle remover
+ obj_in_hand = self._get_obj_in_hand()
+ if obj_in_hand is None:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, "You need to be grasping a wiping tool (particle remover) first to wipe an object."
+ )
+
+ # Check that the target object is coverable
+ if object_states.Covered not in obj.states:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
+ "The target object is not coverable by any particles, so there is no need to wipe it.",
+ {"target object": obj.name}
+ )
+
+ # Check if the target object has any particles on it
+ covering_systems = {ps for ps in REGISTERED_SYSTEMS.values() if obj.states[object_states.Covered].get_value(ps)}
+ if not covering_systems:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
+ "The target object is not covered by any particles.",
+ {"target object": obj.name}
+ )
+
+ # Check that the current object can remove those particles
+ if object_states.ParticleRemover not in obj_in_hand.states:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
+ "The currently grasped object is not a particle remover.",
+ {"object in hand": obj_in_hand.name}
+ )
+
+ supported_systems = {
+ x for x in covering_systems if obj_in_hand.states[object_states.ParticleRemover].supports_system(x)
+ }
+ if not supported_systems:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
+ "The target object is covered only by particles that this cleaning tool cannot remove.",
+ {
+ "target object": obj.name,
+ "cleaning tool": obj_in_hand.name,
+ "particles the target object is covered by": sorted(x.name for x in covering_systems),
+ "particles the grasped object can remove": sorted([x for x in obj_in_hand.states[object_states.ParticleRemover].conditions.keys()])
+ }
+ )
+
+ currently_removable_systems = {
+ x for x in supported_systems if obj_in_hand.states[object_states.ParticleRemover].check_conditions_for_system(x)
+ }
+ if not currently_removable_systems:
+ # TODO: This needs to be far more descriptive.
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
+ "The target object is covered by some particles that this cleaning tool can normally remove, but needs to be in a different state to do so (e.g. toggled on, soaked by another fluid first, etc.).",
+ {
+ "target object": obj.name,
+ "cleaning tool": obj_in_hand.name,
+ "particles the target object is covered by": sorted(x.name for x in covering_systems),
+ }
+ )
+
+ # If so, remove the particles.
+ for system in currently_removable_systems:
+ obj_in_hand.states[object_states.Covered].set_value(system, False)
+
+ def _cut(self, obj):
+ # Check that our current object is a slicer
+ obj_in_hand = self._get_obj_in_hand()
+ if obj_in_hand is None:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, "You need to be grasping a cutting tool first to slice an object."
+ )
+
+ if "slicer" not in obj_in_hand._abilities:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
+ "The current object is not a cutting tool.",
+ {"object in hand": obj_in_hand.name}
+ )
+
+ # Check that the target object is sliceable
+ if "sliceable" not in obj._abilities and "diceable" not in obj._abilities:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
+ "The target object is not sliceable or diceable.",
+ {"target object": obj.name}
+ )
+
+ # Get close
+ # yield from self._navigate_if_needed(obj)
+
+ # TODO: Do some more validation
+ added_obj_attrs = []
+ removed_objs = []
+ output = REGISTERED_RULES["SlicingRule"].transition({"sliceable": [obj]})
+ added_obj_attrs += output.add
+ removed_objs += output.remove
+
+ TransitionRuleAPI.execute_transition(added_obj_attrs=added_obj_attrs, removed_objs=removed_objs)
+ yield from self._settle_robot()
+
+ def _place_near_heating_element(self, heat_source_obj):
+ obj_in_hand = self._get_obj_in_hand()
+ if obj_in_hand is None:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, "You need to be grasping an object first to place it somewhere."
+ )
+
+ if object_states.HeatSourceOrSink not in heat_source_obj.states:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, "The target object is not a heat source or sink.", {"target object": heat_source_obj.name}
+ )
+
+ if heat_source_obj.states[object_states.HeatSourceOrSink].requires_inside:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
+ "The heat source object has no explicit heating element, it just requires the cookable object to be placed inside it.",
+ {"target object": heat_source_obj.name}
+ )
+
+ # Get the position of the heat source on the thing we're placing near
+ heating_element_positions = np.array([link.get_position() for link in heat_source_obj.states[object_states.HeatSourceOrSink].links.values()])
+ heating_distance_threshold = heat_source_obj.states[object_states.HeatSourceOrSink].distance_threshold
+
+ # Call place-with-predicate
+ yield from self._place_with_predicate(heat_source_obj, object_states.OnTop, near_poses=heating_element_positions, near_poses_threshold=heating_distance_threshold)
+
+ def _wait_for_cooked(self, obj):
+ # Check that the current object is cookable
+ if object_states.Cooked not in obj.states:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, "Target object is not cookable.",
+ {"target object": obj.name}
+ )
+
+ # Keep waiting as long as the thing is warming up.
+ prev_temp = obj.states[object_states.Temperature].get_value()
+ while not obj.states[object_states.Cooked].get_value():
+ # Pass some time
+ for _ in range(10):
+ yield from self._empty_action()
+
+ # Check that we are still heating up
+ new_temp = obj.states[object_states.Temperature].get_value()
+ if new_temp - prev_temp < 1e-2:
+ raise ActionPrimitiveError(
+ ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
+ "Target object is not currently heating up.",
+ {"target object": obj.name}
+ )
+
+ def _navigate_to_pose(self, pose_2d):
+ """
+ Yields the action to navigate robot to the specified 2d pose
+
+ Args:
+ pose_2d (Iterable): (x, y, yaw) 2d pose
+
+ Returns:
+ np.array or None: Action array for one step for the robot to navigate or None if it is done navigating
+ """
+ robot_pose = self._get_robot_pose_from_2d_pose(pose_2d)
+ self.robot.set_position_orientation(*robot_pose)
+ yield from self._settle_robot()
diff --git a/omnigibson/configs/tiago_primitives.yaml b/omnigibson/configs/tiago_primitives.yaml
new file mode 100644
index 000000000..a8ce20842
--- /dev/null
+++ b/omnigibson/configs/tiago_primitives.yaml
@@ -0,0 +1,82 @@
+env:
+ initial_pos_z_offset: 0.1
+
+render:
+ viewer_width: 1280
+ viewer_height: 720
+
+scene:
+ type: InteractiveTraversableScene
+ scene_model: Rs_int
+ trav_map_resolution: 0.1
+ trav_map_erosion: 2
+ trav_map_with_objects: true
+ build_graph: true
+ num_waypoints: 1
+ waypoint_resolution: 0.2
+ load_object_categories: null
+ not_load_object_categories: null
+ load_room_types: null
+ load_room_instances: null
+ load_task_relevant_only: false
+ seg_map_resolution: 0.1
+ scene_source: OG
+ include_robots: false
+
+robots:
+ - name: tiago
+ type: Tiago
+ obs_modalities: [scan, rgb, depth]
+ scale: 1.0
+ self_collisions: true
+ # action_normalize: false
+ action_normalize: false
+ action_type: continuous
+ grasping_mode: sticky
+ disable_grasp_handling: true
+ rigid_trunk: false
+ default_trunk_offset: 0.365
+ default_arm_pose: diagonal30
+ controller_config:
+ base:
+ name: JointController
+ motor_type: "velocity"
+ arm_left:
+ name: JointController
+ motor_type: position
+ command_input_limits: null
+ command_output_limits: null
+ use_delta_commands: false
+ arm_right:
+ name: JointController
+ motor_type: position
+ command_input_limits: null
+ command_output_limits: null
+ use_delta_commands: false
+ gripper_left:
+ name: JointController
+ motor_type: position
+ command_input_limits: [-1, 1]
+ command_output_limits: null
+ use_delta_commands: true
+ gripper_right:
+ name: JointController
+ motor_type: position
+ command_input_limits: [-1, 1]
+ command_output_limits: null
+ use_delta_commands: true
+ camera:
+ name: JointController
+ motor_type: "velocity"
+ use_delta_commands: false
+
+objects: []
+
+task:
+ type: DummyTask
+
+scene_graph:
+ egocentric: true
+ full_obs: true
+ only_true: true
+ merge_parallel_edges: false
\ No newline at end of file
diff --git a/omnigibson/controllers/ik_controller.py b/omnigibson/controllers/ik_controller.py
index 9f6c9bca1..54994ff63 100644
--- a/omnigibson/controllers/ik_controller.py
+++ b/omnigibson/controllers/ik_controller.py
@@ -1,4 +1,5 @@
import numpy as np
+from omnigibson.macros import create_module_macros
import omnigibson.utils.transform_utils as T
from omnigibson.controllers import ControlType, ManipulationController
@@ -10,6 +11,12 @@
# Create module logger
log = create_module_logger(module_name=__name__)
+# Set some macros
+m = create_module_macros(module_path=__file__)
+m.IK_POS_TOLERANCE = 0.002
+m.IK_POS_WEIGHT = 20.0
+m.IK_MAX_ITERATIONS = 100
+
# Different modes
IK_MODE_COMMAND_DIMS = {
"pose_absolute_ori": 6, # 6DOF (dx,dy,dz,ax,ay,az) control over pose, where the orientation is given in absolute axis-angle coordinates
@@ -47,6 +54,7 @@ def __init__(
mode="pose_delta_ori",
smoothing_filter_size=None,
workspace_pose_limiter=None,
+ condition_on_current_position=True,
):
"""
Args:
@@ -101,6 +109,8 @@ def limiter(command_pos: Array[float], command_quat: Array[float], control_dict:
where pos_command is (x,y,z) cartesian position values, command_quat is (x,y,z,w) quarternion orientation
values, and the returned tuple is the processed (pos, quat) command.
+ condition_on_current_position (bool): if True, will use the current joint position as the initial guess for the IK algorithm.
+ Otherwise, will use the default_joint_pos as the initial guess.
"""
# Store arguments
control_dim = len(dof_idx)
@@ -117,13 +127,14 @@ def limiter(command_pos: Array[float], command_quat: Array[float], control_dict:
self.workspace_pose_limiter = workspace_pose_limiter
self.task_name = task_name
self.default_joint_pos = default_joint_pos[dof_idx]
+ self.condition_on_current_position = condition_on_current_position
# Create the lula IKSolver
self.solver = IKSolver(
robot_description_path=robot_description_path,
robot_urdf_path=robot_urdf_path,
eef_name=eef_name,
- default_joint_pos=default_joint_pos,
+ default_joint_pos=self.default_joint_pos,
)
# Other variables that will be filled in at runtime
@@ -267,17 +278,36 @@ def _command_to_control(self, command, control_dict):
# Calculate and return IK-backed out joint angles
current_joint_pos = control_dict["joint_position"][self.dof_idx]
- target_joint_pos = self.solver.solve(
- target_pos=target_pos,
- target_quat=target_quat,
- initial_joint_pos=current_joint_pos,
- )
- if target_joint_pos is None:
- # Print warning that we couldn't find a valid solution, and return the current joint configuration
- # instead so that we execute a no-op control
- log.warning(f"Could not find valid IK configuration! Returning no-op control instead.")
+ # If the delta is really small, we just keep the current joint position. This avoids joint
+ # drift caused by IK solver inaccuracy even when zero delta actions are provided.
+ if np.allclose(pos_relative, target_pos, atol=1e-4) and np.allclose(quat_relative, target_quat, atol=1e-4):
target_joint_pos = current_joint_pos
+ else:
+ # Otherwise we try to solve for the IK configuration.
+ if self.condition_on_current_position:
+ target_joint_pos = self.solver.solve(
+ target_pos=target_pos,
+ target_quat=target_quat,
+ tolerance_pos=m.IK_POS_TOLERANCE,
+ weight_pos=m.IK_POS_WEIGHT,
+ max_iterations=m.IK_MAX_ITERATIONS,
+ initial_joint_pos=current_joint_pos,
+ )
+ else:
+ target_joint_pos = self.solver.solve(
+ target_pos=target_pos,
+ target_quat=target_quat,
+ tolerance_pos=m.IK_POS_TOLERANCE,
+ weight_pos=m.IK_POS_WEIGHT,
+ max_iterations=m.IK_MAX_ITERATIONS,
+ )
+
+ if target_joint_pos is None:
+ # Print warning that we couldn't find a valid solution, and return the current joint configuration
+ # instead so that we execute a no-op control
+ log.warning(f"Could not find valid IK configuration! Returning no-op control instead.")
+ target_joint_pos = current_joint_pos
# Optionally pass through smoothing filter for better stability
if self.control_filter is not None:
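
A hedged sketch of how the new flag could be exercised (the controller key below is illustrative, not part of this diff): a robot's controller_config can turn off conditioning on the current joint position so the solver always seeds from default_joint_pos.

# Hypothetical controller_config fragment; only "condition_on_current_position" comes from this diff.
controller_config = {
    "arm_0": {
        "name": "InverseKinematicsController",
        "mode": "pose_delta_ori",
        "condition_on_current_position": False,  # seed IK from default_joint_pos instead of the current joints
    },
}
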
diff --git a/omnigibson/envs/env_base.py b/omnigibson/envs/env_base.py
index 82a25747e..8a68e965d 100644
--- a/omnigibson/envs/env_base.py
+++ b/omnigibson/envs/env_base.py
@@ -4,6 +4,7 @@
import omnigibson as og
from omnigibson.objects import REGISTERED_OBJECTS
from omnigibson.robots import REGISTERED_ROBOTS
+from omnigibson.scene_graphs.graph_builder import SceneGraphBuilder
from omnigibson.tasks import REGISTERED_TASKS
from omnigibson.scenes import REGISTERED_SCENES
from omnigibson.utils.gym_utils import GymObservable, recursively_generate_flat_dict
@@ -71,6 +72,11 @@ def __init__(
for config in configs:
merge_nested_dicts(base_dict=self.config, extra_dict=parse_config(config), inplace=True)
+ # Create the scene graph builder
+ self._scene_graph_builder = None
+ if "scene_graph" in self.config and self.config["scene_graph"] is not None:
+ self._scene_graph_builder = SceneGraphBuilder(**self.config["scene_graph"])
+
# Set the simulator settings
og.sim.set_simulation_dt(physics_dt=physics_timestep, rendering_dt=action_timestep)
og.sim.viewer_width = self.render_config["viewer_width"]
@@ -321,6 +327,10 @@ def load(self):
self.load_observation_space()
self._load_action_space()
+ # Start the scene graph builder
+ if self._scene_graph_builder:
+ self._scene_graph_builder.start(self.scene)
+
# Denote that the scene is loaded
self._loaded = True
@@ -351,6 +361,16 @@ def get_obs(self):
obs = recursively_generate_flat_dict(dic=obs)
return obs
+
+ def get_scene_graph(self):
+ """
+ Get the current scene graph.
+
+ Returns:
+ SceneGraph: Current scene graph
+ """
+ assert self._scene_graph_builder is not None, "Scene graph builder must be specified in config!"
+ return self._scene_graph_builder.get_scene_graph()
def _populate_info(self, info):
"""
@@ -364,6 +384,9 @@ def _populate_info(self, info):
"""
info["episode_length"] = self._current_step
+ if self._scene_graph_builder is not None:
+ info["scene_graph"] = self.get_scene_graph()
+
def step(self, action):
"""
Apply robot's action and return the next state, reward, done and info,
@@ -381,41 +404,48 @@ def step(self, action):
- bool: done, i.e. whether this episode is terminated
- dict: info, i.e. dictionary with any useful information
"""
- # If the action is not a dictionary, convert into a dictionary
- if not isinstance(action, dict) and not isinstance(action, gym.spaces.Dict):
- action_dict = dict()
- idx = 0
+ try:
+ # If the action is not a dictionary, convert into a dictionary
+ if not isinstance(action, dict) and not isinstance(action, gym.spaces.Dict):
+ action_dict = dict()
+ idx = 0
+ for robot in self.robots:
+ action_dim = robot.action_dim
+ action_dict[robot.name] = action[idx: idx + action_dim]
+ idx += action_dim
+ else:
+ # Our inputted action is the action dictionary
+ action_dict = action
+
+ # Iterate over all robots and apply actions
for robot in self.robots:
- action_dim = robot.action_dim
- action_dict[robot.name] = action[idx: idx + action_dim]
- idx += action_dim
- else:
- # Our inputted action is the action dictionary
- action_dict = action
+ robot.apply_action(action_dict[robot.name])
- # Iterate over all robots and apply actions
- for robot in self.robots:
- robot.apply_action(action_dict[robot.name])
+ # Run simulation step
+ og.sim.step()
- # Run simulation step
- og.sim.step()
+ # Grab observations
+ obs = self.get_obs()
- # Grab observations
- obs = self.get_obs()
+ # Step the scene graph builder if necessary
+ if self._scene_graph_builder is not None:
+ self._scene_graph_builder.step(self.scene)
- # Grab reward, done, and info, and populate with internal info
- reward, done, info = self.task.step(self, action)
- self._populate_info(info)
+ # Grab reward, done, and info, and populate with internal info
+ reward, done, info = self.task.step(self, action)
+ self._populate_info(info)
- if done and self._automatic_reset:
- # Add lost observation to our information dict, and reset
- info["last_observation"] = obs
- obs = self.reset()
+ if done and self._automatic_reset:
+ # Add lost observation to our information dict, and reset
+ info["last_observation"] = obs
+ obs = self.reset()
- # Increment step
- self._current_step += 1
+ # Increment step
+ self._current_step += 1
- return obs, reward, done, info
+ return obs, reward, done, info
+ except Exception as e:
+ raise ValueError(f"Failed to execute environment step {self._current_step} in episode {self._current_episode}") from e
def _reset_variables(self):
"""
diff --git a/omnigibson/examples/action_primitives/solve_task.py b/omnigibson/examples/action_primitives/solve_task.py
new file mode 100644
index 000000000..a9bed9daa
--- /dev/null
+++ b/omnigibson/examples/action_primitives/solve_task.py
@@ -0,0 +1,83 @@
+"""
+Example script demo'ing robot control to solve a task
+
+Options for keyboard control to solve the task or programmatic action primitives
+"""
+import os
+import yaml
+
+import omnigibson as og
+from omnigibson.macros import gm
+from omnigibson.action_primitives.symbolic_semantic_action_primitives import SymbolicSemanticActionPrimitives, SymbolicSemanticActionPrimitiveSet
+from omnigibson.robots import REGISTERED_ROBOTS
+from omnigibson.utils.ui_utils import KeyboardRobotController
+
+
+# Use GPU dynamics (needed for fluid/particle support) and flatcache for a performance boost
+gm.USE_GPU_DYNAMICS = True
+gm.ENABLE_FLATCACHE = True
+
+def pause(time):
+ for _ in range(int(time*100)):
+ og.sim.step()
+
+def execute_controller(ctrl_gen, env):
+ for action in ctrl_gen:
+ env.step(action)
+
+def main(keyboard_control=False):
+ # Load the config
+ config_filename = os.path.join(og.example_config_path, "tiago_primitives.yaml")
+ config = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader)
+ if keyboard_control:
+ config["robots"][0]["action_normalize"] = True
+
+ # Update it to run a grocery shopping task
+ config["scene"]["scene_model"] = "Beechwood_0_int"
+ config["scene"]["load_task_relevant_only"] = True
+ config["scene"]["not_load_object_categories"] = ["ceilings"]
+ config["task"] = {
+ "type": "BehaviorTask",
+ "activity_name": "make_a_military_care_package",
+ "activity_definition_id": 0,
+ "activity_instance_id": 0,
+ "predefined_problem": None,
+ "online_object_sampling": False,
+ }
+
+ # Load the environment
+ env = og.Environment(configs=config)
+ scene = env.scene
+ robot = env.robots[0]
+
+ # Allow user to move camera more easily
+ og.sim.enable_viewer_camera_teleoperation()
+
+ if keyboard_control:
+ action_generator = KeyboardRobotController(robot=robot)
+ action_generator.print_keyboard_teleop_info()
+ print("Running demo.")
+ print("Press ESC to quit")
+ while True:
+ action = action_generator.get_teleop_action()
+ env.step(action=action)
+ else:
+ print("Move the camera around")
+ for _ in range(1000):
+ og.sim.render()
+ controller = SymbolicSemanticActionPrimitives(env)
+ # Grasp bottle of vodka
+ grasp_obj, = scene.object_registry("category", "bottle_of_vodka")
+
+ print("Executing controller")
+ execute_controller(controller.apply_ref(SymbolicSemanticActionPrimitiveSet.GRASP, grasp_obj), env)
+ print("Finished executing grasp")
+
+ # place bottle of vodka in box
+ print("Executing controller")
+ box, = scene.object_registry("category", "storage_box")
+ execute_controller(controller.apply_ref(SymbolicSemanticActionPrimitiveSet.PLACE_INSIDE, box), env)
+ print("Finished executing place")
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/omnigibson/examples/environments/behavior_env_demo.py b/omnigibson/examples/environments/behavior_env_demo.py
index fffbf876f..efa591ee3 100644
--- a/omnigibson/examples/environments/behavior_env_demo.py
+++ b/omnigibson/examples/environments/behavior_env_demo.py
@@ -8,6 +8,7 @@
# Make sure object states are enabled
gm.ENABLE_OBJECT_STATES = True
+gm.USE_GPU_DYNAMICS = True
def main(random_selection=False, headless=False, short_exec=False):
diff --git a/omnigibson/macros.py b/omnigibson/macros.py
index 6538479e1..045563ecc 100644
--- a/omnigibson/macros.py
+++ b/omnigibson/macros.py
@@ -33,7 +33,7 @@
# Do not suppress known omni warnings / errors, and also put omnigibson in a debug state
# This includes extra information for things such as object sampling, and also any debug
# logging messages
-gm.DEBUG = False
+gm.DEBUG = (os.getenv("OMNIGIBSON_DEBUG", 'False').lower() in ('true', '1', 't'))
# Whether to print out disclaimers (i.e.: known failure cases resulting from Omniverse's current bugs / limitations)
gm.SHOW_DISCLAIMERS = False
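
With gm.DEBUG now read from the environment, debug mode can be toggled without editing the source; the variable just has to be set before omnigibson is imported:

import os
os.environ["OMNIGIBSON_DEBUG"] = "1"  # "true", "1", or "t" (case-insensitive) enables debug mode

import omnigibson as og
from omnigibson.macros import gm
assert gm.DEBUG
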
diff --git a/omnigibson/maps/traversable_map.py b/omnigibson/maps/traversable_map.py
index 8d1e7a6e4..83f3fb299 100644
--- a/omnigibson/maps/traversable_map.py
+++ b/omnigibson/maps/traversable_map.py
@@ -155,8 +155,11 @@ def build_trav_graph(map_size, maps_path, floor, trav_map):
# only take the largest connected component
largest_cc = max(nx.connected_components(g), key=len)
g = g.subgraph(largest_cc).copy()
- with open(graph_file, "wb") as pfile:
- pickle.dump(g, pfile)
+ try:
+ with open(graph_file, "wb") as pfile:
+ pickle.dump(g, pfile)
+ except:
+ log.warning("Cannot cache traversable graph to disk possibly because dataset is read-only. Will have to recompute each time.")
floor_graph.append(g)
diff --git a/omnigibson/object_states/__init__.py b/omnigibson/object_states/__init__.py
index f36cd7b61..3eb19ab4f 100644
--- a/omnigibson/object_states/__init__.py
+++ b/omnigibson/object_states/__init__.py
@@ -21,6 +21,7 @@
from omnigibson.object_states.particle_modifier import ParticleRemover, ParticleApplier
from omnigibson.object_states.particle_source_or_sink import ParticleSource, ParticleSink
from omnigibson.object_states.pose import Pose
+from omnigibson.object_states.robot_related_states import IsGrasping
from omnigibson.object_states.saturated import Saturated
from omnigibson.object_states.temperature import Temperature
from omnigibson.object_states.toggle import ToggledOn
diff --git a/omnigibson/object_states/factory.py b/omnigibson/object_states/factory.py
index 75b33aadf..731ee14b7 100644
--- a/omnigibson/object_states/factory.py
+++ b/omnigibson/object_states/factory.py
@@ -4,6 +4,7 @@
from omnigibson.object_states import *
_ABILITY_TO_STATE_MAPPING = {
+ "robot": [IsGrasping],
"attachable": [AttachedTo],
"blender": [],
"particleApplier": [ParticleApplier],
diff --git a/omnigibson/object_states/open_state.py b/omnigibson/object_states/open_state.py
index 1d59441e1..1c704bd0c 100644
--- a/omnigibson/object_states/open_state.py
+++ b/omnigibson/object_states/open_state.py
@@ -196,9 +196,12 @@ def _set_value(self, new_value, fully=False):
"""
Set the openness state, either to a random joint position satisfying the new value, or fully open/closed.
- @param new_value: bool value for the openness state of the object.
- @param fully: whether the object should be fully opened/closed (e.g. all relevant joints to 0/1).
- @return: bool indicating setter success. Failure may happen due to unannotated objects.
+ Args:
+ new_value (bool): The new value for the openness state of the object.
+ fully (bool): Whether the object should be fully opened/closed (e.g. all relevant joints to 0/1).
+
+ Returns:
+ bool: A boolean indicating the success of the setter. Failure may happen due to unannotated objects.
"""
both_sides, relevant_joints, joint_directions = self.relevant_joints_info
if not relevant_joints:
diff --git a/omnigibson/object_states/particle_modifier.py b/omnigibson/object_states/particle_modifier.py
index e12185855..c6676fba9 100644
--- a/omnigibson/object_states/particle_modifier.py
+++ b/omnigibson/object_states/particle_modifier.py
@@ -16,10 +16,10 @@
from omnigibson.systems.system_base import VisualParticleSystem, PhysicalParticleSystem, get_system, \
is_visual_particle_system, is_physical_particle_system, is_system_active, REGISTERED_SYSTEMS
from omnigibson.utils.constants import ParticleModifyMethod, ParticleModifyCondition, PrimType
+from omnigibson.utils.deprecated_utils import Core
from omnigibson.utils.geometry_utils import generate_points_in_volume_checker_function, \
get_particle_positions_in_frame, get_particle_positions_from_frame
from omnigibson.utils.python_utils import classproperty
-from omnigibson.utils.deprecated_utils import Core
from omnigibson.utils.ui_utils import suppress_omni_log
from omnigibson.utils.usd_utils import create_primitive_mesh, FlatcacheAPI
import omnigibson.utils.transform_utils as T
@@ -115,6 +115,7 @@ def create_projection_visualization(
source.GetRadiusAttr().Set(source_radius)
# Also make the prim invisible
UsdGeom.Imageable(source.GetPrim()).MakeInvisible()
+
# Generate the ComputeGraph nodes to render the projection
core = Core(lambda val: None, particle_system_name=projection_name)
@@ -509,6 +510,36 @@ def condition(obj):
return condition
+ def supports_system(self, system_name):
+ """
+ Checks whether this particle modifier supports adding/removing a particle from the specified
+ system, e.g. whether there exists any configuration (toggled on, etc.) in which this modifier
+ can be used to interact with any particles of this system.
+
+ Args:
+ system_name (str): Name of the particle system to check
+
+ Returns:
+ bool: Whether this particle modifier can add or remove a particle from the specified system
+ """
+ return system_name in self.conditions
+
+ def check_conditions_for_system(self, system_name):
+ """
+ Checks whether this particle modifier can add or remove a particle from the specified system
+ in its current configuration, e.g. all of the conditions for addition/removal other than
+ physical position are met.
+
+ Args:
+ system_name (str): Name of the particle system to check
+
+ Returns:
+ bool: Whether this particle modifier can add or remove a particle from the specified system
+ """
+ if not self.supports_system(system_name):
+ return False
+ return all(condition(self.obj) for condition in self.conditions[system_name])
+
def _update(self):
# If we're using projection method and flatcache, we need to manually update this object's transforms on the USD
# so the corresponding visualization and overlap meshes are updated properly
@@ -518,11 +549,11 @@ def _update(self):
# Check if there's any overlap and if we're at the correct step
if self._current_step == 0 and (not self.requires_overlap or self._check_overlap()):
# Iterate over all owned systems for this particle modifier
- for system_name, conditions in self.conditions.items():
+ for system_name in self.conditions.keys():
# Check if the system is active (for ParticleApplier, the system is always active)
if is_system_active(system_name):
# Check if all conditions are met
- if np.all([condition(self.obj) for condition in conditions]):
+ if self.check_conditions_for_system(system_name):
system = get_system(system_name)
# Update saturation limit if it's not specified yet
limit = self.visual_particle_modification_limit \
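
A short sketch of how the two new ParticleRemover helpers are meant to be called, mirroring their use in the symbolic primitives earlier in this diff (the `sponge` object and the "water" system name are illustrative):

remover = sponge.states[object_states.ParticleRemover]
if remover.supports_system("water"):
    # Annotated for water in *some* configuration...
    if remover.check_conditions_for_system("water"):
        # ...and all non-positional conditions (toggled on, pre-soaked, etc.) currently hold.
        print("ready to remove water particles")
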
diff --git a/omnigibson/object_states/robot_related_states.py b/omnigibson/object_states/robot_related_states.py
new file mode 100644
index 000000000..47040e899
--- /dev/null
+++ b/omnigibson/object_states/robot_related_states.py
@@ -0,0 +1,62 @@
+import numpy as np
+import omnigibson as og
+from omnigibson.object_states.object_state_base import AbsoluteObjectState, BooleanStateMixin, RelativeObjectState
+
+
+_IN_REACH_DISTANCE_THRESHOLD = 2.0
+
+_IN_FOV_PIXEL_FRACTION_THRESHOLD = 0.05
+
+
+class RobotStateMixin:
+ @property
+ def robot(self):
+ from omnigibson.robots.robot_base import BaseRobot
+ assert isinstance(self.obj, BaseRobot), "This state only works with robots."
+ return self.obj
+
+
+class IsGrasping(RelativeObjectState, BooleanStateMixin, RobotStateMixin):
+ def _get_value(self, obj):
+ # TODO: Make this work with non-assisted grasping
+ return any(
+ self.robot._ag_obj_in_hand[arm] == obj
+ for arm in self.robot.arm_names
+ )
+
+
+# class InReachOfRobot(AbsoluteObjectState, BooleanStateMixin):
+# def _compute_value(self):
+# robot = _get_robot(self.simulator)
+# if not robot:
+# return False
+
+# robot_pos = robot.get_position()
+# object_pos = self.obj.get_position()
+# return np.linalg.norm(object_pos - np.array(robot_pos)) < _IN_REACH_DISTANCE_THRESHOLD
+
+
+# class InFOVOfRobot(AbsoluteObjectState, BooleanStateMixin):
+# @staticmethod
+# def get_optional_dependencies():
+# return AbsoluteObjectState.get_optional_dependencies() + [ObjectsInFOVOfRobot]
+
+# def _get_value(self):
+# robot = _get_robot(self.simulator)
+# if not robot:
+# return False
+
+# body_ids = set(self.obj.get_body_ids())
+# return not body_ids.isdisjoint(robot.states[ObjectsInFOVOfRobot].get_value())
+
+
+# class ObjectsInFOVOfRobot(AbsoluteObjectState):
+# def _get_value(self):
+# # Pass the FOV through the instance-to-body ID mapping.
+# seg = self.simulator.renderer.render_single_robot_camera(self.obj, modes="ins_seg")[0][:, :, 0]
+# seg = np.round(seg * MAX_INSTANCE_COUNT).astype(int)
+# body_ids = self.simulator.renderer.get_pb_ids_for_instance_ids(seg)
+
+# # Pixels that don't contain an object are marked -1 but we don't want to include that
+# # as a body ID.
+# return set(np.unique(body_ids)) - {-1}
\ No newline at end of file
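
Because factory.py above maps the "robot" ability to IsGrasping, robots expose it like any other relative state; a minimal sketch (`robot` and `mug` are placeholders for an imported robot and a grasped object):

from omnigibson import object_states

is_holding = robot.states[object_states.IsGrasping].get_value(mug)

Note that, per the TODO in the new file, this currently only reflects assisted/sticky grasping, since it reads the robot's _ag_obj_in_hand bookkeeping.
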
diff --git a/omnigibson/object_states/toggle.py b/omnigibson/object_states/toggle.py
index d27cf88fa..781c12139 100644
--- a/omnigibson/object_states/toggle.py
+++ b/omnigibson/object_states/toggle.py
@@ -89,7 +89,10 @@ def overlap_callback(hit):
def check_overlap():
nonlocal valid_hit
valid_hit = False
- og.sim.psqi.overlap_mesh(*projection_mesh_ids, reportFn=overlap_callback)
+ if self.visual_marker.prim.GetTypeName() == "Mesh":
+ og.sim.psqi.overlap_mesh(*projection_mesh_ids, reportFn=overlap_callback)
+ else:
+ og.sim.psqi.overlap_shape(*projection_mesh_ids, reportFn=overlap_callback)
return valid_hit
self._check_overlap = check_overlap
diff --git a/omnigibson/objects/dataset_object.py b/omnigibson/objects/dataset_object.py
index aa536ce61..37e582e70 100644
--- a/omnigibson/objects/dataset_object.py
+++ b/omnigibson/objects/dataset_object.py
@@ -235,7 +235,7 @@ def _post_load(self):
valid_idxes = self.native_bbox > 1e-4
scale[valid_idxes] = np.array(self._load_config["bounding_box"])[valid_idxes] / self.native_bbox[valid_idxes]
else:
- scale = np.ones(3) if self._load_config["scale"] is None else self._load_config["scale"]
+ scale = np.ones(3) if self._load_config["scale"] is None else np.array(self._load_config["scale"])
# Assert that the scale does not have too small dimensions
assert np.all(scale > 1e-4), f"Scale of {self.name} is too small: {scale}"
@@ -483,7 +483,6 @@ def get_base_aligned_bbox(self, link_name=None, visual=False, xy_aligned=False,
- 3-array: (x,y,z) bbox extent in desired frame
- 3-array: (x,y,z) bbox center in desired frame
"""
- assert self.prim_type == PrimType.RIGID, "get_base_aligned_bbox is only supported for rigid objects."
bbox_type = "visual" if visual else "collision"
# Get the base position transform.
@@ -497,61 +496,70 @@ def get_base_aligned_bbox(self, link_name=None, visual=False, xy_aligned=False,
# this set of points to get our final, base-frame bounding box.
points = []
- links = {link_name: self._links[link_name]} if link_name is not None else self._links
- for link_name, link in links.items():
- # If the link has no visual or collision meshes, we skip over it (based on the @visual flag)
- meshes = link.visual_meshes if visual else link.collision_meshes
- if len(meshes) == 0:
- continue
-
- # If the link has a bounding box annotation.
- if self.native_link_bboxes is not None and link_name in self.native_link_bboxes:
- # Check if the annotation is still missing.
- if bbox_type not in self.native_link_bboxes[link_name]:
- raise ValueError(f"Could not find {bbox_type} bounding box for object {self.name} link {link_name}")
-
- # Get the extent and transform.
- bb_data = self.native_link_bboxes[link_name][bbox_type][link_bbox_type]
- extent_in_bbox_frame = np.array(bb_data["extent"])
- bbox_to_link_origin = np.array(bb_data["transform"])
-
- # # Get the link's pose in the base frame.
- link_frame_to_world = T.pose2mat(link.get_position_orientation())
- link_frame_to_base_frame = world_to_base_frame @ link_frame_to_world
-
- # Scale the bounding box in link origin frame. Here we create a transform that first puts the bounding
- # box's vertices into the link frame, and then scales them to match the scale applied to this object.
- # Note that once scaled, the vertices of the bounding box do not necessarily form a cuboid anymore but
- # instead a parallelepiped. This is not a problem because we later fit a bounding box to the points,
- # this time in the object's base link frame.
- scale_in_link_frame = np.diag(np.concatenate([self.scales_in_link_frame[link_name], [1]]))
- bbox_to_scaled_link_origin = np.dot(scale_in_link_frame, bbox_to_link_origin)
-
- # Compute the bounding box vertices in the base frame.
- # bbox_to_link_com = np.dot(link_origin_to_link_com, bbox_to_scaled_link_origin)
- bbox_center_in_base_frame = np.dot(link_frame_to_base_frame, bbox_to_scaled_link_origin)
- vertices_in_base_frame = np.array(list(itertools.product((1, -1), repeat=3))) * (extent_in_bbox_frame / 2)
-
- # Add the points to our collection of points.
- points.extend(trimesh.transformations.transform_points(vertices_in_base_frame, bbox_center_in_base_frame))
- elif fallback_to_aabb:
- # If we're visual and the mesh is not visible, there is no fallback so continue
- if bbox_type == "visual" and not np.all(tuple(mesh.visible for mesh in meshes.values())):
+ if self.prim_type == PrimType.CLOTH:
+ particle_contact_offset = self.root_link.cloth_system.particle_contact_offset
+ particle_positions = self.root_link.compute_particle_positions()
+ particles_in_world_frame = np.concatenate([
+ particle_positions - particle_contact_offset,
+ particle_positions + particle_contact_offset
+ ], axis=0)
+ points.extend(trimesh.transformations.transform_points(particles_in_world_frame, world_to_base_frame))
+ else:
+ links = {link_name: self._links[link_name]} if link_name is not None else self._links
+ for link_name, link in links.items():
+ # If the link has no visual or collision meshes, we skip over it (based on the @visual flag)
+ meshes = link.visual_meshes if visual else link.collision_meshes
+ if len(meshes) == 0:
continue
- # If no BB annotation is available, get the AABB for this link.
- aabb_center, aabb_extent = BoundingBoxAPI.compute_center_extent(prim=link)
- aabb_vertices_in_world = aabb_center + np.array(list(itertools.product((1, -1), repeat=3))) * (
- aabb_extent / 2
- )
- aabb_vertices_in_base_frame = trimesh.transformations.transform_points(
- aabb_vertices_in_world, world_to_base_frame
- )
- points.extend(aabb_vertices_in_base_frame)
- else:
- raise ValueError(
- "Bounding box annotation missing for link: %s. Use fallback_to_aabb=True if you're okay with using "
- "AABB as fallback." % link_name
- )
+
+ # If the link has a bounding box annotation.
+ if self.native_link_bboxes is not None and link_name in self.native_link_bboxes:
+ # Check if the annotation is still missing.
+ if bbox_type not in self.native_link_bboxes[link_name]:
+ raise ValueError(f"Could not find {bbox_type} bounding box for object {self.name} link {link_name}")
+
+ # Get the extent and transform.
+ bb_data = self.native_link_bboxes[link_name][bbox_type][link_bbox_type]
+ extent_in_bbox_frame = np.array(bb_data["extent"])
+ bbox_to_link_origin = np.array(bb_data["transform"])
+
+ # # Get the link's pose in the base frame.
+ link_frame_to_world = T.pose2mat(link.get_position_orientation())
+ link_frame_to_base_frame = world_to_base_frame @ link_frame_to_world
+
+ # Scale the bounding box in link origin frame. Here we create a transform that first puts the bounding
+ # box's vertices into the link frame, and then scales them to match the scale applied to this object.
+ # Note that once scaled, the vertices of the bounding box do not necessarily form a cuboid anymore but
+ # instead a parallelepiped. This is not a problem because we later fit a bounding box to the points,
+ # this time in the object's base link frame.
+ scale_in_link_frame = np.diag(np.concatenate([self.scales_in_link_frame[link_name], [1]]))
+ bbox_to_scaled_link_origin = np.dot(scale_in_link_frame, bbox_to_link_origin)
+
+ # Compute the bounding box vertices in the base frame.
+ # bbox_to_link_com = np.dot(link_origin_to_link_com, bbox_to_scaled_link_origin)
+ bbox_center_in_base_frame = np.dot(link_frame_to_base_frame, bbox_to_scaled_link_origin)
+ vertices_in_base_frame = np.array(list(itertools.product((1, -1), repeat=3))) * (extent_in_bbox_frame / 2)
+
+ # Add the points to our collection of points.
+ points.extend(trimesh.transformations.transform_points(vertices_in_base_frame, bbox_center_in_base_frame))
+ elif fallback_to_aabb: # always default to AABB for cloth
+ # If we're visual and the mesh is not visible, there is no fallback so continue
+ if bbox_type == "visual" and not np.all(tuple(mesh.visible for mesh in meshes.values())):
+ continue
+ # If no BB annotation is available, get the AABB for this link.
+ aabb_center, aabb_extent = BoundingBoxAPI.compute_center_extent(prim=link)
+ aabb_vertices_in_world = aabb_center + np.array(list(itertools.product((1, -1), repeat=3))) * (
+ aabb_extent / 2
+ )
+ aabb_vertices_in_base_frame = trimesh.transformations.transform_points(
+ aabb_vertices_in_world, world_to_base_frame
+ )
+ points.extend(aabb_vertices_in_base_frame)
+ else:
+ raise ValueError(
+ "Bounding box annotation missing for link: %s. Use fallback_to_aabb=True if you're okay with using "
+ "AABB as fallback." % link_name
+ )
if xy_aligned:
# If the user requested an XY-plane aligned bbox, convert everything to that frame.
diff --git a/omnigibson/objects/light_object.py b/omnigibson/objects/light_object.py
index 62d975bd4..1805c483e 100644
--- a/omnigibson/objects/light_object.py
+++ b/omnigibson/objects/light_object.py
@@ -161,7 +161,8 @@ def radius(self):
Returns:
float: radius for this light
"""
- return self._light_link.get_attribute("radius")
+ return self._light_link.get_attribute(
+ "inputs:radius" if meets_minimum_isaac_version("2023.0.0") else "radius")
@radius.setter
def radius(self, radius):
@@ -171,7 +172,9 @@ def radius(self, radius):
Args:
radius (float): radius to set
"""
- self._light_link.set_attribute("radius", radius)
+ self._light_link.set_attribute(
+ "inputs:radius" if meets_minimum_isaac_version("2023.0.0") else "radius",
+ radius)
@property
def intensity(self):
diff --git a/omnigibson/prims/entity_prim.py b/omnigibson/prims/entity_prim.py
index ea33efd2b..14793c37d 100644
--- a/omnigibson/prims/entity_prim.py
+++ b/omnigibson/prims/entity_prim.py
@@ -807,7 +807,10 @@ def get_joint_positions(self, normalized=False):
assert self._handle is not None, "handles are not initialized yet!"
assert self.n_joints > 0, "Tried to call method not intended for entity prim with no joints!"
- joint_positions = self._dc.get_articulation_dof_states(self._handle, _dynamic_control.STATE_POS)["pos"]
+ assert self._dc is not None, f"Dynamic control interface is not initialized for {self._name}"
+ joint_states = self._dc.get_articulation_dof_states(self._handle, _dynamic_control.STATE_POS)
+ assert joint_states is not None, f"Failed to get joint states for {self.name}"
+ joint_positions = joint_states["pos"]
# Possibly normalize values when returning
return self._normalize_positions(positions=joint_positions) if normalized else joint_positions
diff --git a/omnigibson/prims/rigid_prim.py b/omnigibson/prims/rigid_prim.py
index 04d4da334..c3bb50217 100644
--- a/omnigibson/prims/rigid_prim.py
+++ b/omnigibson/prims/rigid_prim.py
@@ -183,7 +183,7 @@ def update_meshes(self):
# We need to translate the center of mass from the mesh's local frame to the link's local frame
local_pos, local_orn = mesh.get_local_pose()
coms.append(T.quat2mat(local_orn) @ (com * mesh.scale) + local_pos)
- # If we're not a valid volume, use bounding box approximation for the underlying collision approx
+ # If we're not a valid volume, use bounding box approximation for the underlying collision approximation
if not is_volume:
log.warning(f"Got invalid (non-volume) collision mesh: {mesh.name}")
mesh.set_collision_approximation("boundingCube")
@@ -294,6 +294,8 @@ def set_position_orientation(self, position=None, orientation=None):
position = current_position
if orientation is None:
orientation = current_orientation
+ assert np.isclose(np.linalg.norm(orientation), 1, atol=1e-3), \
+ f"{self.prim_path} desired orientation {orientation} is not a unit quaternion."
pose = _dynamic_control.Transform(position, orientation)
self._dc.set_rigid_body_pose(self._handle, pose)
else:
@@ -308,7 +310,9 @@ def get_position_orientation(self):
# Call super method by default
pos, ori = super().get_position_orientation()
- return np.array(pos), np.array(ori)
+ assert np.isclose(np.linalg.norm(ori), 1, atol=1e-3), \
+ f"{self.prim_path} orientation {ori} is not a unit quaternion."
+ return pos, ori
def set_local_pose(self, translation=None, orientation=None):
if self.dc_is_accessible:
diff --git a/omnigibson/prims/xform_prim.py b/omnigibson/prims/xform_prim.py
index dc50c013e..041aa9e6c 100644
--- a/omnigibson/prims/xform_prim.py
+++ b/omnigibson/prims/xform_prim.py
@@ -154,6 +154,8 @@ def set_position_orientation(self, position=None, orientation=None):
position = current_position if position is None else np.array(position, dtype=float)
orientation = current_orientation if orientation is None else np.array(orientation, dtype=float)
orientation = orientation[[3, 0, 1, 2]] # Flip from x,y,z,w to w,x,y,z
+ assert np.isclose(np.linalg.norm(orientation), 1, atol=1e-3), \
+ f"{self.prim_path} desired orientation {orientation} is not a unit quaternion."
mat = Gf.Transform()
mat.SetRotation(Gf.Rotation(Gf.Quatd(*orientation)))
@@ -236,6 +238,22 @@ def get_rpy(self):
3-array: (roll, pitch, yaw) global euler orientation of this prim
"""
return quat2euler(self.get_orientation())
+
+ def get_2d_orientation(self):
+ """
+ Get this prim's orientation on the XY plane of the world frame. This is obtained by
+ projecting the forward vector onto the XY plane and then computing the angle.
+ """
+ fwd = R.from_quat(self.get_orientation()).apply([1, 0, 0])
+ fwd[2] = 0.
+
+ # If the object is facing close to straight up, then we can't compute a 2D orientation
+ # in that case, we return zero.
+ if np.linalg.norm(fwd) < 1e-4:
+ return 0.
+
+ fwd /= np.linalg.norm(fwd)
+ return np.arctan2(fwd[1], fwd[0])
def get_local_pose(self):
"""
diff --git a/omnigibson/robots/__init__.py b/omnigibson/robots/__init__.py
index 3c3088cdb..17b4af9a0 100644
--- a/omnigibson/robots/__init__.py
+++ b/omnigibson/robots/__init__.py
@@ -9,3 +9,5 @@
from omnigibson.robots.fetch import Fetch
from omnigibson.robots.tiago import Tiago
from omnigibson.robots.two_wheel_robot import TwoWheelRobot
+from omnigibson.robots.franka import FrankaPanda
+from omnigibson.robots.franka_allegro import FrankaAllegro
diff --git a/omnigibson/robots/fetch.py b/omnigibson/robots/fetch.py
index ebbc189d8..5e41af749 100644
--- a/omnigibson/robots/fetch.py
+++ b/omnigibson/robots/fetch.py
@@ -7,8 +7,11 @@
from omnigibson.robots.manipulation_robot import GraspingPoint, ManipulationRobot
from omnigibson.robots.two_wheel_robot import TwoWheelRobot
from omnigibson.utils.python_utils import assert_valid_key
+from omnigibson.utils.ui_utils import create_module_logger
from omnigibson.utils.usd_utils import JointType
+log = create_module_logger(module_name=__name__)
+
DEFAULT_ARM_POSES = {
"vertical",
"diagonal15",
@@ -60,6 +63,7 @@ def __init__(
# Unique to ManipulationRobot
grasping_mode="physical",
+ disable_grasp_handling=False,
# Unique to Fetch
rigid_trunk=False,
@@ -112,6 +116,8 @@ def __init__(
If "physical", no assistive grasping will be applied (relies on contact friction + finger force).
If "assisted", will magnetize any object touching and within the gripper's fingers.
If "sticky", will magnetize any object touching the gripper's fingers.
+ disable_grasp_handling (bool): If True, will disable all grasp handling for this object. This means that
+ sticky and assisted grasp modes will not work unless the connection/release methods are manually called.
rigid_trunk (bool) if True, will prevent the trunk from moving during execution.
default_trunk_offset (float): sets the default height of the robot's trunk
default_arm_pose (str): Default pose for the robot arm. Should be one of:
@@ -156,6 +162,7 @@ def __init__(
proprio_obs=proprio_obs,
sensor_config=sensor_config,
grasping_mode=grasping_mode,
+ disable_grasp_handling=disable_grasp_handling,
**kwargs,
)
@@ -216,7 +223,24 @@ def untucked_default_joint_pos(self):
else:
raise ValueError("Unknown default arm pose: {}".format(self.default_arm_pose))
return pos
-
+
+ def _post_load(self):
+ super()._post_load()
+
+ # Set the wheels back to using sphere approximations
+ for wheel_name in ["l_wheel_link", "r_wheel_link"]:
+ log.warning(
+ "Fetch wheel links are post-processed to use sphere approximation collision meshes."
+ "Please ignore any previous errors about these collision meshes.")
+ wheel_link = self.links[wheel_name]
+ assert set(wheel_link.collision_meshes) == {"collisions"}, "Wheel link should only have 1 collision!"
+ wheel_link.collision_meshes["collisions"].set_collision_approximation("boundingSphere")
+
+ # Also apply a convex decomposition to the torso lift link
+ torso_lift_link = self.links["torso_lift_link"]
+ assert set(torso_lift_link.collision_meshes) == {"collisions"}, "Torso lift link should only have 1 collision!"
+ torso_lift_link.collision_meshes["collisions"].set_collision_approximation("convexDecomposition")
+
@property
def discrete_action_list(self):
# Not supported for this robot
@@ -387,6 +411,40 @@ def disabled_collision_pairs(self):
return [
["torso_lift_link", "shoulder_lift_link"],
["torso_lift_link", "torso_fixed_link"],
+ ["torso_lift_link", "estop_link"],
+ ["base_link", "laser_link"],
+ ["base_link", "torso_fixed_link"],
+ ["base_link", "l_wheel_link"],
+ ["base_link", "r_wheel_link"],
+ ["base_link", "estop_link"],
+ ["torso_lift_link", "shoulder_pan_link"],
+ ["torso_lift_link", "head_pan_link"],
+ ["head_pan_link", "head_tilt_link"],
+ ["shoulder_pan_link", "shoulder_lift_link"],
+ ["shoulder_lift_link", "upperarm_roll_link"],
+ ["upperarm_roll_link", "elbow_flex_link"],
+ ["elbow_flex_link", "forearm_roll_link"],
+ ["forearm_roll_link", "wrist_flex_link"],
+ ["wrist_flex_link", "wrist_roll_link"],
+ ["wrist_roll_link", "gripper_link"],
+ ]
+
+ @property
+ def manipulation_link_names(self):
+ return [
+ "torso_lift_link",
+ "head_pan_link",
+ "head_tilt_link",
+ "shoulder_pan_link",
+ "shoulder_lift_link",
+ "upperarm_roll_link",
+ "elbow_flex_link",
+ "forearm_roll_link",
+ "wrist_flex_link",
+ "wrist_roll_link",
+ "gripper_link",
+ "l_gripper_finger_link",
+ "r_gripper_finger_link",
]
@property
@@ -437,3 +495,9 @@ def robot_arm_descriptor_yamls(self):
@property
def urdf_path(self):
return os.path.join(gm.ASSET_PATH, "models/fetch/fetch.urdf")
+
+ @property
+ def arm_workspace_range(self):
+ return {
+ self.default_arm : [np.deg2rad(-45), np.deg2rad(45)]
+ }
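
A hedged sketch of how the new Fetch options might be passed through a config (field values are illustrative; only grasping_mode and the new disable_grasp_handling flag come from this diff):

cfg["robots"] = [{
    "type": "Fetch",
    "name": "fetch",
    "grasping_mode": "sticky",
    "disable_grasp_handling": True,  # attachment/release must now be triggered manually
}]
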
diff --git a/omnigibson/robots/franka.py b/omnigibson/robots/franka.py
new file mode 100644
index 000000000..c4e2f9a90
--- /dev/null
+++ b/omnigibson/robots/franka.py
@@ -0,0 +1,206 @@
+import os
+import numpy as np
+
+from omnigibson.macros import gm
+from omnigibson.robots.manipulation_robot import ManipulationRobot
+
+
+class FrankaPanda(ManipulationRobot):
+ """
+ The Franka Emika Panda robot
+ """
+
+ def __init__(
+ self,
+ # Shared kwargs in hierarchy
+ name,
+ prim_path=None,
+ class_id=None,
+ uuid=None,
+ scale=None,
+ visible=True,
+ visual_only=False,
+ self_collisions=True,
+ load_config=None,
+ fixed_base=True,
+
+ # Unique to USDObject hierarchy
+ abilities=None,
+
+ # Unique to ControllableObject hierarchy
+ control_freq=None,
+ controller_config=None,
+ action_type="continuous",
+ action_normalize=True,
+ reset_joint_pos=None,
+
+ # Unique to BaseRobot
+ obs_modalities="all",
+ proprio_obs="default",
+ sensor_config=None,
+
+ # Unique to ManipulationRobot
+ grasping_mode="physical",
+
+ **kwargs,
+ ):
+ """
+ Args:
+ name (str): Name for the object. Names need to be unique per scene
+ prim_path (None or str): global path in the stage to this object. If not specified, will automatically be
+ created at /World/
+ class_id (None or int): What class ID the object should be assigned in semantic segmentation rendering mode.
+ If None, the ID will be inferred from this object's category.
+ uuid (None or int): Unique unsigned-integer identifier to assign to this object (max 8-numbers).
+ If None is specified, then it will be auto-generated
+ scale (None or float or 3-array): if specified, sets either the uniform (float) or x,y,z (3-array) scale
+ for this object. A single number corresponds to uniform scaling along the x,y,z axes, whereas a
+ 3-array specifies per-axis scaling.
+ visible (bool): whether to render this object or not in the stage
+ visual_only (bool): Whether this object should be visual only (and not collide with any other objects)
+ self_collisions (bool): Whether to enable self collisions for this object
+ load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
+ loading this prim at runtime.
+ abilities (None or dict): If specified, manually adds specific object states to this object. It should be
+ a dict in the form of {ability: {param: value}} containing object abilities and parameters to pass to
+ the object state instance constructor.
+ control_freq (float): control frequency (in Hz) at which to control the object. If set to be None,
+ simulator.import_object will automatically set the control frequency to be 1 / render_timestep by default.
+ controller_config (None or dict): nested dictionary mapping controller name(s) to specific controller
+ configurations for this object. This will override any default values specified by this class.
+ action_type (str): one of {discrete, continuous} - what type of action space to use
+ action_normalize (bool): whether to normalize inputted actions. This will override any default values
+ specified by this class.
+ reset_joint_pos (None or n-array): if specified, should be the joint positions that the object should
+ be set to during a reset. If None (default), self.default_joint_pos will be used instead.
+ obs_modalities (str or list of str): Observation modalities to use for this robot. Default is "all", which
+ corresponds to all modalities being used.
+ Otherwise, valid options should be part of omnigibson.sensors.ALL_SENSOR_MODALITIES.
+ Note: If @sensor_config explicitly specifies `modalities` for a given sensor class, it will
+ override any values specified from @obs_modalities!
+ proprio_obs (str or list of str): proprioception observation key(s) to use for generating proprioceptive
+ observations. If str, should be exactly "default" -- this results in the default proprioception
+ observations being used, as defined by self.default_proprio_obs. See self._get_proprioception_dict
+ for valid key choices
+ sensor_config (None or dict): nested dictionary mapping sensor class name(s) to specific sensor
+ configurations for this object. This will override any default values specified by this class.
+ grasping_mode (str): One of {"physical", "assisted", "sticky"}.
+ If "physical", no assistive grasping will be applied (relies on contact friction + finger force).
+ If "assisted", will magnetize any object touching and within the gripper's fingers.
+ If "sticky", will magnetize any object touching the gripper's fingers.
+ kwargs (dict): Additional keyword arguments that are used for other super() calls from subclasses, allowing
+ for flexible compositions of various object subclasses (e.g.: Robot is USDObject + ControllableObject).
+ """
+ # Run super init
+ super().__init__(
+ prim_path=prim_path,
+ name=name,
+ class_id=class_id,
+ uuid=uuid,
+ scale=scale,
+ visible=visible,
+ fixed_base=fixed_base,
+ visual_only=visual_only,
+ self_collisions=self_collisions,
+ load_config=load_config,
+ abilities=abilities,
+ control_freq=control_freq,
+ controller_config=controller_config,
+ action_type=action_type,
+ action_normalize=action_normalize,
+ reset_joint_pos=reset_joint_pos,
+ obs_modalities=obs_modalities,
+ proprio_obs=proprio_obs,
+ sensor_config=sensor_config,
+ grasping_mode=grasping_mode,
+ **kwargs,
+ )
+
+ @property
+ def model_name(self):
+ return "FrankaPanda"
+
+ @property
+ def discrete_action_list(self):
+ # Not supported for this robot
+ raise NotImplementedError()
+
+ def _create_discrete_action_space(self):
+ # Franka does not support discrete actions
+ raise ValueError("Franka does not support discrete actions!")
+
+ def tuck(self):
+ """
+ Immediately set this robot's configuration to be in tucked mode
+ """
+ self.set_joint_positions(self.tucked_default_joint_pos)
+
+ def untuck(self):
+ """
+ Immediately set this robot's configuration to be in untucked mode
+ """
+ self.set_joint_positions(self.untucked_default_joint_pos)
+
+ def update_controller_mode(self):
+ super().update_controller_mode()
+ # overwrite joint params (e.g. damping, stiffness, max_effort) here
+
+ @property
+ def controller_order(self):
+ return ["arm_{}".format(self.default_arm), "gripper_{}".format(self.default_arm)]
+
+ @property
+ def _default_controllers(self):
+ controllers = super()._default_controllers
+ controllers["arm_{}".format(self.default_arm)] = "InverseKinematicsController"
+ controllers["gripper_{}".format(self.default_arm)] = "MultiFingerGripperController"
+ return controllers
+
+ @property
+ def default_joint_pos(self):
+ return np.array([0.00, -1.3, 0.00, -2.87, 0.00, 2.00, 0.75, 0.00, 0.00])
+
+ @property
+ def finger_lengths(self):
+ return {self.default_arm: 0.1}
+
+ @property
+ def arm_control_idx(self):
+ return {self.default_arm: np.arange(7)}
+
+ @property
+ def gripper_control_idx(self):
+ return {self.default_arm: np.arange(7, 9)}
+
+ @property
+ def arm_link_names(self):
+ return {self.default_arm: [f"panda_link{i}" for i in range(8)]}
+
+ @property
+ def arm_joint_names(self):
+ return {self.default_arm: [f"panda_joint_{i+1}" for i in range(7)]}
+
+ @property
+ def eef_link_names(self):
+ return {self.default_arm: "panda_hand"}
+
+ @property
+ def finger_link_names(self):
+ return {self.default_arm: ["panda_leftfinger", "panda_rightfinger"]}
+
+ @property
+ def finger_joint_names(self):
+ return {self.default_arm: ["panda_finger_joint1", "panda_finger_joint2"]}
+
+ @property
+ def usd_path(self):
+ return os.path.join(gm.ASSET_PATH, "models/franka/franka_panda.usd")
+
+ @property
+ def robot_arm_descriptor_yamls(self):
+ return {self.default_arm: os.path.join(gm.ASSET_PATH, "models/franka/franka_panda_description.yaml")}
+
+ @property
+ def urdf_path(self):
+ return os.path.join(gm.ASSET_PATH, "models/franka/franka_panda.urdf")
+
\ No newline at end of file
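
Since FrankaPanda is now exported from omnigibson.robots (see the __init__.py change above), it can be requested from a config like any other robot; a minimal, illustrative sketch (scene type and modalities are placeholders):

import omnigibson as og

cfg = {
    "scene": {"type": "Scene"},
    "robots": [{"type": "FrankaPanda", "name": "panda", "obs_modalities": ["rgb"]}],
}
env = og.Environment(configs=cfg)
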
diff --git a/omnigibson/robots/franka_allegro.py b/omnigibson/robots/franka_allegro.py
new file mode 100644
index 000000000..9303d313c
--- /dev/null
+++ b/omnigibson/robots/franka_allegro.py
@@ -0,0 +1,230 @@
+import os
+import numpy as np
+
+from omnigibson.macros import gm
+from omnigibson.robots.manipulation_robot import ManipulationRobot
+
+
+class FrankaAllegro(ManipulationRobot):
+ """
+ Franka Robot with Allegro hand
+ """
+
+ def __init__(
+ self,
+ # Shared kwargs in hierarchy
+ name,
+ prim_path=None,
+ class_id=None,
+ uuid=None,
+ scale=None,
+ visible=True,
+ visual_only=False,
+ self_collisions=True,
+ load_config=None,
+ fixed_base=True,
+
+ # Unique to USDObject hierarchy
+ abilities=None,
+
+ # Unique to ControllableObject hierarchy
+ control_freq=None,
+ controller_config=None,
+ action_type="continuous",
+ action_normalize=True,
+ reset_joint_pos=None,
+
+ # Unique to BaseRobot
+ obs_modalities="all",
+ proprio_obs="default",
+ sensor_config=None,
+
+ # Unique to ManipulationRobot
+ grasping_mode="physical",
+
+ **kwargs,
+ ):
+ """
+ Args:
+ name (str): Name for the object. Names need to be unique per scene
+ prim_path (None or str): global path in the stage to this object. If not specified, will automatically be
+ created at /World/
+ class_id (None or int): What class ID the object should be assigned in semantic segmentation rendering mode.
+ If None, the ID will be inferred from this object's category.
+            uuid (None or int): Unique unsigned-integer identifier to assign to this object (max 8 digits).
+ If None is specified, then it will be auto-generated
+ scale (None or float or 3-array): if specified, sets either the uniform (float) or x,y,z (3-array) scale
+ for this object. A single number corresponds to uniform scaling along the x,y,z axes, whereas a
+ 3-array specifies per-axis scaling.
+ visible (bool): whether to render this object or not in the stage
+ visual_only (bool): Whether this object should be visual only (and not collide with any other objects)
+ self_collisions (bool): Whether to enable self collisions for this object
+ load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
+ loading this prim at runtime.
+ abilities (None or dict): If specified, manually adds specific object states to this object. It should be
+ a dict in the form of {ability: {param: value}} containing object abilities and parameters to pass to
+ the object state instance constructor.
+ control_freq (float): control frequency (in Hz) at which to control the object. If set to be None,
+ simulator.import_object will automatically set the control frequency to be 1 / render_timestep by default.
+ controller_config (None or dict): nested dictionary mapping controller name(s) to specific controller
+ configurations for this object. This will override any default values specified by this class.
+ action_type (str): one of {discrete, continuous} - what type of action space to use
+ action_normalize (bool): whether to normalize inputted actions. This will override any default values
+ specified by this class.
+ reset_joint_pos (None or n-array): if specified, should be the joint positions that the object should
+ be set to during a reset. If None (default), self.default_joint_pos will be used instead.
+ obs_modalities (str or list of str): Observation modalities to use for this robot. Default is "all", which
+ corresponds to all modalities being used.
+ Otherwise, valid options should be part of omnigibson.sensors.ALL_SENSOR_MODALITIES.
+ Note: If @sensor_config explicitly specifies `modalities` for a given sensor class, it will
+ override any values specified from @obs_modalities!
+ proprio_obs (str or list of str): proprioception observation key(s) to use for generating proprioceptive
+ observations. If str, should be exactly "default" -- this results in the default proprioception
+ observations being used, as defined by self.default_proprio_obs. See self._get_proprioception_dict
+ for valid key choices
+ sensor_config (None or dict): nested dictionary mapping sensor class name(s) to specific sensor
+ configurations for this object. This will override any default values specified by this class.
+ grasping_mode (str): One of {"physical", "assisted", "sticky"}.
+ If "physical", no assistive grasping will be applied (relies on contact friction + finger force).
+ If "assisted", will magnetize any object touching and within the gripper's fingers.
+ If "sticky", will magnetize any object touching the gripper's fingers.
+ kwargs (dict): Additional keyword arguments that are used for other super() calls from subclasses, allowing
+ for flexible compositions of various object subclasses (e.g.: Robot is USDObject + ControllableObject).
+ """
+
+ # Run super init
+ super().__init__(
+ prim_path=prim_path,
+ name=name,
+ class_id=class_id,
+ uuid=uuid,
+ scale=scale,
+ visible=visible,
+ fixed_base=fixed_base,
+ visual_only=visual_only,
+ self_collisions=self_collisions,
+ load_config=load_config,
+ abilities=abilities,
+ control_freq=control_freq,
+ controller_config=controller_config,
+ action_type=action_type,
+ action_normalize=action_normalize,
+ reset_joint_pos=reset_joint_pos,
+ obs_modalities=obs_modalities,
+ proprio_obs=proprio_obs,
+ sensor_config=sensor_config,
+ grasping_mode=grasping_mode,
+ **kwargs,
+ )
+
+ @property
+ def model_name(self):
+ return "FrankaAllegro"
+
+ @property
+ def discrete_action_list(self):
+ # Not supported for this robot
+ raise NotImplementedError()
+
+ def _create_discrete_action_space(self):
+        # Franka does not support discrete actions
+ raise ValueError("Franka does not support discrete actions!")
+
+ def tuck(self):
+ """
+ Immediately set this robot's configuration to be in tucked mode
+ """
+ self.set_joint_positions(self.tucked_default_joint_pos)
+
+ def untuck(self):
+ """
+ Immediately set this robot's configuration to be in untucked mode
+ """
+ self.set_joint_positions(self.untucked_default_joint_pos)
+
+ def update_controller_mode(self):
+ super().update_controller_mode()
+ # overwrite joint params here
+ for i in range(7):
+ self.joints[f"panda_joint{i+1}"].damping = 1000
+ self.joints[f"panda_joint{i+1}"].stiffness = 1000
+ for i in range(16):
+ self.joints[f"joint_{i}_0"].damping = 100
+ self.joints[f"joint_{i}_0"].stiffness = 300
+ self.joints[f"joint_{i}_0"].max_effort = 15
+
+ @property
+ def controller_order(self):
+ return ["arm_{}".format(self.default_arm), "gripper_{}".format(self.default_arm)]
+
+ @property
+ def _default_controllers(self):
+ controllers = super()._default_controllers
+ controllers["arm_{}".format(self.default_arm)] = "InverseKinematicsController"
+ return controllers
+
+ @property
+ def default_joint_pos(self):
+ # position where the hand is parallel to the ground
+ return np.r_[[0.86, -0.27, -0.68, -1.52, -0.18, 1.29, 1.72], np.zeros(16)]
+
+ @property
+ def finger_lengths(self):
+ return {self.default_arm: 0.1}
+
+ @property
+ def arm_control_idx(self):
+ return {self.default_arm: np.arange(7)}
+
+ @property
+ def gripper_control_idx(self):
+ return {self.default_arm: np.arange(7, 23)}
+
+ @property
+ def arm_link_names(self):
+ return {self.default_arm: [f"panda_link{i}" for i in range(8)]}
+
+ @property
+ def arm_joint_names(self):
+        return {self.default_arm: [f"panda_joint{i+1}" for i in range(7)]}
+
+ @property
+ def eef_link_names(self):
+ return {self.default_arm: "base_link"}
+
+ @property
+ def finger_link_names(self):
+ return {self.default_arm: [f"link_{i}_0" for i in range(16)]}
+
+ @property
+ def finger_joint_names(self):
+ return {self.default_arm: [f"joint_{i}_0" for i in range(16)]}
+
+ @property
+ def usd_path(self):
+ return os.path.join(gm.ASSET_PATH, "models/franka/franka_allegro.usd")
+
+ @property
+ def robot_arm_descriptor_yamls(self):
+ return {self.default_arm: os.path.join(gm.ASSET_PATH, "models/franka/franka_allegro_description.yaml")}
+
+ @property
+ def robot_gripper_descriptor_yamls(self):
+ return {
+ finger: os.path.join(gm.ASSET_PATH, f"models/franka/allegro_{finger}_description.yaml")
+ for finger in ["thumb", "index", "middle", "ring"]
+ }
+
+ @property
+ def urdf_path(self):
+ return os.path.join(gm.ASSET_PATH, "models/franka/franka_allegro.urdf")
+
+ @property
+ def gripper_urdf_path(self):
+ return os.path.join(gm.ASSET_PATH, "models/franka/allegro_hand.urdf")
+
+ @property
+ def disabled_collision_pairs(self):
+ return [
+ ["link_12_0", "part_studio_link"],
+ ]
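# Illustrative sketch (plain numpy): how np.r_ builds the 23-DOF default pose above,
# i.e. 7 Panda arm joints followed by 16 Allegro finger joints, which is why
# gripper_control_idx spans indices 7..22.
import numpy as np

pose = np.r_[[0.86, -0.27, -0.68, -1.52, -0.18, 1.29, 1.72], np.zeros(16)]
print(pose.shape)              # (23,)
print(pose[np.arange(7, 23)])  # the 16 finger joints, all zero in this hand-parallel pose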
diff --git a/omnigibson/robots/manipulation_robot.py b/omnigibson/robots/manipulation_robot.py
index 6927e7e31..1468bbf0f 100644
--- a/omnigibson/robots/manipulation_robot.py
+++ b/omnigibson/robots/manipulation_robot.py
@@ -31,7 +31,7 @@
m.ASSIST_GRASP_MASS_THRESHOLD = 10.0
m.ARTICULATED_ASSIST_FRACTION = 0.7
m.MIN_ASSIST_FORCE = 0
-m.MAX_ASSIST_FORCE = 500
+m.MAX_ASSIST_FORCE = 100
m.ASSIST_FORCE = m.MIN_ASSIST_FORCE + (m.MAX_ASSIST_FORCE - m.MIN_ASSIST_FORCE) * m.ASSIST_FRACTION
m.CONSTRAINT_VIOLATION_THRESHOLD = 0.1
m.RELEASE_WINDOW = 1 / 30.0 # release window in seconds
@@ -44,23 +44,6 @@
GraspingPoint = namedtuple("GraspingPoint", ["link_name", "position"]) # link_name (str), position (x,y,z tuple)
-def can_assisted_grasp(obj):
- """
- Check whether an object @obj can be grasped. This is done
- by checking its category to see if is in the allowlist.
-
- Args:
- obj (BaseObject): Object targeted for an assisted grasp
-
- Returns:
- bool: Whether or not this object can be grasped
- """
- # Use fallback based on mass
- mass = obj.mass
- print(f"Mass for AG: obj: {mass}, max mass: {m.ASSIST_GRASP_MASS_THRESHOLD}, obj: {obj.name}")
- return mass <= m.ASSIST_GRASP_MASS_THRESHOLD
-
-
class ManipulationRobot(BaseRobot):
"""
Robot that is is equipped with grasping (manipulation) capabilities.
@@ -106,6 +89,7 @@ def __init__(
# Unique to ManipulationRobot
grasping_mode="physical",
+ disable_grasp_handling=False,
**kwargs,
):
@@ -152,17 +136,21 @@ def __init__(
configurations for this object. This will override any default values specified by this class.
grasping_mode (str): One of {"physical", "assisted", "sticky"}.
If "physical", no assistive grasping will be applied (relies on contact friction + finger force).
- If "assisted", will magnetize any object touching and within the gripper's fingers.
- If "sticky", will magnetize any object touching the gripper's fingers.
+ If "assisted", will magnetize any object touching and within the gripper's fingers. In this mode,
+ at least two "fingers" need to touch the object.
+ If "sticky", will magnetize any object touching the gripper's fingers. In this mode, only one finger
+ needs to touch the object.
+ disable_grasp_handling (bool): If True, the robot will not automatically handle assisted or sticky grasps.
+ Instead, you will need to call the grasp handling methods yourself.
kwargs (dict): Additional keyword arguments that are used for other super() calls from subclasses, allowing
for flexible compositions of various object subclasses (e.g.: Robot is USDObject + ControllableObject).
"""
# Store relevant internal vars
assert_valid_key(key=grasping_mode, valid_keys=AG_MODES, name="grasping_mode")
self._grasping_mode = grasping_mode
+ self._disable_grasp_handling = disable_grasp_handling
# Initialize other variables used for assistive grasping
- self._ag_data = {arm: None for arm in self.arm_names}
self._ag_freeze_joint_pos = {
arm: {} for arm in self.arm_names
} # Frozen positions for keeping fingers held still
@@ -334,8 +322,11 @@ def set_position_orientation(self, position=None, orientation=None):
self._ag_obj_in_hand[arm].set_position_orientation(*T.mat2pose(hmat=new_obj_pose))
def apply_action(self, action):
- # First run assisted grasping
- if self.grasping_mode != "physical":
+ # Run super method as normal
+ super().apply_action(action)
+
+ # Then run assisted grasping
+ if self.grasping_mode != "physical" and not self._disable_grasp_handling:
self._handle_assisted_grasping(action=action)
# Potentially freeze gripper joints
@@ -343,9 +334,6 @@ def apply_action(self, action):
if self._ag_freeze_gripper[arm]:
self._freeze_gripper(arm)
- # Run super method as normal
- super().apply_action(action)
-
def deploy_control(self, control, control_type, indices=None, normalized=False):
# We intercept the gripper control and replace it with the current joint position if we're freezing our gripper
for arm in self.arm_names:
@@ -367,7 +355,6 @@ def _release_grasp(self, arm="default"):
# Remove joint and filtered collision restraints
og.sim.stage.RemovePrim(self._ag_obj_constraint_params[arm]["ag_joint_prim_path"])
- self._ag_data[arm] = None
self._ag_obj_constraints[arm] = None
self._ag_obj_constraint_params[arm] = {}
self._ag_freeze_gripper[arm] = False
@@ -383,6 +370,7 @@ def release_grasp_immediately(self):
self._release_grasp(arm=arm)
self._ag_release_counter[arm] = int(np.ceil(m.RELEASE_WINDOW / og.sim.get_rendering_dt()))
self._handle_release_window(arm=arm)
+ assert not self._ag_obj_in_hand[arm], "Object still in ag list after release!"
# TODO: Verify not needed!
# for finger_link in self.finger_links[arm]:
# finger_link.remove_filtered_collision_pair(prim=self._ag_obj_in_hand[arm])
@@ -639,6 +627,16 @@ def finger_lengths(self):
"""
raise NotImplementedError
+ @property
+ def arm_workspace_range(self):
+ """
+ Returns:
+ dict: Dictionary mapping arm appendage name to a tuple indicating the start and end of the
+ angular range of the arm workspace around the Z axis of the robot, where 0 is facing
+ forward.
+ """
+ raise NotImplementedError
+
def get_eef_position(self, arm="default"):
"""
Args:
@@ -757,7 +755,7 @@ def _calculate_in_hand_object_rigid(self, arm="default"):
# Make sure at least two fingers are in contact with this object
robot_contacts = robot_contact_links[ag_prim_path]
- touching_at_least_two_fingers = len({link.prim_path for link in self.finger_links[arm]}.intersection(robot_contacts)) >= 2
+ touching_at_least_two_fingers = True if self.grasping_mode == "sticky" else len({link.prim_path for link in self.finger_links[arm]}.intersection(robot_contacts)) >= 2
# TODO: Better heuristic, hacky, we assume the parent object prim path is the prim_path minus the last "/" item
ag_obj_prim_path = "/".join(ag_prim_path.split("/")[:-1])
@@ -765,7 +763,7 @@ def _calculate_in_hand_object_rigid(self, arm="default"):
ag_obj = og.sim.scene.object_registry("prim_path", ag_obj_prim_path)
# Return None if object cannot be assisted grasped or not touching at least two fingers
- if ag_obj is None or (not can_assisted_grasp(ag_obj)) or (not touching_at_least_two_fingers):
+ if ag_obj is None or not touching_at_least_two_fingers:
return None
# Get object and its contacted link
@@ -967,26 +965,27 @@ def _default_controller_config(self):
return cfg
- def _establish_grasp_rigid(self, arm="default", ag_data=None):
+ def _get_assisted_grasp_joint_type(self, ag_obj, ag_link):
"""
- Establishes an ag-assisted grasp, if enabled.
+ Check whether an object @obj can be grasped. If so, return the joint type to use for assisted grasping.
+ Otherwise, return None.
Args:
- arm (str): specific arm to establish grasp.
- Default is "default" which corresponds to the first entry in self.arm_names
- ag_data (None or 2-tuple): if specified, assisted-grasp object, link tuple (i.e. :(BaseObject, RigidPrim)).
- Otherwise, does a no-op
- """
- arm = self.default_arm if arm == "default" else arm
+ ag_obj (BaseObject): Object targeted for an assisted grasp
+ ag_link (RigidPrim): Link of the object to be grasped
- # Return immediately if ag_data is None
- if ag_data is None:
- return
- ag_obj, ag_link = ag_data
+ Returns:
+ (None or str): If obj can be grasped, returns the joint type to use for assisted grasping.
+ """
- # Create a p2p joint if it's a child link of a fixed URDF that is connected by a revolute or prismatic joint
+        # Deny links that are too heavy, unless the link is a non-root link of a fixed-base object
+ mass = ag_link.mass
+ if mass > m.ASSIST_GRASP_MASS_THRESHOLD and not (ag_obj.fixed_base and ag_link != ag_obj.root_link):
+ return None
+
+ # Otherwise, compute the joint type. We use a fixed joint unless the link is a non-fixed link.
joint_type = "FixedJoint"
- if ag_obj.fixed_base:
+ if ag_obj.root_link != ag_link:
# We search up the tree path from the ag_link until we encounter the root (joint == 0) or a non fixed
        # joint (e.g.: revolute or prismatic)
link_handle = ag_link.handle
@@ -1001,12 +1000,37 @@ def _establish_grasp_rigid(self, arm="default", ag_data=None):
link_handle = self._dc.get_joint_parent_body(joint_handle)
joint_handle = self._dc.get_rigid_body_parent_joint(link_handle)
- force_data, _ = self._find_gripper_contacts(arm=arm, return_contact_positions=True)
- contact_pos = None
- for c_link_prim_path, c_contact_pos in force_data:
- if c_link_prim_path == ag_link.prim_path:
- contact_pos = np.array(c_contact_pos)
- break
+ return joint_type
+
+ def _establish_grasp_rigid(self, arm="default", ag_data=None, contact_pos=None):
+ """
+ Establishes an ag-assisted grasp, if enabled.
+
+ Args:
+ arm (str): specific arm to establish grasp.
+ Default is "default" which corresponds to the first entry in self.arm_names
+ ag_data (None or 2-tuple): if specified, assisted-grasp object, link tuple (i.e. :(BaseObject, RigidPrim)).
+ Otherwise, does a no-op
+ contact_pos (None or np.array): if specified, contact position to use for grasp.
+ """
+ arm = self.default_arm if arm == "default" else arm
+
+ # Return immediately if ag_data is None
+ if ag_data is None:
+ return
+ ag_obj, ag_link = ag_data
+
+ # Get the appropriate joint type
+ joint_type = self._get_assisted_grasp_joint_type(ag_obj, ag_link)
+ if joint_type is None:
+ return
+
+ if contact_pos is None:
+ force_data, _ = self._find_gripper_contacts(arm=arm, return_contact_positions=True)
+ for c_link_prim_path, c_contact_pos in force_data:
+ if c_link_prim_path == ag_link.prim_path:
+ contact_pos = np.array(c_contact_pos)
+ break
assert contact_pos is not None
# Joint frame set at the contact point
@@ -1048,6 +1072,7 @@ def _establish_grasp_rigid(self, arm="default", ag_data=None):
"joint_type": joint_type,
"gripper_pos": self.get_joint_positions()[self.gripper_control_idx[arm]],
"max_force": max_force,
+ "contact_pos": contact_pos,
}
self._ag_obj_in_hand[arm] = ag_obj
self._ag_freeze_gripper[arm] = True
@@ -1068,34 +1093,28 @@ def _handle_assisted_grasping(self, action):
"""
# Loop over all arms
for arm in self.arm_names:
- # Make sure gripper action dimension is only 1
- cmd_dim = self._controllers[f"gripper_{arm}"].command_dim
- assert cmd_dim == 1, \
- f"Gripper {arm} controller command dim must be 1 to use assisted grasping, got: {cmd_dim}."
-
- # TODO: Why are we separately checking for complementary conditions?
- threshold = np.mean(self._controllers[f"gripper_{arm}"].command_input_limits)
- applying_grasp = action[self.controller_action_idx[f"gripper_{arm}"][0]] < threshold
- releasing_grasp = action[self.controller_action_idx[f"gripper_{arm}"][0]] > threshold
+ # We apply a threshold based on the control rather than the command here so that the behavior
+ # stays the same across different controllers and control modes (absolute / delta). This way,
+ # a zero action will actually keep the AG setting where it already is.
+ # TODO: Compare this to the iG2 implementation to see if there could be a benefit to using
+ # a combination of control and existing position.
+ controller = self._controllers[f"gripper_{arm}"]
+ controlled_joints = controller.dof_idx
+ threshold = np.mean(np.array(self.control_limits["position"])[:, controlled_joints], axis=0)
+ applying_grasp = np.any(controller.control < threshold)
# Execute gradual release of object
if self._ag_obj_in_hand[arm]:
if self._ag_release_counter[arm] is not None:
self._handle_release_window(arm=arm)
else:
- # constraint_violated = (
- # get_constraint_violation(self._ag_obj_cid[arm]) > m.CONSTRAINT_VIOLATION_THRESHOLD
- # )
- # if constraint_violated or releasing_grasp:
if gm.AG_CLOTH:
self._update_constraint_cloth(arm=arm)
- if releasing_grasp:
+ if not applying_grasp:
self._release_grasp(arm=arm)
-
elif applying_grasp:
- self._ag_data[arm] = self._calculate_in_hand_object(arm=arm)
- self._establish_grasp(arm=arm, ag_data=self._ag_data[arm])
+ self._establish_grasp(arm=arm, ag_data=self._calculate_in_hand_object(arm=arm))
def _update_constraint_cloth(self, arm="default"):
"""
@@ -1120,11 +1139,11 @@ def _calculate_in_hand_object(self, arm="default"):
else:
return self._calculate_in_hand_object_rigid(arm)
- def _establish_grasp(self, arm="default", ag_data=None):
+ def _establish_grasp(self, arm="default", ag_data=None, contact_pos=None):
if gm.AG_CLOTH:
return self._establish_grasp_cloth(arm, ag_data)
else:
- return self._establish_grasp_rigid(arm, ag_data)
+ return self._establish_grasp_rigid(arm, ag_data, contact_pos)
def _calculate_in_hand_object_cloth(self, arm="default"):
"""
@@ -1231,6 +1250,7 @@ def _establish_grasp_cloth(self, arm="default", ag_data=None):
"gripper_pos": self.get_joint_positions()[self.gripper_control_idx[arm]],
"max_force": max_force,
"attachment_point_pos_local": attachment_point_pos_local,
+ "contact_pos": attachment_point_pos,
}
self._ag_obj_in_hand[arm] = ag_obj
self._ag_freeze_gripper[arm] = True
@@ -1249,18 +1269,30 @@ def _dump_state(self):
if self.grasping_mode == "physical":
return state
- # TODO: Include AG_state
-
+ # Include AG_state
+ state["ag_obj_constraint_params"] = self._ag_obj_constraint_params
return state
def _load_state(self, state):
+ # If there is an existing AG object, remove it
+ self.release_grasp_immediately()
+
super()._load_state(state=state)
# No additional loading needed if we're using physical grasping
if self.grasping_mode == "physical":
return
- # TODO: Include AG_state
+ # Include AG_state
+ # TODO: currently does not take care of cloth objects
+ # TODO: add unit tests
+ for arm in state["ag_obj_constraint_params"].keys():
+ if len(state["ag_obj_constraint_params"][arm]) > 0:
+ data = state["ag_obj_constraint_params"][arm]
+ obj = og.sim.scene.object_registry("prim_path", data["ag_obj_prim_path"])
+ link = obj.links[data["ag_link_prim_path"].split("/")[-1]]
+                self._establish_grasp(arm=arm, ag_data=(obj, link), contact_pos=data["contact_pos"])
def _serialize(self, state):
# Call super first
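# Illustrative shape of the per-arm data that _dump_state now serializes and _load_state
# replays (keys taken from the constraint params used elsewhere in this file; the paths,
# arm names, and values below are made up, and the key set is not exhaustive).
example_state_fragment = {
    "ag_obj_constraint_params": {
        "0": {                                        # arm name -> params of its active grasp
            "ag_obj_prim_path": "/World/apple",
            "ag_link_prim_path": "/World/apple/base_link",
            "joint_type": "FixedJoint",
            "contact_pos": [0.42, -0.10, 0.87],       # reused to re-establish the grasp on load
        },
        "1": {},                                      # empty dict -> no grasp on this arm
    },
}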
diff --git a/omnigibson/robots/robot_base.py b/omnigibson/robots/robot_base.py
index 266cf9a59..3fe65ea87 100644
--- a/omnigibson/robots/robot_base.py
+++ b/omnigibson/robots/robot_base.py
@@ -281,6 +281,7 @@ def _get_proprioception_dict(self):
joint_velocities = self.get_joint_velocities(normalized=False)
joint_efforts = self.get_joint_efforts(normalized=False)
pos, ori = self.get_position(), self.get_rpy()
+ ori_2d = self.get_2d_orientation()
return dict(
joint_qpos=joint_positions,
joint_qpos_sin=np.sin(joint_positions),
@@ -290,6 +291,9 @@ def _get_proprioception_dict(self):
robot_pos=pos,
robot_ori_cos=np.cos(ori),
robot_ori_sin=np.sin(ori),
+ robot_2d_ori=ori_2d,
+ robot_2d_ori_cos=np.cos(ori_2d),
+ robot_2d_ori_sin=np.sin(ori_2d),
robot_lin_vel=self.get_linear_velocity(),
robot_ang_vel=self.get_angular_velocity(),
)
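# Standalone sketch of the new 2D-orientation proprioception terms: a single planar yaw
# angle plus its sin/cos (a continuous, non-wrapping heading encoding). The quaternion
# convention (x, y, z, w) and the yaw formula are standard; get_2d_orientation itself is
# assumed to return this yaw.
import numpy as np

def yaw_from_quat(q):
    x, y, z, w = q
    return np.arctan2(2.0 * (w * z + x * y), 1.0 - 2.0 * (y * y + z * z))

ori_2d = yaw_from_quat((0.0, 0.0, np.sin(np.pi / 8), np.cos(np.pi / 8)))  # 45 deg about z
obs = {"robot_2d_ori": ori_2d, "robot_2d_ori_cos": np.cos(ori_2d), "robot_2d_ori_sin": np.sin(ori_2d)}
print(np.degrees(ori_2d))  # ~45.0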
diff --git a/omnigibson/robots/tiago.py b/omnigibson/robots/tiago.py
index 104fbd994..d727ee429 100644
--- a/omnigibson/robots/tiago.py
+++ b/omnigibson/robots/tiago.py
@@ -75,8 +75,10 @@ def __init__(
# Unique to ManipulationRobot
grasping_mode="physical",
+ disable_grasp_handling=False,
# Unique to Tiago
+ variant="default",
rigid_trunk=False,
default_trunk_offset=0.365,
default_arm_pose="vertical",
@@ -129,6 +131,9 @@ def __init__(
If "physical", no assistive grasping will be applied (relies on contact friction + finger force).
If "assisted", will magnetize any object touching and within the gripper's fingers.
If "sticky", will magnetize any object touching the gripper's fingers.
+ disable_grasp_handling (bool): If True, will disable all grasp handling for this object. This means that
+            sticky and assisted grasp modes will not work unless the connection/release methods are manually called.
+ variant (str): Which variant of the robot should be loaded. One of "default", "wrist_cam"
rigid_trunk (bool) if True, will prevent the trunk from moving during execution.
default_trunk_offset (float): sets the default height of the robot's trunk
default_arm_pose (str): Default pose for the robot arm. Should be one of:
@@ -137,6 +142,8 @@ def __init__(
for flexible compositions of various object subclasses (e.g.: Robot is USDObject + ControllableObject).
"""
# Store args
+ assert variant in ("default", "wrist_cam"), f"Invalid Tiago variant specified {variant}!"
+ self._variant = variant
self.rigid_trunk = rigid_trunk
self.default_trunk_offset = default_trunk_offset
assert_valid_key(key=default_arm_pose, valid_keys=DEFAULT_ARM_POSES, name="default_arm_pose")
@@ -176,6 +183,7 @@ def __init__(
proprio_obs=proprio_obs,
sensor_config=sensor_config,
grasping_mode=grasping_mode,
+ disable_grasp_handling=disable_grasp_handling,
**kwargs,
)
@@ -505,7 +513,9 @@ def camera_control_idx(self):
@property
def arm_control_idx(self):
- return {"left": np.array([7, 10, 13, 15, 17, 19, 21]), "right": np.array([8, 11, 14, 16, 18, 20, 22])}
+ return {"left": np.array([7, 10, 13, 15, 17, 19, 21]),
+ "right": np.array([8, 11, 14, 16, 18, 20, 22]),
+ "combined": np.array([7, 8, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22])}
@property
def gripper_control_idx(self):
@@ -524,10 +534,115 @@ def disabled_collision_link_names(self):
@property
def disabled_collision_pairs(self):
return [
- ["torso_fixed_column_link", "torso_fixed_link"],
- ["torso_fixed_column_link", "torso_lift_link"],
- ["arm_left_6_link", "gripper_left_link"],
+ ["arm_left_1_link", "arm_left_2_link"],
+ ["arm_left_2_link", "arm_left_3_link"],
+ ["arm_left_3_link", "arm_left_4_link"],
+ ["arm_left_4_link", "arm_left_5_link"],
+ ["arm_left_5_link", "arm_left_6_link"],
+ ["arm_left_6_link", "arm_left_7_link"],
+ ["arm_right_1_link", "arm_right_2_link"],
+ ["arm_right_2_link", "arm_right_3_link"],
+ ["arm_right_3_link", "arm_right_4_link"],
+ ["arm_right_4_link", "arm_right_5_link"],
+ ["arm_right_5_link", "arm_right_6_link"],
+ ["arm_right_6_link", "arm_right_7_link"],
+ ["gripper_right_right_finger_link", "gripper_right_left_finger_link"],
+ ["gripper_right_link", "wrist_right_ft_link"],
["arm_right_6_link", "gripper_right_link"],
+ ["arm_right_6_link", "wrist_right_ft_tool_link"],
+ ["arm_right_6_link", "wrist_right_ft_link"],
+ ["arm_right_6_link", "arm_right_tool_link"],
+ ["arm_right_5_link", "wrist_right_ft_link"],
+ ["arm_right_5_link", "arm_right_tool_link"],
+ ["gripper_left_right_finger_link", "gripper_left_left_finger_link"],
+ ["gripper_left_link", "wrist_left_ft_link"],
+ ["arm_left_6_link", "gripper_left_link"],
+ ["arm_left_6_link", "wrist_left_ft_tool_link"],
+ ["arm_left_6_link", "wrist_left_ft_link"],
+ ["arm_left_6_link", "arm_left_tool_link"],
+ ["arm_left_5_link", "wrist_left_ft_link"],
+ ["arm_left_5_link", "arm_left_tool_link"],
+ ["torso_lift_link", "torso_fixed_column_link"],
+ ["torso_fixed_link", "torso_fixed_column_link"],
+ ["base_antenna_left_link", "torso_fixed_link"],
+ ["base_antenna_right_link", "torso_fixed_link"],
+ ["base_link", "wheel_rear_left_link"],
+ ["base_link", "wheel_rear_right_link"],
+ ["base_link", "wheel_front_left_link"],
+ ["base_link", "wheel_front_right_link"],
+ ["base_link", "base_dock_link"],
+ ["base_link", "base_antenna_right_link"],
+ ["base_link", "base_antenna_left_link"],
+ ["base_link", "torso_fixed_column_link"],
+ ["base_link", "suspension_front_left_link"],
+ ["base_link", "suspension_front_right_link"],
+ ["base_link", "torso_fixed_link"],
+ ["suspension_front_left_link", "wheel_front_left_link"],
+ ["torso_lift_link", "arm_right_1_link"],
+ ["torso_lift_link", "arm_right_2_link"],
+ ["torso_lift_link", "arm_left_1_link"],
+ ["torso_lift_link", "arm_left_2_link"],
+ ["arm_left_tool_link", "wrist_left_ft_link"],
+ ["wrist_left_ft_link", "wrist_left_ft_tool_link"],
+ ["wrist_left_ft_tool_link", "gripper_left_link"],
+ ['gripper_left_grasping_frame', 'gripper_left_left_finger_link'],
+ ['gripper_left_grasping_frame', 'gripper_left_right_finger_link'],
+ ['wrist_right_ft_link', 'arm_right_tool_link'],
+ ['wrist_right_ft_tool_link', 'wrist_right_ft_link'],
+ ['gripper_right_link', 'wrist_right_ft_tool_link'],
+ ['head_1_link', 'head_2_link'],
+ ['torso_fixed_column_link', 'arm_right_1_link'],
+ ['torso_fixed_column_link', 'arm_left_1_link'],
+ ['arm_left_1_link', 'arm_left_3_link'],
+ ['arm_right_1_link', 'arm_right_3_link'],
+ ['base_link', 'arm_right_4_link'],
+ ['base_link', 'arm_right_5_link'],
+ ['base_link', 'arm_left_4_link'],
+ ['base_link', 'arm_left_5_link'],
+ ['wrist_left_ft_tool_link', 'arm_left_5_link'],
+ ['wrist_right_ft_tool_link', 'arm_right_5_link'],
+ ['arm_left_tool_link', 'wrist_left_ft_tool_link'],
+ ['arm_right_tool_link', 'wrist_right_ft_tool_link']
+ ]
+
+ @property
+ def manipulation_link_names(self):
+ return [
+ "torso_fixed_link",
+ "torso_lift_link",
+ "arm_left_1_link",
+ "arm_left_2_link",
+ "arm_left_3_link",
+ "arm_left_4_link",
+ "arm_left_5_link",
+ "arm_left_6_link",
+ "arm_left_7_link",
+ "arm_left_tool_link",
+ "wrist_left_ft_link",
+ "wrist_left_ft_tool_link",
+ "gripper_left_link",
+ # "gripper_left_grasping_frame",
+ "gripper_left_left_finger_link",
+ "gripper_left_right_finger_link",
+ "gripper_left_tool_link",
+ "arm_right_1_link",
+ "arm_right_2_link",
+ "arm_right_3_link",
+ "arm_right_4_link",
+ "arm_right_5_link",
+ "arm_right_6_link",
+ "arm_right_7_link",
+ "arm_right_tool_link",
+ "wrist_right_ft_link",
+ "wrist_right_ft_tool_link",
+ "gripper_right_link",
+ # "gripper_right_grasping_frame",
+ "gripper_right_left_finger_link",
+ "gripper_right_right_finger_link",
+ "gripper_right_tool_link",
+ "head_1_link",
+ "head_2_link",
+ "xtion_link",
]
@property
@@ -550,16 +665,35 @@ def finger_joint_names(self):
@property
def usd_path(self):
+ if self._variant == "wrist_cam":
+ return os.path.join(gm.ASSET_PATH, "models/tiago/tiago_dual_omnidirectional_stanford/tiago_dual_omnidirectional_stanford_33_with_wrist_cam.usd")
+
+ # Default variant
return os.path.join(gm.ASSET_PATH, "models/tiago/tiago_dual_omnidirectional_stanford/tiago_dual_omnidirectional_stanford_33.usd")
+ @property
+ def simplified_mesh_usd_path(self):
+ # TODO: How can we make this more general - maybe some automatic way to generate these?
+ return os.path.join(gm.ASSET_PATH, "models/tiago/tiago_dual_omnidirectional_stanford/tiago_dual_omnidirectional_stanford_33_simplified_collision_mesh.usd")
+
@property
def robot_arm_descriptor_yamls(self):
+ # TODO: Remove the need to do this by making the arm descriptor yaml files generated automatically
return {"left": os.path.join(gm.ASSET_PATH, "models/tiago/tiago_dual_omnidirectional_stanford_left_arm_descriptor.yaml"),
- "right": os.path.join(gm.ASSET_PATH, "models/tiago/tiago_dual_omnidirectional_stanford_right_arm_fixed_trunk_descriptor.yaml")}
+ "left_fixed": os.path.join(gm.ASSET_PATH, "models/tiago/tiago_dual_omnidirectional_stanford_left_arm_fixed_trunk_descriptor.yaml"),
+ "right": os.path.join(gm.ASSET_PATH, "models/tiago/tiago_dual_omnidirectional_stanford_right_arm_fixed_trunk_descriptor.yaml"),
+ "combined": os.path.join(gm.ASSET_PATH, "models/tiago/tiago_dual_omnidirectional_stanford.yaml")}
@property
def urdf_path(self):
return os.path.join(gm.ASSET_PATH, "models/tiago/tiago_dual_omnidirectional_stanford.urdf")
+
+ @property
+ def arm_workspace_range(self):
+ return {
+ "left": [np.deg2rad(15), np.deg2rad(75)],
+ "right": [np.deg2rad(-75), np.deg2rad(-15)],
+ }
def get_position_orientation(self):
# If the simulator is playing, return the pose of the base_footprint link frame
@@ -576,6 +710,8 @@ def set_position_orientation(self, position=None, orientation=None):
position = current_position
if orientation is None:
orientation = current_orientation
+ assert np.isclose(np.linalg.norm(orientation), 1, atol=1e-3), \
+ f"{self.name} desired orientation {orientation} is not a unit quaternion."
# If the simulator is playing, set the 6 base joints to achieve the desired pose of base_footprint link frame
if self._dc is not None and self._dc.is_simulating():
@@ -601,7 +737,8 @@ def set_position_orientation(self, position=None, orientation=None):
# Move the joint frame for the world_base_joint
if self._world_base_fixed_joint_prim is not None:
self._world_base_fixed_joint_prim.GetAttribute("physics:localPos0").Set(tuple(position))
- self._world_base_fixed_joint_prim.GetAttribute("physics:localRot0").Set(Gf.Quatf(*orientation[[3, 0, 1, 2]]))
+ self._world_base_fixed_joint_prim.GetAttribute("physics:localRot0").Set(Gf.Quatf(
+ orientation[3], orientation[0], orientation[1], orientation[2]))
def set_linear_velocity(self, velocity: np.ndarray):
# Transform the desired linear velocity from the world frame to the root_link ("base_footprint_x") frame
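# Plain-numpy sketch of the quaternion handling above: OmniGibson stores orientations as
# (x, y, z, w), while Gf.Quatf takes the scalar part first, hence the explicit reordering,
# and the new assert requires the quaternion to be (approximately) unit-norm.
import numpy as np

orientation = np.array([0.0, 0.0, 0.3826834, 0.9238795])  # (x, y, z, w): 45 deg about z
assert np.isclose(np.linalg.norm(orientation), 1, atol=1e-3)
wxyz = (orientation[3], orientation[0], orientation[1], orientation[2])  # argument order for Gf.Quatf
print(wxyz)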
diff --git a/omnigibson/scene_graphs/__init__.py b/omnigibson/scene_graphs/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/omnigibson/scene_graphs/graph_builder.py b/omnigibson/scene_graphs/graph_builder.py
new file mode 100644
index 000000000..d169184bb
--- /dev/null
+++ b/omnigibson/scene_graphs/graph_builder.py
@@ -0,0 +1,295 @@
+import itertools
+import os
+
+import networkx as nx
+import numpy as np
+from PIL import Image
+from matplotlib import pyplot as plt
+
+from omnigibson import object_states
+from omnigibson.macros import create_module_macros
+from omnigibson.sensors import VisionSensor
+from omnigibson.object_states.factory import get_state_name
+from omnigibson.object_states.object_state_base import AbsoluteObjectState, BooleanStateMixin, RelativeObjectState
+from omnigibson.utils import transform_utils as T
+
+
+def _formatted_aabb(obj):
+ return T.pose2mat((obj.aabb_center, [0, 0, 0, 1])), obj.aabb_extent
+
+
+class SceneGraphBuilder(object):
+ def __init__(
+ self,
+ robot_name=None,
+ egocentric=False,
+ full_obs=False,
+ only_true=False,
+ merge_parallel_edges=False,
+ exclude_states=(object_states.Touching,)
+ ):
+ """
+ A utility that builds a scene graph with objects as nodes and relative states as edges,
+ alongside additional metadata.
+
+ Args:
+ robot_name (str): Name of the robot whose POV the scene graph will be from. If None, we assert that there
+ is exactly one robot in the scene and use that robot.
+ egocentric (bool): Whether the objects should have poses in the world frame or robot frame.
+ full_obs (bool): Whether all objects should be updated or only those in FOV of the robot.
+ only_true (bool): Whether edges should be created only for relative states that have a value True, or for all
+ relative states (with the appropriate value attached as an attribute).
+            merge_parallel_edges (bool): Whether parallel edges (e.g. different states between the same pair of
+                objects) should be merged into a single edge carrying all of their states (True) or kept as
+                separate edges in a MultiDiGraph (False).
+ exclude_states (Iterable): Object state classes that should be ignored when building the graph.
+ """
+ self._G = None
+ self._robot = None
+ self._robot_name = robot_name
+ self._egocentric = egocentric
+ self._full_obs = full_obs
+ self._only_true = only_true
+ self._merge_parallel_edges = merge_parallel_edges
+ self._last_desired_frame_to_world = None
+ self._exclude_states = set(exclude_states)
+
+ def get_scene_graph(self):
+ return self._G.copy()
+
+ def _get_desired_frame(self):
+ desired_frame_to_world = np.eye(4)
+ world_to_desired_frame = np.eye(4)
+ if self._egocentric:
+ desired_frame_to_world = self._get_robot_to_world_transform()
+ world_to_desired_frame = T.pose_inv(desired_frame_to_world)
+
+ return desired_frame_to_world, world_to_desired_frame
+
+ def _get_robot_to_world_transform(self):
+ robot_to_world = self._robot.get_position_orientation()
+
+ # Get rid of any rotation outside xy plane
+ robot_to_world = T.pose2mat((robot_to_world[0], T.z_rotation_from_quat(robot_to_world[1])))
+
+ return robot_to_world
+
+ def _get_boolean_unary_states(self, obj):
+ states = {}
+ for state_type, state_inst in obj.states.items():
+ if not issubclass(state_type, BooleanStateMixin) or not issubclass(state_type, AbsoluteObjectState):
+ continue
+
+ if state_type in self._exclude_states:
+ continue
+
+ value = state_inst.get_value()
+ if self._only_true and not value:
+ continue
+
+ states[get_state_name(state_type)] = value
+
+ return states
+
+
+ def _get_boolean_binary_states(self, objs):
+ states = []
+ for obj1 in objs:
+ for obj2 in objs:
+ if obj1 == obj2:
+ continue
+
+ for state_type, state_inst in obj1.states.items():
+ if not issubclass(state_type, BooleanStateMixin) or not issubclass(state_type, RelativeObjectState):
+ continue
+
+ if state_type in self._exclude_states:
+ continue
+
+ try:
+ value = state_inst.get_value(obj2)
+ if self._only_true and not value:
+ continue
+
+ states.append((obj1, obj2, get_state_name(state_type), {"value": value}))
+ except:
+ pass
+
+ return states
+
+ def start(self, scene):
+ assert self._G is None, "Cannot start graph builder multiple times."
+
+ if self._robot_name is None:
+ assert len(scene.robots) == 1, "Cannot build scene graph without specifying robot name if there are multiple robots."
+ self._robot = scene.robots[0]
+ else:
+ self._robot = scene.object_registry("name", self._robot_name)
+ assert self._robot, f"Robot with name {self._robot_name} not found in scene."
+ self._G = nx.DiGraph() if self._merge_parallel_edges else nx.MultiDiGraph()
+
+ desired_frame_to_world, world_to_desired_frame = self._get_desired_frame()
+ robot_pose = world_to_desired_frame @ self._get_robot_to_world_transform()
+ robot_bbox_pose, robot_bbox_extent = _formatted_aabb(self._robot)
+ robot_bbox_pose = world_to_desired_frame @ robot_bbox_pose
+ self._G.add_node(
+ self._robot, pose=robot_pose, bbox_pose=robot_bbox_pose, bbox_extent=robot_bbox_extent, states={}
+ )
+ self._last_desired_frame_to_world = desired_frame_to_world
+
+ # Let's also take the first step.
+ self.step(scene)
+
+ def step(self, scene):
+ assert self._G is not None, "Cannot step graph builder before starting it."
+
+ # Prepare the necessary transformations.
+ desired_frame_to_world, world_to_desired_frame = self._get_desired_frame()
+
+ # Update the position of everything that's already in the scene by using our relative position to last frame.
+ old_desired_to_new_desired = world_to_desired_frame @ self._last_desired_frame_to_world
+ nodes = list(self._G.nodes)
+ poses = np.array([self._G.nodes[obj]["pose"] for obj in nodes])
+ bbox_poses = np.array([self._G.nodes[obj]["bbox_pose"] for obj in nodes])
+ updated_poses = old_desired_to_new_desired @ poses
+ updated_bbox_poses = old_desired_to_new_desired @ bbox_poses
+ for i, obj in enumerate(nodes):
+ self._G.nodes[obj]["pose"] = updated_poses[i]
+ self._G.nodes[obj]["bbox_pose"] = updated_bbox_poses[i]
+
+ # Update the robot's pose. We don't want to accumulate errors because of the repeated transforms.
+ self._G.nodes[self._robot]["pose"] = world_to_desired_frame @ self._get_robot_to_world_transform()
+ robot_bbox_pose, robot_bbox_extent = _formatted_aabb(self._robot)
+ robot_bbox_pose = world_to_desired_frame @ robot_bbox_pose
+ self._G.nodes[self._robot]["bbox_pose"] = robot_bbox_pose
+ self._G.nodes[self._robot]["bbox_extent"] = robot_bbox_extent
+
+ # Go through the objects in FOV of the robot.
+ objs_to_add = set(scene.objects)
+ if not self._full_obs:
+ # TODO: Reenable this once InFOV state is fixed.
+ # If we're not in full observability mode, only pick the objects in FOV of robot.
+ # bids_in_fov = self._robot.states[object_states.ObjectsInFOVOfRobot].get_value()
+ # objs_in_fov = set(
+ # scene.objects_by_id[bid]
+ # for bid in bids_in_fov
+ # if bid in scene.objects_by_id
+ # )
+ # objs_to_add &= objs_in_fov
+ raise NotImplementedError("Partial observability not supported in scene graph builder yet.")
+
+ for obj in objs_to_add:
+ # Add the object if not already in the graph
+ if obj not in self._G.nodes:
+ self._G.add_node(obj)
+
+ # Get the relative position of the object & update it (reducing accumulated errors)
+ self._G.nodes[obj]["pose"] = world_to_desired_frame @ T.pose2mat(obj.get_position_orientation())
+
+ # Get the bounding box.
+ if hasattr(obj, "get_base_aligned_bbox"):
+ bbox_center, bbox_orn, bbox_extent, _ = obj.get_base_aligned_bbox(visual=True, fallback_to_aabb=True)
+ bbox_pose = T.pose2mat((bbox_center, bbox_orn))
+ else:
+ bbox_pose, bbox_extent = _formatted_aabb(obj)
+ self._G.nodes[obj]["bbox_pose"] = world_to_desired_frame @ bbox_pose
+ self._G.nodes[obj]["bbox_extent"] = bbox_extent
+
+ # Update the states of the object
+ self._G.nodes[obj]["states"] = self._get_boolean_unary_states(obj)
+
+ # Update the binary states for seen objects.
+ self._G.remove_edges_from(list(itertools.product(objs_to_add, objs_to_add)))
+ edges = self._get_boolean_binary_states(objs_to_add)
+ if self._merge_parallel_edges:
+ new_edges = {}
+ for edge in edges:
+ edge_pair = (edge[0], edge[1])
+ if edge_pair not in new_edges:
+ new_edges[edge_pair] = []
+
+ new_edges[edge_pair].append((edge[2], edge[3]["value"]))
+
+ edges = [(k[0], k[1], {"states": v}) for k, v in new_edges.items()]
+
+ self._G.add_edges_from(edges)
+
+ # Save the robot's transform in this frame.
+ self._last_desired_frame_to_world = desired_frame_to_world
+
+
+def visualize_scene_graph(scene, G, show_window=True, realistic_positioning=False):
+ """
+ Converts the graph into an image and shows it in a cv2 window if preferred.
+
+ Args:
+ show_window (bool): Whether a cv2 GUI window containing the visualization should be shown.
+ realistic_positioning (bool): Whether nodes should be positioned based on their position in the scene (if True)
+ or placed using a graphviz layout (neato) that makes it easier to read edges & find clusters.
+ """
+
+ def _draw_graph():
+ nodes = list(G.nodes)
+ node_labels = {obj: obj.category for obj in nodes}
+ colors = [
+ "yellow"
+ if obj.category == "agent"
+ else ("green" if obj.states[object_states.InFOVOfRobot].get_value() else "red")
+ for obj in nodes
+ ]
+ positions = (
+ {obj: (-pose[0][1], pose[0][0]) for obj, pose in G.nodes.data("pose")}
+ if realistic_positioning
+ else nx.nx_pydot.pydot_layout(G, prog="neato")
+ )
+ nx.drawing.draw_networkx(
+ G,
+ pos=positions,
+ labels=node_labels,
+ nodelist=nodes,
+ node_color=colors,
+ font_size=4,
+ arrowsize=5,
+ node_size=150,
+ )
+
+ edge_labels = {
+ edge: ", ".join(
+ state + "=" + str(value)
+ for state, value in G.edges[edge]["states"]
+ )
+ for edge in G.edges
+ }
+ nx.drawing.draw_networkx_edge_labels(G, pos=positions, edge_labels=edge_labels, font_size=4)
+
+ # Prepare pyplot figure that's sized to match the robot video.
+ robot = scene.robots[0]
+ robot_camera_sensor, = [s for s in robot.sensors.values() if isinstance(s, VisionSensor) and "rgb" in s.modalities]
+ robot_view = (robot_camera_sensor.get_obs()["rgb"][..., :3]).astype(np.uint8)
+ imgheight, imgwidth, _ = robot_view.shape
+
+ figheight = 4.8
+ figdpi = imgheight / figheight
+ figwidth = imgwidth / figdpi
+
+ # Draw the graph onto the figure.
+ fig = plt.figure(figsize=(figwidth, figheight), dpi=figdpi)
+ _draw_graph()
+ fig.canvas.draw()
+
+ # Convert the canvas to image
+    graph_view = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
+ graph_view = graph_view.reshape(fig.canvas.get_width_height()[::-1] + (3,))
+ assert graph_view.shape == robot_view.shape
+ plt.close(fig)
+
+ # Combine the two images side-by-side
+ img = np.hstack((robot_view, graph_view))
+
+    # Convert to BGR for cv2-based viewing.
+ if show_window:
+ import cv2
+ cv_img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
+ cv2.imshow("SceneGraph", cv_img)
+ cv2.waitKey(1)
+
+    return Image.fromarray(img)
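# Usage sketch for the new SceneGraphBuilder / visualize_scene_graph pair. It assumes an
# already-constructed OmniGibson environment `env` with exactly one robot; `env` and its
# action sampling are not part of this file. full_obs=True is required for now, since the
# partial-observability branch above raises NotImplementedError.
builder = SceneGraphBuilder(full_obs=True, only_true=True, merge_parallel_edges=True)
builder.start(env.scene)                 # adds the robot node and takes the first snapshot
for _ in range(10):
    env.step(env.action_space.sample())  # assumed gym-style stepping
    builder.step(env.scene)              # refresh poses, unary states, and relative-state edges
G = builder.get_scene_graph()            # a copy of the underlying networkx graph, safe to mutate
graph_img = visualize_scene_graph(env.scene, G, show_window=False)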
diff --git a/omnigibson/scenes/interactive_traversable_scene.py b/omnigibson/scenes/interactive_traversable_scene.py
index 4fa9a15bf..eb42064aa 100644
--- a/omnigibson/scenes/interactive_traversable_scene.py
+++ b/omnigibson/scenes/interactive_traversable_scene.py
@@ -4,6 +4,7 @@
from omnigibson.scenes.traversable_scene import TraversableScene
from omnigibson.maps.segmentation_map import SegmentationMap
from omnigibson.utils.asset_utils import get_og_scene_path
+from omnigibson.utils.constants import STRUCTURE_CATEGORIES
from omnigibson.utils.ui_utils import create_module_logger
# Create module logger
@@ -31,6 +32,7 @@ def __init__(
not_load_object_categories=None,
load_room_types=None,
load_room_instances=None,
+ load_task_relevant_only=False,
seg_map_resolution=0.1,
include_robots=True,
):
@@ -51,6 +53,7 @@ def __init__(
not_load_object_categories (None or list): if specified, do not load these object categories into the scene
load_room_types (None or list): only load objects in these room types into the scene
load_room_instances (None or list): if specified, only load objects in these room instances into the scene
+ load_task_relevant_only (bool): Whether only task relevant objects (and building structure) should be loaded
seg_map_resolution (float): room segmentation map resolution
include_robots (bool): whether to also include the robot(s) defined in the scene
"""
@@ -65,6 +68,7 @@ def __init__(
self.load_object_categories = None
self.not_load_object_categories = None
self.load_room_instances = None
+ self.load_task_relevant_only = load_task_relevant_only
# Get scene information
if scene_file is None:
@@ -161,7 +165,8 @@ def _load(self):
if self.has_connectivity_graph:
self._trav_map.load_map(maps_path)
- def _should_load_object(self, obj_info):
+ def _should_load_object(self, obj_info, task_metadata):
+ name = obj_info["args"]["name"]
category = obj_info["args"].get("category", "object")
in_rooms = obj_info["args"].get("in_rooms", None)
@@ -173,7 +178,16 @@ def _should_load_object(self, obj_info):
not_blacklisted = self.not_load_object_categories is None or category not in self.not_load_object_categories
# Only load these object categories (no need to white list building structures)
- whitelisted = self.load_object_categories is None or category in self.load_object_categories
+ task_relevant_names = set(task_metadata["inst_to_name"].values()) if "inst_to_name" in task_metadata else set()
+ is_task_relevant = name in task_relevant_names or category in STRUCTURE_CATEGORIES
+ whitelisted = (
+ # Either no whitelisting-only mode is on
+ (self.load_object_categories is None and not self.load_task_relevant_only) or
+ # Or the object is in the whitelist
+ (self.load_object_categories is not None and category in self.load_object_categories) or
+ # Or it's in the task relevant list
+ (self.load_task_relevant_only and is_task_relevant)
+ )
# This object is not located in one of the selected rooms, skip
valid_room = self.load_room_instances is None or len(set(self.load_room_instances) & set(in_rooms)) > 0
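# Simplified stand-in for the whitelisting condition above, using plain arguments instead
# of scene attributes, to make the three ways an object can be kept explicit. Values in
# the example calls are made up.
def should_whitelist(category, name, load_object_categories, load_task_relevant_only,
                     task_relevant_names, structure_categories):
    is_task_relevant = name in task_relevant_names or category in structure_categories
    return (
        (load_object_categories is None and not load_task_relevant_only)
        or (load_object_categories is not None and category in load_object_categories)
        or (load_task_relevant_only and is_task_relevant)
    )

print(should_whitelist("apple", "apple_1", None, False, set(), {"walls"}))       # True: no filtering active
print(should_whitelist("apple", "apple_1", None, True, {"knife_1"}, {"walls"}))  # False: not task relevant
print(should_whitelist("walls", "walls_3", None, True, set(), {"walls"}))        # True: structure always kept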
diff --git a/omnigibson/scenes/scene_base.py b/omnigibson/scenes/scene_base.py
index 296814f6f..08c4f78c7 100644
--- a/omnigibson/scenes/scene_base.py
+++ b/omnigibson/scenes/scene_base.py
@@ -31,7 +31,7 @@
class Scene(Serializable, Registerable, Recreatable, ABC):
"""
Base class for all Scene objects.
- Contains the base functionalities for an arbitary scene with an arbitrary set of added objects
+ Contains the base functionalities for an arbitrary scene with an arbitrary set of added objects
"""
def __init__(
self,
@@ -197,6 +197,11 @@ def _load_objects_from_scene_file(self):
init_info = scene_info["objects_info"]["init_info"]
init_state = scene_info["state"]["object_registry"]
init_systems = scene_info["state"]["system_registry"].keys()
+ task_metadata = {}
+ try:
+ task_metadata = scene_info["metadata"]["task"]
+ except:
+ pass
# Create desired systems
for system_name in init_systems:
@@ -206,7 +211,7 @@ def _load_objects_from_scene_file(self):
# accordingly
for obj_name, obj_info in init_info.items():
# Check whether we should load the object or not
- if not self._should_load_object(obj_info=obj_info):
+ if not self._should_load_object(obj_info=obj_info, task_metadata=task_metadata):
continue
# Create object class instance
obj = create_object_from_init_info(obj_info)
@@ -229,7 +234,7 @@ def _load_metadata_from_scene_file(self):
for key, data in scene_info.get("metadata", dict()).items():
og.sim.write_metadata(key=key, data=data)
- def _should_load_object(self, obj_info):
+ def _should_load_object(self, obj_info, task_metadata):
"""
Helper function to check whether we should load an object given its init_info. Useful for potentially filtering
objects based on, e.g., their category, size, etc.
diff --git a/omnigibson/tasks/behavior_task.py b/omnigibson/tasks/behavior_task.py
index 395e30361..ebfd8e3a1 100644
--- a/omnigibson/tasks/behavior_task.py
+++ b/omnigibson/tasks/behavior_task.py
@@ -5,6 +5,7 @@
evaluate_goal_conditions,
get_goal_conditions,
get_ground_goal_state_options,
+ get_natural_initial_conditions,
get_initial_conditions,
get_natural_goal_conditions,
get_object_scope,
@@ -254,6 +255,7 @@ def update_activity(self, activity_name, activity_definition_id, predefined_prob
np.random.shuffle(self.instruction_order)
self.currently_viewed_index = 0
self.currently_viewed_instruction = self.instruction_order[self.currently_viewed_index]
+ self.activity_natural_language_initial_conditions = get_natural_initial_conditions(self.activity_conditions)
self.activity_natural_language_goal_conditions = get_natural_goal_conditions(self.activity_conditions)
def get_potential(self, env):
diff --git a/omnigibson/transition_rules.py b/omnigibson/transition_rules.py
index 4fbd1d9c1..305a40521 100644
--- a/omnigibson/transition_rules.py
+++ b/omnigibson/transition_rules.py
@@ -153,6 +153,16 @@ def step(cls):
added_obj_attrs += output.add
removed_objs += output.remove
+ cls.execute_transition(added_obj_attrs=added_obj_attrs, removed_objs=removed_objs)
+
+ @classmethod
+ def execute_transition(cls, added_obj_attrs, removed_objs):
+ """
+ Executes the transition for the given added and removed objects.
+
+        Args:
+            added_obj_attrs (list of ObjectAttrs): Object attribute instances to add to the scene
+            removed_objs (list of BaseObject): Object instances to remove from the scene
+ """
# Process all transition results
if len(removed_objs) > 0:
disclaimer(
@@ -722,6 +732,10 @@ def transition(cls, object_candidates):
# Object parts offset annotation are w.r.t the base link of the whole object.
pos, orn = sliceable_obj.get_position_orientation()
+            # If the sliceable object has no parts, skip it
+ if not sliceable_obj.metadata["object_parts"]:
+ continue
+
# Load object parts
for i, part in enumerate(sliceable_obj.metadata["object_parts"].values()):
# List of dicts gets replaced by {'0':dict, '1':dict, ...}
@@ -1266,10 +1280,13 @@ def _execute_recipe(cls, container, recipe, in_volume):
system.remove_all_group_particles(group=group_name)
# Remove either all objects or only the recipe-relevant objects inside the container
- objs_to_remove.extend(np.concatenate([
- cls._OBJECTS[np.where(in_volume[cls._CATEGORY_IDXS[obj_category]])[0]]
- for obj_category in recipe["input_objects"].keys()
- ]) if cls.ignore_nonrecipe_objects else cls._OBJECTS[np.where(in_volume)[0]])
+ object_mask = in_volume.copy()
+ if cls.ignore_nonrecipe_objects:
+ object_category_mask = np.zeros_like(object_mask, dtype=bool)
+ for obj_category in recipe["input_objects"].keys():
+ object_category_mask[cls._CATEGORY_IDXS[obj_category]] = True
+ object_mask &= object_category_mask
+ objs_to_remove.extend(cls._OBJECTS[object_mask])
volume += sum(obj.volume for obj in objs_to_remove)
# Define callback for spawning new objects inside container
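# Plain-numpy sketch of the rewritten removal logic: start from the in-volume mask and,
# when non-recipe objects are ignored, AND it with a mask over recipe-relevant categories.
# The arrays below are made-up stand-ins for cls._OBJECTS / cls._CATEGORY_IDXS.
import numpy as np

in_volume = np.array([True, True, False, True])      # which tracked objects sit inside the container
category_idxs = {"dough": np.array([0, 1]), "egg": np.array([3])}
recipe_inputs = {"dough": 1}                          # only dough is a recipe input here

object_mask = in_volume.copy()
ignore_nonrecipe_objects = True
if ignore_nonrecipe_objects:
    object_category_mask = np.zeros_like(object_mask, dtype=bool)
    for obj_category in recipe_inputs:
        object_category_mask[category_idxs[obj_category]] = True
    object_mask &= object_category_mask
print(object_mask)   # [ True  True False False] -> the egg at index 3 is left untouched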
diff --git a/omnigibson/utils/constants.py b/omnigibson/utils/constants.py
index d5757c752..71d0656a5 100644
--- a/omnigibson/utils/constants.py
+++ b/omnigibson/utils/constants.py
@@ -66,6 +66,9 @@ class ParticleModifyCondition(IntEnum):
# Valid omni characters for specifying strings, e.g. prim paths
VALID_OMNI_CHARS = frozenset({'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '_', '/'})
+# Structure categories that need to always be loaded for stability purposes
+STRUCTURE_CATEGORIES = frozenset({"floors", "walls", "ceilings", "lawn", "driveway", "fence"})
+
# Note that we are starting this from bit 6 since bullet seems to be giving special meaning to groups 0-5.
# Collision groups for objects. For special logic, different categories can be assigned different collision groups.
ALL_COLLISION_GROUPS_MASK = -1
diff --git a/omnigibson/utils/control_utils.py b/omnigibson/utils/control_utils.py
index ed68ee8dc..8a327f1c8 100644
--- a/omnigibson/utils/control_utils.py
+++ b/omnigibson/utils/control_utils.py
@@ -4,6 +4,55 @@
import lula
import numpy as np
import omnigibson.utils.transform_utils as T
+from omnigibson.utils.sim_utils import meets_minimum_isaac_version
+
+class FKSolver:
+ """
+ Class for thinly wrapping Lula Forward Kinematics solver
+ """
+
+ def __init__(
+ self,
+ robot_description_path,
+ robot_urdf_path,
+ ):
+ # Create robot description and kinematics
+ self.robot_description = lula.load_robot(robot_description_path, robot_urdf_path)
+ self.kinematics = self.robot_description.kinematics()
+
+ def get_link_poses(
+ self,
+ joint_positions,
+ link_names,
+ ):
+ """
+ Given @joint_positions, get poses of the desired links (specified by @link_names)
+
+ Args:
+            joint_positions (n-array): Joint positions in configuration space
+ link_names (list): List of robot link names we want to specify (e.g. "gripper_link")
+
+ Returns:
+ link_poses (dict): Dictionary mapping each robot link name to its pose
+ """
+ # TODO: Refactor this to go over all links at once
+ link_poses = {}
+ for link_name in link_names:
+ pose3_lula = self.kinematics.pose(joint_positions, link_name)
+
+ # get position
+ link_position = pose3_lula.translation
+
+ # get orientation
+ rotation_lula = pose3_lula.rotation
+ link_orientation = (
+ rotation_lula.x(),
+ rotation_lula.y(),
+ rotation_lula.z(),
+ rotation_lula.w(),
+ )
+ link_poses[link_name] = (link_position, link_orientation)
+ return link_poses
class IKSolver:
@@ -42,8 +91,8 @@ def solve(
Args:
target_pos (3-array): desired (x,y,z) local target cartesian position in robot's base coordinate frame
target_quat (4-array or None): If specified, desired (x,y,z,w) local target quaternion orientation in
- robot's base coordinate frame. If None, IK will be position-only (will override settings such that
- orientation's tolerance is very high and weight is 0)
+ robot's base coordinate frame. If None, IK will be position-only (will override settings such that
+ orientation's tolerance is very high and weight is 0)
tolerance_pos (float): Maximum position error (L2-norm) for a successful IK solution
tolerance_quat (float): Maximum orientation error (per-axis L2-norm) for a successful IK solution
weight_pos (float): Weight for the relative importance of position error during CCD
@@ -65,10 +114,19 @@ def solve(
self.config.cspace_seeds = [initial_joint_pos]
self.config.position_tolerance = tolerance_pos
self.config.orientation_tolerance = 100.0 if target_quat is None else tolerance_quat
- self.config.position_weight = weight_pos
- self.config.orientation_weight = 0.0 if target_quat is None else weight_quat
- self.config.max_iterations_per_descent = max_iterations
+
+ if meets_minimum_isaac_version("2023.0.0"):
+ self.config.ccd_position_weight = weight_pos
+ self.config.ccd_orientation_weight = 0.0 if target_quat is None else weight_quat
+ self.config.max_num_descents = max_iterations
+ else:
+ self.config.position_weight = weight_pos
+ self.config.orientation_weight = 0.0 if target_quat is None else weight_quat
+ self.config.max_iterations_per_descent = max_iterations
# Compute target joint positions
ik_results = lula.compute_ik_ccd(self.kinematics, ik_target_pose, self.eef_name, self.config)
- return np.array(ik_results.cspace_position)
+ if ik_results.success:
+ return np.array(ik_results.cspace_position)
+ else:
+ return None
\ No newline at end of file
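# Sketch of handling the new None return from IKSolver.solve. The solver construction
# (which needs Lula plus robot description/URDF files) is assumed to have happened
# elsewhere; only the solve call and the failure check are shown.
import numpy as np

target_pos = np.array([0.4, 0.0, 0.3])                      # desired eef position in the base frame
q = solver.solve(target_pos=target_pos, target_quat=None)   # position-only IK
if q is None:
    # Before this change an unconverged result was still returned; now failure is explicit.
    print("IK did not converge; keep the previous joint targets")
else:
    print("joint solution:", q)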
diff --git a/omnigibson/utils/deprecated_utils.py b/omnigibson/utils/deprecated_utils.py
index 02bcf45d0..b4df02c6b 100644
--- a/omnigibson/utils/deprecated_utils.py
+++ b/omnigibson/utils/deprecated_utils.py
@@ -22,7 +22,7 @@ def __init__(self, prim_type: str, **kwargs):
prim_type (str): It supports Plane/Sphere/Cone/Cylinder/Disk/Torus/Cube.
kwargs:
- object_origin (Gf.Vec3f): Position of mesh center.
+ object_origin (Gf.Vec3f): Position of mesh center in stage units.
u_patches (int): The number of patches to tessellate U direction.
@@ -31,7 +31,7 @@ def __init__(self, prim_type: str, **kwargs):
w_patches (int): The number of patches to tessellate W direction.
It only works for Cone/Cylinder/Cube.
- half_scale (float): Half size of mesh. Default None.
+ half_scale (float): Half size of mesh in centimeters. Default is None, which means it's controlled by settings.
u_verts_scale (int): Tessellation Level of U. It's a multiplier of u_patches.
@@ -42,23 +42,22 @@ def __init__(self, prim_type: str, **kwargs):
For Cone/Cylinder, it's to tessellate the caps.
For Cube, it's to tessellate along z-axis.
+ above_ground (bool): If True, offsets the center of the mesh to sit above the ground plane;
+ if False, no offset is applied. Defaults to False. This param only takes effect when
+ object_origin is not given; otherwise it is ignored.
+
stage (Usd.Stage): If specified, stage to create prim on
"""
+
self._prim_type = prim_type[0:1].upper() + prim_type[1:].lower()
self._usd_context = omni.usd.get_context()
self._selection = self._usd_context.get_selection()
self._stage = kwargs.get("stage", self._usd_context.get_stage())
self._settings = carb.settings.get_settings()
- self._prim_path = None
- self._default_path = None
- self._prepend_default_prim = True
- if "prim_path" in kwargs and kwargs["prim_path"]:
- self._default_path = Sdf.Path(kwargs["prim_path"])
- self._select_new_prim = True
- if "select_new_prim" in kwargs:
- self._select_new_prim = bool(kwargs["select_new_prim"])
- if "prepend_default_prim" in kwargs:
- self._prepend_default_prim = bool(kwargs["prepend_default_prim"])
+ self._default_path = kwargs.get("prim_path", None)
+ self._select_new_prim = kwargs.get("select_new_prim", True)
+ self._prepend_default_prim = kwargs.get("prepend_default_prim", True)
+ self._above_ground = kwargs.get("above_ground", False)
self._attributes = {**kwargs}
# Supported mesh types should have an associated evaluator class
@@ -66,7 +65,7 @@ def __init__(self, prim_type: str, **kwargs):
assert isinstance(self._evaluator_class, type)
-class Utils(OmniUtils):
+class Utils2022(OmniUtils):
"""
Subclass that overrides a specific function within Omni's Utils class to fix a bug
"""
@@ -101,13 +100,39 @@ def create_material(self, name):
return [material_path]
+class Utils2023(OmniUtils):
+ def create_material(self, name):
+ material_url = carb.settings.get_settings().get("/exts/omni.particle.system.core/material")
+
+ # TODO: THIS IS THE ONLY LINE WE CHANGE! "/" SHOULD BE ""
+ material_path = ""
+ default_prim = self.stage.GetDefaultPrim()
+ if default_prim:
+ material_path = default_prim.GetPath().pathString
+
+ if not self.stage.GetPrimAtPath(material_path + "/Looks"):
+ self.stage.DefinePrim(material_path + "/Looks", "Scope")
+ material_path += "/Looks/" + name
+ material_path = ou.get_stage_next_free_path(
+ self.stage, material_path, False
+ )
+ prim = self.stage.DefinePrim(material_path, "Material")
+ if material_url:
+ prim.GetReferences().AddReference(material_url)
+ else:
+ carb.log_error("Failed to find material URL in settings")
+
+ return [material_path]
+
+
class Core(OmniCore):
"""
Subclass that overrides a specific function within Omni's Core class to fix a bug
"""
def __init__(self, popup_callback: Callable[[str], None], particle_system_name: str):
self._popup_callback = popup_callback
- self.utils = Utils() # TODO: THIS IS THE ONLY LINE THAT WE CHANGE! ONCE FIXED, REMOVE THIS
+ from omnigibson.utils.sim_utils import meets_minimum_isaac_version
+ self.utils = Utils2023() if meets_minimum_isaac_version("2023.0.0") else Utils2022()
self.context = ou.get_context()
self.stage = self.context.get_stage()
self.selection = self.context.get_selection()
@@ -123,7 +148,7 @@ def get_compute_graph(self, selected_paths, create_new_graph=True, created_paths
"""
graph = None
graph_paths = [path for path in selected_paths
- if self.stage.GetPrimAtPath(path).GetTypeName() == "ComputeGraph"]
+ if self.stage.GetPrimAtPath(path).GetTypeName() in ["ComputeGraph", "OmniGraph"]]
if len(graph_paths) > 0:
graph = ogc.get_graph_by_path(graph_paths[0])
@@ -131,6 +156,7 @@ def get_compute_graph(self, selected_paths, create_new_graph=True, created_paths
carb.log_warn(f"Multiple ComputeGraph prims selected. Only the first will be used: {graph.get_path_to_graph()}")
elif create_new_graph:
# If no graph was found in the selected prims, we'll make a new graph.
+ # TODO: THIS IS THE ONLY LINE THAT WE CHANGE! ONCE FIXED, REMOVE THIS
graph_path = Sdf.Path(f"/OmniGraph/{self.particle_system_name}").MakeAbsolutePath(Sdf.Path.absoluteRootPath)
graph_path = ou.get_stage_next_free_path(self.stage, graph_path, True)
@@ -159,4 +185,3 @@ def get_compute_graph(self, selected_paths, create_new_graph=True, created_paths
carb.log_info(f"No ComputeGraph selected. A new graph has been created at {graph.get_path_to_graph()}")
return graph
-
diff --git a/omnigibson/utils/grasping_planning_utils.py b/omnigibson/utils/grasping_planning_utils.py
new file mode 100644
index 000000000..43fb2719c
--- /dev/null
+++ b/omnigibson/utils/grasping_planning_utils.py
@@ -0,0 +1,493 @@
+import numpy as np
+import random
+from scipy.spatial.transform import Rotation as R, Slerp
+from math import ceil
+from omnigibson.macros import create_module_macros
+
+import omnigibson.utils.transform_utils as T
+from omnigibson.object_states.open_state import _get_relevant_joints
+from omnigibson.utils.constants import JointType, JointAxis
+from omni.isaac.core.utils.rotations import gf_quat_to_np_array
+
+
+m = create_module_macros(module_path=__file__)
+
+m.REVOLUTE_JOINT_FRACTION_ACROSS_SURFACE_AXIS_BOUNDS = (0.4, 0.6)
+m.PRISMATIC_JOINT_FRACTION_ACROSS_SURFACE_AXIS_BOUNDS = (0.2, 0.8)
+m.ROTATION_ARC_SEGMENT_LENGTHS = 0.05
+m.OPENNESS_THRESHOLD_TO_OPEN = 0.8
+m.OPENNESS_THRESHOLD_TO_CLOSE = 0.05
+
+
+def get_grasp_poses_for_object_sticky(target_obj):
+ """
+ Obtain a grasp pose for an object from top down, to be used with sticky grasping.
+
+ Args:
+ target_obj (StatefulObject): Object to get a grasp pose for
+
+ Returns:
+ List of grasp candidates, where each grasp candidate is a tuple containing the grasp pose and the approach direction.
+ """
+ bbox_center_in_world, bbox_quat_in_world, bbox_extent_in_base_frame, _ = target_obj.get_base_aligned_bbox(
+ visual=False
+ )
+
+ grasp_center_pos = bbox_center_in_world + np.array([0, 0, np.max(bbox_extent_in_base_frame) + 0.05])
+ towards_object_in_world_frame = bbox_center_in_world - grasp_center_pos
+ towards_object_in_world_frame /= np.linalg.norm(towards_object_in_world_frame)
+
+ grasp_quat = T.euler2quat([0, np.pi/2, 0])
+
+ grasp_pose = (grasp_center_pos, grasp_quat)
+ grasp_candidate = [(grasp_pose, towards_object_in_world_frame)]
+
+ return grasp_candidate
+
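+ # Illustrative usage sketch (the object handle is an assumption):
+ #   candidates = get_grasp_poses_for_object_sticky(apple)
+ #   (grasp_pos, grasp_quat), approach_dir = candidates[0]
+ #   # grasp_pos is sampled above the object; approach_dir points from the grasp toward the bbox center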
+
+def get_grasp_poses_for_object_sticky_from_arbitrary_direction(target_obj):
+ """
+ Obtain a grasp pose for an object from an arbitrary direction to be used with sticky grasping.
+
+ Args:
+ target_obj (StatefulObject): Object to get a grasp pose for
+
+ Returns:
+ List of grasp candidates, where each grasp candidate is a tuple containing the grasp pose and the approach direction.
+ """
+ bbox_center_in_world, bbox_quat_in_world, bbox_extent_in_base_frame, _ = target_obj.get_base_aligned_bbox(
+ visual=False
+ )
+
+ # Pick an axis and a direction.
+ approach_axis = random.choice([0, 1, 2])
+ approach_direction = random.choice([-1, 1]) if approach_axis != 2 else 1
+ constant_dimension_in_base_frame = approach_direction * bbox_extent_in_base_frame * np.eye(3)[approach_axis]
+ randomizable_dimensions_in_base_frame = bbox_extent_in_base_frame - np.abs(constant_dimension_in_base_frame)
+ random_dimensions_in_base_frame = np.random.uniform([-1, -1, 0], [1, 1, 1]) # note that we don't allow going below center
+ grasp_center_in_base_frame = random_dimensions_in_base_frame * randomizable_dimensions_in_base_frame + constant_dimension_in_base_frame
+
+ grasp_center_pos = T.mat2pose(
+ T.pose2mat((bbox_center_in_world, bbox_quat_in_world)) @ # base frame to world frame
+ T.pose2mat((grasp_center_in_base_frame, [0, 0, 0, 1])) # grasp pose in base frame
+ )[0] + np.array([0, 0, 0.02])
+ towards_object_in_world_frame = bbox_center_in_world - grasp_center_pos
+ towards_object_in_world_frame /= np.linalg.norm(towards_object_in_world_frame)
+
+ # For the grasp, we want the X+ direction to be the direction of the object's surface.
+ # The other two directions can be randomized.
+ rand_vec = np.random.rand(3)
+ rand_vec /= np.linalg.norm(rand_vec)
+ grasp_x = towards_object_in_world_frame
+ grasp_y = np.cross(rand_vec, grasp_x)
+ grasp_y /= np.linalg.norm(grasp_y)
+ grasp_z = np.cross(grasp_x, grasp_y)
+ grasp_z /= np.linalg.norm(grasp_z)
+ grasp_mat = np.array([grasp_x, grasp_y, grasp_z]).T
+ grasp_quat = R.from_matrix(grasp_mat).as_quat()
+
+ grasp_pose = (grasp_center_pos, grasp_quat)
+ grasp_candidate = [(grasp_pose, towards_object_in_world_frame)]
+
+ return grasp_candidate
+
+
+def get_grasp_position_for_open(robot, target_obj, should_open, relevant_joint=None, num_waypoints="default"):
+ """
+ Computes the grasp position for opening or closing a joint.
+
+ Args:
+ robot: the robot object
+ target_obj: the object to open/close a joint of
+ should_open: a boolean indicating whether we are opening or closing
+ relevant_joint: the joint to open/close if we want to do a particular one in advance
+ num_waypoints: the number of waypoints to interpolate between the start and end poses; if "default", the count is derived from the travel distance
+
+ Returns:
+ None (if no grasp was found), or Tuple, containing:
+ relevant_joint: the joint that is being targeted for open/close by the returned grasp
+ offset_grasp_pose_in_world_frame: the grasp pose in the world frame
+ waypoints: the interpolated waypoints between the start and end poses
+ approach_direction_in_world_frame: the approach direction in the world frame
+ grasp_required: a boolean indicating whether a grasp is required for the opening/closing based on which side of the joint we are on
+ required_pos_change: the required change in position of the joint to open/close
+ """
+ # Pick a moving link of the object.
+ relevant_joints = [relevant_joint] if relevant_joint is not None else _get_relevant_joints(target_obj)[1]
+ if len(relevant_joints) == 0:
+ raise ValueError("Cannot open/close object without relevant joints.")
+
+ # Make sure what we got is an appropriately open/close joint.
+ random.shuffle(relevant_joints)
+ selected_joint = None
+ for joint in relevant_joints:
+ current_position = joint.get_state()[0][0]
+ joint_range = joint.upper_limit - joint.lower_limit
+ openness_fraction = (current_position - joint.lower_limit) / joint_range
+ if (should_open and openness_fraction < m.OPENNESS_THRESHOLD_TO_OPEN) or (not should_open and openness_fraction > m.OPENNESS_THRESHOLD_TO_CLOSE):
+ selected_joint = joint
+ break
+
+ if selected_joint is None:
+ return None
+
+ if selected_joint.joint_type == JointType.JOINT_REVOLUTE:
+ return (selected_joint,) + grasp_position_for_open_on_revolute_joint(robot, target_obj, selected_joint, should_open)  # the revolute variant derives its waypoints from arc segments rather than num_waypoints
+ elif selected_joint.joint_type == JointType.JOINT_PRISMATIC:
+ return (selected_joint,) + grasp_position_for_open_on_prismatic_joint(robot, target_obj, selected_joint, should_open, num_waypoints=num_waypoints)
+ else:
+ raise ValueError("Unknown joint type encountered while generating joint position.")
+
+
+def grasp_position_for_open_on_prismatic_joint(robot, target_obj, relevant_joint, should_open, num_waypoints="default"):
+ """
+ Computes the grasp position for opening or closing a prismatic joint.
+
+ Args:
+ robot: the robot object
+ target_obj: the object to open
+ relevant_joint: the prismatic joint to open
+ should_open: a boolean indicating whether we are opening or closing
+ num_waypoints: the number of waypoints to interpolate between the start and end poses; if "default", the count is derived from the travel distance
+
+ Returns:
+ Tuple, containing:
+ offset_grasp_pose_in_world_frame: the grasp pose in the world frame
+ waypoints: the interpolated waypoints between the start and end poses
+ approach_direction_in_world_frame: the approach direction in the world frame
+ grasp_required: a boolean indicating whether a grasp is required for the opening/closing based on which side of the joint we are on
+ required_pos_change: the required change in position of the joint to open/close
+ """
+ link_name = relevant_joint.body1.split("/")[-1]
+
+ # Get the bounding box of the child link.
+ (
+ bbox_center_in_world,
+ bbox_quat_in_world,
+ bbox_extent_in_link_frame,
+ _,
+ ) = target_obj.get_base_aligned_bbox(link_name=link_name, visual=False)
+
+ # Match the push axis to one of the bb axes.
+ joint_orientation = gf_quat_to_np_array(relevant_joint.get_attribute("physics:localRot0"))[[1, 2, 3, 0]]
+ push_axis = R.from_quat(joint_orientation).apply([1, 0, 0])
+ assert np.isclose(np.max(np.abs(push_axis)), 1.0) # Make sure we're aligned with a bb axis.
+ push_axis_idx = np.argmax(np.abs(push_axis))
+ canonical_push_axis = np.eye(3)[push_axis_idx]
+
+ # TODO: Need to figure out how to get the correct push direction.
+ push_direction = np.sign(push_axis[push_axis_idx]) if should_open else -1 * np.sign(push_axis[push_axis_idx])
+ canonical_push_direction = canonical_push_axis * push_direction
+
+ # Pick the closer of the two faces along the push axis as our favorite.
+ points_along_push_axis = (
+ np.array([canonical_push_axis, -canonical_push_axis]) * bbox_extent_in_link_frame[push_axis_idx] / 2
+ )
+ (
+ push_axis_closer_side_idx,
+ center_of_selected_surface_along_push_axis,
+ _,
+ ) = _get_closest_point_to_point_in_world_frame(
+ points_along_push_axis, (bbox_center_in_world, bbox_quat_in_world), robot.get_position()
+ )
+ push_axis_closer_side_sign = 1 if push_axis_closer_side_idx == 0 else -1
+
+ # Pick the other axes.
+ all_axes = list(set(range(3)) - {push_axis_idx})
+ x_axis_idx, y_axis_idx = tuple(sorted(all_axes))
+ canonical_x_axis = np.eye(3)[x_axis_idx]
+ canonical_y_axis = np.eye(3)[y_axis_idx]
+
+ # Find the correct side of the lateral axis & go some distance along that direction.
+ min_lateral_pos_wrt_surface_center = (canonical_x_axis + canonical_y_axis) * -bbox_extent_in_link_frame / 2
+ max_lateral_pos_wrt_surface_center = (canonical_x_axis + canonical_y_axis) * bbox_extent_in_link_frame / 2
+ diff_lateral_pos_wrt_surface_center = max_lateral_pos_wrt_surface_center - min_lateral_pos_wrt_surface_center
+ sampled_lateral_pos_wrt_min = np.random.uniform(
+ m.PRISMATIC_JOINT_FRACTION_ACROSS_SURFACE_AXIS_BOUNDS[0] * diff_lateral_pos_wrt_surface_center,
+ m.PRISMATIC_JOINT_FRACTION_ACROSS_SURFACE_AXIS_BOUNDS[1] * diff_lateral_pos_wrt_surface_center,
+ )
+ lateral_pos_wrt_surface_center = min_lateral_pos_wrt_surface_center + sampled_lateral_pos_wrt_min
+ grasp_position_in_bbox_frame = center_of_selected_surface_along_push_axis + lateral_pos_wrt_surface_center
+ grasp_quat_in_bbox_frame = T.quat_inverse(joint_orientation)
+ grasp_pose_in_world_frame = T.pose_transform(
+ bbox_center_in_world, bbox_quat_in_world, grasp_position_in_bbox_frame, grasp_quat_in_bbox_frame
+ )
+
+ # Now apply the grasp offset.
+ dist_from_grasp_pos = robot.finger_lengths[robot.default_arm] + 0.05
+ offset_grasp_pose_in_bbox_frame = (grasp_position_in_bbox_frame + canonical_push_axis * push_axis_closer_side_sign * dist_from_grasp_pos, grasp_quat_in_bbox_frame)
+ offset_grasp_pose_in_world_frame = T.pose_transform(
+ bbox_center_in_world, bbox_quat_in_world, *offset_grasp_pose_in_bbox_frame
+ )
+
+ # To compute the rotation position, we want to decide how far along the rotation axis we'll go.
+ target_joint_pos = relevant_joint.upper_limit if should_open else relevant_joint.lower_limit
+ current_joint_pos = relevant_joint.get_state()[0][0]
+
+ required_pos_change = target_joint_pos - current_joint_pos
+ push_vector_in_bbox_frame = canonical_push_direction * abs(required_pos_change)
+ target_hand_pos_in_bbox_frame = grasp_position_in_bbox_frame + push_vector_in_bbox_frame
+ target_hand_pose_in_world_frame = T.pose_transform(
+ bbox_center_in_world, bbox_quat_in_world, target_hand_pos_in_bbox_frame, grasp_quat_in_bbox_frame
+ )
+
+ # Compute the approach direction.
+ approach_direction_in_world_frame = R.from_quat(bbox_quat_in_world).apply(canonical_push_axis * -push_axis_closer_side_sign)
+
+ # Decide whether a grasp is required. If approach direction and displacement are similar, no need to grasp.
+ grasp_required = np.dot(push_vector_in_bbox_frame, canonical_push_axis * -push_axis_closer_side_sign) < 0
+ # TODO: Need to find a better way of getting the predicted eef position for the start point of the interpolated waypoints.
+ # Maybe break this into another function that is called after the grasp is executed, so we know the eef position?
+ waypoint_start_offset = -0.05 * approach_direction_in_world_frame if should_open else 0.05 * approach_direction_in_world_frame
+ waypoint_start_pose = (grasp_pose_in_world_frame[0] + -1 * approach_direction_in_world_frame * (robot.finger_lengths[robot.default_arm] + waypoint_start_offset), grasp_pose_in_world_frame[1])
+ waypoint_end_pose = (target_hand_pose_in_world_frame[0] + -1 * approach_direction_in_world_frame * (robot.finger_lengths[robot.default_arm]), target_hand_pose_in_world_frame[1])
+ waypoints = interpolate_waypoints(waypoint_start_pose, waypoint_end_pose, num_waypoints=num_waypoints)
+
+ return (
+ offset_grasp_pose_in_world_frame,
+ waypoints,
+ approach_direction_in_world_frame,
+ relevant_joint,
+ grasp_required,
+ required_pos_change
+ )
+
+
+def interpolate_waypoints(start_pose, end_pose, num_waypoints="default"):
+ """
+ Interpolates a series of waypoints between a start and end pose.
+
+ Args:
+ start_pose (tuple): A tuple containing the starting position and orientation as a quaternion.
+ end_pose (tuple): A tuple containing the ending position and orientation as a quaternion.
+ num_waypoints (int, optional): The number of waypoints to interpolate. If "default", the number of waypoints is calculated based on the distance between the start and end pose.
+
+ Returns:
+ list: A list of tuples representing the interpolated waypoints, where each tuple contains a position and orientation as a quaternion.
+ """
+ start_pos, start_orn = start_pose
+ travel_distance = np.linalg.norm(end_pose[0] - start_pos)
+
+ if num_waypoints == "default":
+ num_waypoints = np.max([2, int(travel_distance / 0.01) + 1])
+ pos_waypoints = np.linspace(start_pos, end_pose[0], num_waypoints)
+
+ # Also interpolate the rotations
+ combined_rotation = R.from_quat(np.array([start_orn, end_pose[1]]))
+ slerp = Slerp([0, 1], combined_rotation)
+ orn_waypoints = slerp(np.linspace(0, 1, num_waypoints))
+ quat_waypoints = [x.as_quat() for x in orn_waypoints]
+ return [waypoint for waypoint in zip(pos_waypoints, quat_waypoints)]
+
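+ # Minimal usage sketch (poses are (position, xyzw-quaternion) tuples; values are illustrative):
+ #   start = (np.array([0., 0., 0.]), np.array([0., 0., 0., 1.]))
+ #   end = (np.array([0.1, 0., 0.]), np.array([0., 0., 0., 1.]))
+ #   waypoints = interpolate_waypoints(start, end)   # roughly 10-11 waypoints at the default 1 cm spacing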
+
+def grasp_position_for_open_on_revolute_joint(robot, target_obj, relevant_joint, should_open):
+ """
+ Computes the grasp position for opening or closing a revolute joint.
+
+ Args:
+ robot: the robot object
+ target_obj: the object to open
+ relevant_joint: the revolute joint to open
+ should_open: a boolean indicating whether we are opening or closing
+
+ Returns:
+ Tuple, containing:
+ offset_grasp_pose_in_world_frame: the grasp pose in the world frame
+ waypoints: the interpolated waypoints between the start and end poses
+ approach_direction_in_world_frame: the approach direction in the world frame
+ grasp_required: a boolean indicating whether a grasp is required for the opening/closing based on which side of the joint we are on
+ required_pos_change: the required change in position of the joint to open/close
+ """
+ link_name = relevant_joint.body1.split("/")[-1]
+ link = target_obj.links[link_name]
+
+ # Get the bounding box of the child link.
+ (
+ bbox_center_in_world,
+ bbox_quat_in_world,
+ _,
+ bbox_center_in_obj_frame
+ ) = target_obj.get_base_aligned_bbox(link_name=link_name, visual=False)
+
+ bbox_quat_in_world = link.get_orientation()
+ bbox_extent_in_link_frame = np.array(target_obj.native_link_bboxes[link_name]['collision']['axis_aligned']['extent'])
+ bbox_wrt_origin = T.relative_pose_transform(bbox_center_in_world, bbox_quat_in_world, *link.get_position_orientation())
+ origin_wrt_bbox = T.invert_pose_transform(*bbox_wrt_origin)
+
+ joint_orientation = gf_quat_to_np_array(relevant_joint.get_attribute("physics:localRot0"))[[1, 2, 3, 0]]
+ joint_axis = R.from_quat(joint_orientation).apply([1, 0, 0])
+ joint_axis /= np.linalg.norm(joint_axis)
+ origin_towards_bbox = np.array(bbox_wrt_origin[0])
+ open_direction = np.cross(joint_axis, origin_towards_bbox)
+ open_direction /= np.linalg.norm(open_direction)
+ lateral_axis = np.cross(open_direction, joint_axis)
+
+ # Match the axes to the canonical axes of the link bb.
+ lateral_axis_idx = np.argmax(np.abs(lateral_axis))
+ open_axis_idx = np.argmax(np.abs(open_direction))
+ joint_axis_idx = np.argmax(np.abs(joint_axis))
+ assert lateral_axis_idx != open_axis_idx
+ assert lateral_axis_idx != joint_axis_idx
+ assert open_axis_idx != joint_axis_idx
+
+ canonical_open_direction = np.eye(3)[open_axis_idx]
+ points_along_open_axis = (
+ np.array([canonical_open_direction, -canonical_open_direction]) * bbox_extent_in_link_frame[open_axis_idx] / 2
+ )
+ current_yaw = relevant_joint.get_state()[0][0]
+ closed_yaw = relevant_joint.lower_limit
+ points_along_open_axis_after_rotation = [
+ _rotate_point_around_axis((point, [0, 0, 0, 1]), bbox_wrt_origin, joint_axis, closed_yaw - current_yaw)[0]
+ for point in points_along_open_axis
+ ]
+ open_axis_closer_side_idx, _, _ = _get_closest_point_to_point_in_world_frame(
+ points_along_open_axis_after_rotation, (bbox_center_in_world, bbox_quat_in_world), robot.get_position()
+ )
+ open_axis_closer_side_sign = 1 if open_axis_closer_side_idx == 0 else -1
+ center_of_selected_surface_along_push_axis = points_along_open_axis[open_axis_closer_side_idx]
+
+ # Find the correct side of the lateral axis & go some distance along that direction.
+ canonical_joint_axis = np.eye(3)[joint_axis_idx]
+ lateral_away_from_origin = np.eye(3)[lateral_axis_idx] * np.sign(origin_towards_bbox[lateral_axis_idx])
+ min_lateral_pos_wrt_surface_center = (
+ lateral_away_from_origin * -np.array(origin_wrt_bbox[0])
+ - canonical_joint_axis * bbox_extent_in_link_frame[lateral_axis_idx] / 2
+ )
+ max_lateral_pos_wrt_surface_center = (
+ lateral_away_from_origin * bbox_extent_in_link_frame[lateral_axis_idx] / 2
+ + canonical_joint_axis * bbox_extent_in_link_frame[lateral_axis_idx] / 2
+ )
+ diff_lateral_pos_wrt_surface_center = max_lateral_pos_wrt_surface_center - min_lateral_pos_wrt_surface_center
+ sampled_lateral_pos_wrt_min = np.random.uniform(
+ m.REVOLUTE_JOINT_FRACTION_ACROSS_SURFACE_AXIS_BOUNDS[0] * diff_lateral_pos_wrt_surface_center,
+ m.REVOLUTE_JOINT_FRACTION_ACROSS_SURFACE_AXIS_BOUNDS[1] * diff_lateral_pos_wrt_surface_center,
+ )
+ lateral_pos_wrt_surface_center = min_lateral_pos_wrt_surface_center + sampled_lateral_pos_wrt_min
+ grasp_position = center_of_selected_surface_along_push_axis + lateral_pos_wrt_surface_center
+ # Get the appropriate rotation
+
+ # grasp_quat_in_bbox_frame = get_quaternion_between_vectors([1, 0, 0], canonical_open_direction * open_axis_closer_side_sign * -1)
+ grasp_quat_in_bbox_frame = _get_orientation_facing_vector_with_random_yaw(canonical_open_direction * open_axis_closer_side_sign * -1)
+
+ # Now apply the grasp offset.
+ dist_from_grasp_pos = robot.finger_lengths[robot.default_arm] + 0.05
+ offset_in_bbox_frame = canonical_open_direction * open_axis_closer_side_sign * dist_from_grasp_pos
+ offset_grasp_pose_in_bbox_frame = (grasp_position + offset_in_bbox_frame, grasp_quat_in_bbox_frame)
+ offset_grasp_pose_in_world_frame = T.pose_transform(
+ bbox_center_in_world, bbox_quat_in_world, *offset_grasp_pose_in_bbox_frame
+ )
+
+ # To compute the rotation position, we want to decide how far along the rotation axis we'll go.
+ desired_yaw = relevant_joint.upper_limit if should_open else relevant_joint.lower_limit
+ required_yaw_change = desired_yaw - current_yaw
+
+ # Now we'll rotate the grasp position around the origin by the desired rotation.
+ # Note that we use the non-offset position here since the joint can't be pulled all the way to the offset.
+ grasp_pose_in_bbox_frame = grasp_position, grasp_quat_in_bbox_frame
+ grasp_pose_in_origin_frame = T.pose_transform(*bbox_wrt_origin, *grasp_pose_in_bbox_frame)
+
+ # Get the arc length and divide it into segments of m.ROTATION_ARC_SEGMENT_LENGTHS (5 cm by default)
+ arc_length = abs(required_yaw_change) * np.linalg.norm(grasp_pose_in_origin_frame[0])
+ turn_steps = int(ceil(arc_length / m.ROTATION_ARC_SEGMENT_LENGTHS))
+ targets = []
+
+ for i in range(turn_steps):
+ partial_yaw_change = (i + 1) / turn_steps * required_yaw_change
+ rotated_grasp_pose_in_bbox_frame = _rotate_point_around_axis(
+ (offset_grasp_pose_in_bbox_frame[0], offset_grasp_pose_in_bbox_frame[1]), bbox_wrt_origin, joint_axis, partial_yaw_change
+ )
+ rotated_grasp_pose_in_world_frame = T.pose_transform(
+ bbox_center_in_world, bbox_quat_in_world, *rotated_grasp_pose_in_bbox_frame
+ )
+ targets.append(rotated_grasp_pose_in_world_frame)
+
+ # Compute the approach direction.
+ approach_direction_in_world_frame = R.from_quat(bbox_quat_in_world).apply(canonical_open_direction * -open_axis_closer_side_sign)
+
+ # Decide whether a grasp is required. If approach direction and displacement are similar, no need to grasp.
+ movement_in_world_frame = np.array(targets[-1][0]) - np.array(offset_grasp_pose_in_world_frame[0])
+ grasp_required = np.dot(movement_in_world_frame, approach_direction_in_world_frame) < 0
+
+ return (
+ offset_grasp_pose_in_world_frame,
+ targets,
+ approach_direction_in_world_frame,
+ grasp_required,
+ required_yaw_change,
+ )
+
+
+def _get_orientation_facing_vector_with_random_yaw(vector):
+ """
+ Get a quaternion that orients the x-axis to face the given vector, with the y and z axes
+ forming a randomly chosen (but orthonormal) completion of the frame.
+
+ Args:
+ vector (np.ndarray): The vector to face.
+
+ Returns:
+ np.ndarray: A quaternion representing the orientation.
+ """
+ forward = vector / np.linalg.norm(vector)
+ rand_vec = np.random.rand(3)
+ rand_vec /= np.linalg.norm(rand_vec)
+ side = np.cross(rand_vec, forward)
+ side /= np.linalg.norm(side)
+ up = np.cross(forward, side)
+ # assert np.isclose(np.linalg.norm(up), 1, atol=1e-3)
+ rotmat = np.array([forward, side, up]).T
+ return R.from_matrix(rotmat).as_quat()
+
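+ # Sanity-check sketch: the returned quaternion maps +X onto `vector`, e.g.
+ #   q = _get_orientation_facing_vector_with_random_yaw(np.array([0., 0., 1.]))
+ #   np.allclose(R.from_quat(q).apply([1, 0, 0]), [0, 0, 1], atol=1e-6)  # expected True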
+
+def _rotate_point_around_axis(point_wrt_arbitrary_frame, arbitrary_frame_wrt_origin, joint_axis, yaw_change):
+ """
+ Rotate a point around an axis, given the point in an arbitrary frame, the arbitrary frame's pose in the origin frame,
+ the axis to rotate around, and the amount to rotate by. This is a utility for rotating the grasp position around the
+ joint axis.
+
+ Args:
+ point_wrt_arbitrary_frame (tuple): The point in the arbitrary frame.
+ arbitrary_frame_wrt_origin (tuple): The pose of the arbitrary frame in the origin frame.
+ joint_axis (np.ndarray): The axis to rotate around.
+ yaw_change (float): The amount to rotate by.
+
+ Returns:
+ tuple: The rotated point in the arbitrary frame.
+ """
+ rotation = R.from_rotvec(joint_axis * yaw_change).as_quat()
+ origin_wrt_arbitrary_frame = T.invert_pose_transform(*arbitrary_frame_wrt_origin)
+
+ pose_in_origin_frame = T.pose_transform(*arbitrary_frame_wrt_origin, *point_wrt_arbitrary_frame)
+ rotated_pose_in_origin_frame = T.pose_transform([0, 0, 0], rotation, *pose_in_origin_frame)
+ rotated_pose_in_arbitrary_frame = T.pose_transform(*origin_wrt_arbitrary_frame, *rotated_pose_in_origin_frame)
+ return rotated_pose_in_arbitrary_frame
+
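+ # Quick numeric sketch (assumes T.pose_transform / T.invert_pose_transform as defined in transform_utils):
+ #   rotating the point ((1, 0, 0), identity quat), expressed in a frame coincident with the origin,
+ #   around joint_axis = [0, 0, 1] by pi/2 yields approximately ((0, 1, 0), the 90-degree z-rotation quat).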
+
+def _get_closest_point_to_point_in_world_frame(
+ vectors_in_arbitrary_frame, arbitrary_frame_to_world_frame, point_in_world
+):
+ """
+ Given a set of vectors in an arbitrary frame, find the closest vector to a point in world frame.
+ Useful for picking between two sides of a joint for grasping.
+
+ Args:
+ vectors_in_arbitrary_frame (list): A list of vectors in the arbitrary frame.
+ arbitrary_frame_to_world_frame (tuple): The pose of the arbitrary frame in the world frame.
+ point_in_world (tuple): The point in the world frame.
+
+ Returns:
+ tuple: The index of the closest vector, the closest vector in the arbitrary frame, and the closest vector in the world frame.
+ """
+ vectors_in_world = np.array(
+ [
+ T.pose_transform(*arbitrary_frame_to_world_frame, vector, [0, 0, 0, 1])[0]
+ for vector in vectors_in_arbitrary_frame
+ ]
+ )
+
+ vector_distances_to_point = np.linalg.norm(vectors_in_world - np.array(point_in_world)[None, :], axis=1)
+ closer_option_idx = np.argmin(vector_distances_to_point)
+ vector_in_arbitrary_frame = vectors_in_arbitrary_frame[closer_option_idx]
+ vector_in_world_frame = vectors_in_world[closer_option_idx]
+
+ return closer_option_idx, vector_in_arbitrary_frame, vector_in_world_frame
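+
+ # Illustrative sketch: picking the bbox face closest to the robot (frame pose and robot handle are assumptions):
+ #   idx, face_local, face_world = _get_closest_point_to_point_in_world_frame(
+ #       faces_in_bbox_frame, (bbox_center_in_world, bbox_quat_in_world), robot.get_position())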
diff --git a/omnigibson/utils/motion_planning_utils.py b/omnigibson/utils/motion_planning_utils.py
new file mode 100644
index 000000000..6a5f99bb1
--- /dev/null
+++ b/omnigibson/utils/motion_planning_utils.py
@@ -0,0 +1,546 @@
+import numpy as np
+from math import ceil
+
+import omnigibson as og
+from omnigibson.macros import create_module_macros
+from omnigibson.object_states import ContactBodies
+import omnigibson.utils.transform_utils as T
+from omnigibson.utils.control_utils import IKSolver
+from pxr import PhysicsSchemaTools, Gf
+
+m = create_module_macros(module_path=__file__)
+m.ANGLE_DIFF = 0.3
+m.DIST_DIFF = 0.1
+
+def _wrap_angle(theta):
+ """
+ Converts an angle to the range [-pi, pi).
+
+ Args:
+ theta (float): angle in radians
+
+ Returns:
+ float: angle in radians in range [-pi, pi)
+ """
+ return (theta + np.pi) % (2 * np.pi) - np.pi
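+
+ # Quick numeric check of the wrapping convention:
+ #   _wrap_angle(3 * np.pi / 2) -> -np.pi / 2, and _wrap_angle(-np.pi) -> -np.pi (range is [-pi, pi))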
+
+
+def plan_base_motion(
+ robot,
+ end_conf,
+ context,
+ planning_time=15.0,
+):
+ """
+ Plans a base motion to a 2d pose
+
+ Args:
+ robot (omnigibson.object_states.Robot): Robot object to plan for
+ end_conf (Iterable): [x, y, yaw] 2d pose to plan to
+ context (PlanningContext): Context to plan in that includes the robot copy
+ planning_time (float): Time to plan for
+
+ Returns:
+ Array of arrays: Array of 2d poses that the robot should navigate to
+ """
+ from ompl import base as ob
+ from ompl import geometric as ompl_geo
+
+ class CustomMotionValidator(ob.MotionValidator):
+
+ def __init__(self, si, space):
+ super(CustomMotionValidator, self).__init__(si)
+ self.si = si
+ self.space = space
+
+ def checkMotion(self, s1, s2):
+ if not self.si.isValid(s2):
+ return False
+
+ start = np.array([s1.getX(), s1.getY(), s1.getYaw()])
+ goal = np.array([s2.getX(), s2.getY(), s2.getYaw()])
+ segment_theta = self.get_angle_between_poses(start, goal)
+
+ # Start rotation
+ if not self.is_valid_rotation(self.si, start, segment_theta):
+ return False
+
+ # Navigation
+ dist = np.linalg.norm(goal[:2] - start[:2])
+ num_points = ceil(dist / m.DIST_DIFF) + 1
+ nav_x = np.linspace(start[0], goal[0], num_points).tolist()
+ nav_y = np.linspace(start[1], goal[1], num_points).tolist()
+ for i in range(num_points):
+ state = create_state(self.si, nav_x[i], nav_y[i], segment_theta)
+ if not self.si.isValid(state()):
+ return False
+
+ # Goal rotation
+ if not self.is_valid_rotation(self.si, [goal[0], goal[1], segment_theta], goal[2]):
+ return False
+
+ return True
+
+ @staticmethod
+ def is_valid_rotation(si, start_conf, final_orientation):
+ diff = _wrap_angle(final_orientation - start_conf[2])
+ direction = np.sign(diff)
+ diff = abs(diff)
+ num_points = ceil(diff / m.ANGLE_DIFF) + 1
+ nav_angle = np.linspace(0.0, diff, num_points) * direction
+ angles = nav_angle + start_conf[2]
+ for i in range(num_points):
+ state = create_state(si.getStateSpace(), start_conf[0], start_conf[1], angles[i])
+ if not si.isValid(state()):
+ return False
+ return True
+
+ # Get angle between 2d robot poses
+ @staticmethod
+ def get_angle_between_poses(p1, p2):
+ return np.arctan2(p2[1] - p1[1], p2[0] - p1[0])
+
+ def create_state(space, x, y, yaw):
+ state = ob.State(space)
+ state().setX(x)
+ state().setY(y)
+ state().setYaw(_wrap_angle(yaw))
+ return state
+
+ def state_valid_fn(q):
+ x = q.getX()
+ y = q.getY()
+ yaw = q.getYaw()
+ pose = ([x, y, 0.0], T.euler2quat((0, 0, yaw)))
+ return not set_base_and_detect_collision(context, pose)
+
+ def remove_unnecessary_rotations(path):
+ """
+ Removes unnecessary rotations from a base path when possible, so that the yaw of each pose in the
+ path points toward the position of the next pose in the path
+
+ Args:
+ path (Array of arrays): Array of 2d poses
+
+ Returns:
+ Array of numpy arrays: Array of 2d poses with unnecessary rotations removed
+ """
+ # Start at the same starting pose
+ new_path = [path[0]]
+
+ # Process every intermediate waypoint
+ for i in range(1, len(path) - 1):
+ # compute the yaw you'd be at when arriving into path[i] and departing from it
+ arriving_yaw = CustomMotionValidator.get_angle_between_poses(path[i-1], path[i])
+ departing_yaw = CustomMotionValidator.get_angle_between_poses(path[i], path[i+1])
+
+ # check if you are able to make that rotation directly.
+ arriving_state = (path[i][0], path[i][1], arriving_yaw)
+ if CustomMotionValidator.is_valid_rotation(si, arriving_state, departing_yaw):
+ # Then use the arriving yaw directly
+ new_path.append(arriving_state)
+ else:
+ # Otherwise, keep the waypoint
+ new_path.append(path[i])
+
+ # Don't forget to add back the same ending pose
+ new_path.append(path[-1])
+
+ return new_path
+
+ pos = robot.get_position()
+ yaw = T.quat2euler(robot.get_orientation())[2]
+ start_conf = (pos[0], pos[1], yaw)
+
+ # create an SE(2) state space
+ space = ob.SE2StateSpace()
+
+ # set lower and upper bounds
+ bbox_vals = []
+ for floor in filter(lambda o: o.category == "floors", og.sim.scene.objects):
+ bbox_vals += floor.aabb[0][:2].tolist()
+ bbox_vals += floor.aabb[1][:2].tolist()
+ bounds = ob.RealVectorBounds(2)
+ bounds.setLow(min(bbox_vals))
+ bounds.setHigh(max(bbox_vals))
+ space.setBounds(bounds)
+
+ # create a simple setup object
+ ss = ompl_geo.SimpleSetup(space)
+ ss.setStateValidityChecker(ob.StateValidityCheckerFn(state_valid_fn))
+
+ si = ss.getSpaceInformation()
+ si.setMotionValidator(CustomMotionValidator(si, space))
+ # TODO: Try changing to RRTConnect in the future. Currently using RRT because movement is not direction invariant. Can change to RRTConnect
+ # possibly if hasSymmetricInterpolate is set to False for the state space. Doc here https://ompl.kavrakilab.org/classompl_1_1base_1_1StateSpace.html
+ planner = ompl_geo.RRT(si)
+ ss.setPlanner(planner)
+
+ start = create_state(space, start_conf[0], start_conf[1], start_conf[2])
+ print(start)
+
+ goal = create_state(space, end_conf[0], end_conf[1], end_conf[2])
+ print(goal)
+
+ ss.setStartAndGoalStates(start, goal)
+ if not state_valid_fn(start()) or not state_valid_fn(goal()):
+ return
+
+ solved = ss.solve(planning_time)
+
+ if solved:
+ # try to shorten the path
+ ss.simplifySolution()
+ sol_path = ss.getSolutionPath()
+ return_path = []
+ for i in range(sol_path.getStateCount()):
+ x = sol_path.getState(i).getX()
+ y = sol_path.getState(i).getY()
+ yaw = sol_path.getState(i).getYaw()
+ return_path.append([x, y, yaw])
+ return remove_unnecessary_rotations(return_path)
+ return None
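+
+ # Illustrative call sketch (PlanningContext construction happens outside this module and is assumed here):
+ #   path = plan_base_motion(robot, end_conf=[1.0, 2.0, np.pi / 2], context=planning_context)
+ #   if path is not None:
+ #       for x, y, yaw in path:
+ #           ...  # drive the base through each 2D waypoint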
+
+def plan_arm_motion(
+ robot,
+ end_conf,
+ context,
+ planning_time=15.0,
+ torso_fixed=True,
+):
+ """
+ Plans an arm motion to a final joint position
+
+ Args:
+ robot (BaseRobot): Robot object to plan for
+ end_conf (Iterable): Final joint position to plan to
+ context (PlanningContext): Context to plan in that includes the robot copy
+ planning_time (float): Time to plan for
+
+ Returns:
+ Array of arrays: Array of joint positions that the robot should navigate to
+ """
+ from ompl import base as ob
+ from ompl import geometric as ompl_geo
+
+ if torso_fixed:
+ joint_control_idx = robot.arm_control_idx[robot.default_arm]
+ dim = len(joint_control_idx)
+ initial_joint_pos = np.array(robot.get_joint_positions()[joint_control_idx])
+ control_idx_in_joint_pos = np.arange(dim)
+ else:
+ joint_control_idx = np.concatenate([robot.trunk_control_idx, robot.arm_control_idx[robot.default_arm]])
+ dim = len(joint_control_idx)
+ if "combined" in robot.robot_arm_descriptor_yamls:
+ joint_combined_idx = np.concatenate([robot.trunk_control_idx, robot.arm_control_idx["combined"]])
+ initial_joint_pos = np.array(robot.get_joint_positions()[joint_combined_idx])
+ control_idx_in_joint_pos = np.where(np.in1d(joint_combined_idx, joint_control_idx))[0]
+ else:
+ initial_joint_pos = np.array(robot.get_joint_positions()[joint_control_idx])
+ control_idx_in_joint_pos = np.arange(dim)
+
+ def state_valid_fn(q):
+ joint_pos = initial_joint_pos
+ joint_pos[control_idx_in_joint_pos] = [q[i] for i in range(dim)]
+ return not set_arm_and_detect_collision(context, joint_pos)
+
+ # create a state space over the arm's joint values (one dimension per controlled joint)
+ space = ob.RealVectorStateSpace(dim)
+
+ # set lower and upper bounds
+ bounds = ob.RealVectorBounds(dim)
+ joints = np.array([joint for joint in robot.joints.values()])
+ arm_joints = joints[joint_control_idx]
+ for i, joint in enumerate(arm_joints):
+ if end_conf[i] > joint.upper_limit:
+ end_conf[i] = joint.upper_limit
+ if end_conf[i] < joint.lower_limit:
+ end_conf[i] = joint.lower_limit
+ if joint.upper_limit - joint.lower_limit > 2 * np.pi:
+ bounds.setLow(i, 0.0)
+ bounds.setHigh(i, 2 * np.pi)
+ else:
+ bounds.setLow(i, float(joint.lower_limit))
+ bounds.setHigh(i, float(joint.upper_limit))
+ space.setBounds(bounds)
+
+ # create a simple setup object
+ ss = ompl_geo.SimpleSetup(space)
+ ss.setStateValidityChecker(ob.StateValidityCheckerFn(state_valid_fn))
+
+ si = ss.getSpaceInformation()
+ planner = ompl_geo.BITstar(si)
+ ss.setPlanner(planner)
+
+ start_conf = robot.get_joint_positions()[joint_control_idx]
+ start = ob.State(space)
+ for i in range(dim):
+ start[i] = float(start_conf[i])
+
+ goal = ob.State(space)
+ for i in range(dim):
+ goal[i] = float(end_conf[i])
+ ss.setStartAndGoalStates(start, goal)
+
+ if not state_valid_fn(start) or not state_valid_fn(goal):
+ return
+
+ # this will automatically choose a default planner with
+ # default parameters
+ solved = ss.solve(planning_time)
+
+ if solved:
+ # try to shorten the path
+ # ss.simplifySolution()
+
+ sol_path = ss.getSolutionPath()
+ return_path = []
+ for i in range(sol_path.getStateCount()):
+ joint_pos = [sol_path.getState(i)[j] for j in range(dim)]
+ return_path.append(joint_pos)
+ return return_path
+ return None
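+
+ # Illustrative call sketch (context construction is assumed; end_conf is a joint-space target):
+ #   joint_path = plan_arm_motion(robot, end_conf=target_joint_positions, context=planning_context)
+ #   if joint_path is None:
+ #       ...  # start/goal in collision, or no solution found within planning_time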
+
+def plan_arm_motion_ik(
+ robot,
+ end_conf,
+ context,
+ planning_time=15.0,
+ torso_fixed=True,
+):
+ """
+ Plans an arm motion to a final end effector pose
+
+ Args:
+ robot (BaseRobot): Robot object to plan for
+ end_conf (Iterable): Final end effector pose to plan to
+ context (PlanningContext): Context to plan in that includes the robot copy
+ planning_time (float): Time to plan for
+
+ Returns:
+ Array of arrays: Array of end effector pose that the robot should navigate to
+ """
+ from ompl import base as ob
+ from ompl import geometric as ompl_geo
+
+ DOF = 6
+
+ if torso_fixed:
+ joint_control_idx = robot.arm_control_idx[robot.default_arm]
+ dim = len(joint_control_idx)
+ initial_joint_pos = np.array(robot.get_joint_positions()[joint_control_idx])
+ control_idx_in_joint_pos = np.arange(dim)
+ robot_description_path = robot.robot_arm_descriptor_yamls["left_fixed"]
+ else:
+ joint_control_idx = np.concatenate([robot.trunk_control_idx, robot.arm_control_idx[robot.default_arm]])
+ dim = len(joint_control_idx)
+ if "combined" in robot.robot_arm_descriptor_yamls:
+ joint_combined_idx = np.concatenate([robot.trunk_control_idx, robot.arm_control_idx["combined"]])
+ initial_joint_pos = np.array(robot.get_joint_positions()[joint_combined_idx])
+ control_idx_in_joint_pos = np.where(np.in1d(joint_combined_idx, joint_control_idx))[0]
+ else:
+ initial_joint_pos = np.array(robot.get_joint_positions()[joint_control_idx])
+ control_idx_in_joint_pos = np.arange(dim)
+ robot_description_path = robot.robot_arm_descriptor_yamls[robot.default_arm]
+
+ ik_solver = IKSolver(
+ robot_description_path=robot_description_path,
+ robot_urdf_path=robot.urdf_path,
+ default_joint_pos=robot.default_joint_pos[joint_control_idx],
+ eef_name=robot.eef_link_names[robot.default_arm],
+ )
+
+ def state_valid_fn(q):
+ joint_pos = initial_joint_pos
+ eef_pose = [q[i] for i in range(6)]
+ control_joint_pos = ik_solver.solve(
+ target_pos=eef_pose[:3],
+ target_quat=T.axisangle2quat(eef_pose[3:]),
+ max_iterations=1000,
+ )
+
+ if control_joint_pos is None:
+ return False
+ joint_pos[control_idx_in_joint_pos] = control_joint_pos
+ return not set_arm_and_detect_collision(context, joint_pos)
+
+ # create a 6-DOF state space for the end-effector pose (position + axis-angle orientation)
+ space = ob.RealVectorStateSpace(DOF)
+
+ # set lower and upper bounds for eef position
+ bounds = ob.RealVectorBounds(DOF)
+
+ EEF_X_LIM = [-0.8, 0.8]
+ EEF_Y_LIM = [-0.8, 0.8]
+ EEF_Z_LIM = [-2.0, 2.0]
+ bounds.setLow(0, EEF_X_LIM[0])
+ bounds.setHigh(0, EEF_X_LIM[1])
+ bounds.setLow(1, EEF_Y_LIM[0])
+ bounds.setHigh(1, EEF_Y_LIM[1])
+ bounds.setLow(2, EEF_Z_LIM[0])
+ bounds.setHigh(2, EEF_Z_LIM[1])
+
+ # # set lower and upper bounds for eef orientation (axis angle bounds)
+ for i in range(3, 6):
+ bounds.setLow(i, -np.pi)
+ bounds.setHigh(i, np.pi)
+ space.setBounds(bounds)
+
+ # create a simple setup object
+ ss = ompl_geo.SimpleSetup(space)
+ ss.setStateValidityChecker(ob.StateValidityCheckerFn(state_valid_fn))
+
+ si = ss.getSpaceInformation()
+ planner = ompl_geo.BITstar(si)
+ ss.setPlanner(planner)
+
+ start_conf = np.append(robot.get_relative_eef_position(), T.quat2axisangle(robot.get_relative_eef_orientation()))
+ # do fk
+ start = ob.State(space)
+ for i in range(DOF):
+ start[i] = float(start_conf[i])
+
+ goal = ob.State(space)
+ for i in range(DOF):
+ goal[i] = float(end_conf[i])
+ ss.setStartAndGoalStates(start, goal)
+
+ if not state_valid_fn(start) or not state_valid_fn(goal):
+ return
+
+ # this will automatically choose a default planner with
+ # default parameters
+ solved = ss.solve(planning_time)
+
+ if solved:
+ # try to shorten the path
+ # ss.simplifySolution()
+
+ sol_path = ss.getSolutionPath()
+ return_path = []
+ for i in range(sol_path.getStateCount()):
+ eef_pose = [sol_path.getState(i)[j] for j in range(DOF)]
+ return_path.append(eef_pose)
+ return return_path
+ return None
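+
+ # Note / sketch: end_conf here is a 6-vector [x, y, z, ax, ay, az], i.e. the target eef position plus an
+ # axis-angle orientation in the robot frame, e.g. np.append(target_pos, T.quat2axisangle(target_quat)).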
+
+def set_base_and_detect_collision(context, pose):
+ """
+ Moves the robot and detects robot collisions with the environment and itself
+
+ Args:
+ context (PlanningContext): Context to plan in that includes the robot copy
+ pose (Array): Pose in the world frame to check for collisions at
+
+ Returns:
+ bool: Whether the robot is in collision
+ """
+ robot_copy = context.robot_copy
+ robot_copy_type = context.robot_copy_type
+
+ translation = Gf.Vec3d(*np.array(pose[0], dtype=float))
+ robot_copy.prims[robot_copy_type].GetAttribute("xformOp:translate").Set(translation)
+
+ orientation = np.array(pose[1], dtype=float)[[3, 0, 1, 2]]
+ robot_copy.prims[robot_copy_type].GetAttribute("xformOp:orient").Set(Gf.Quatd(*orientation))
+
+ return detect_robot_collision(context)
+
+def set_arm_and_detect_collision(context, joint_pos):
+ """
+ Sets joint positions of the robot and detects robot collisions with the environment and itself
+
+ Args:
+ context (PlanningContext): Context to plan in that includes the robot copy
+ joint_pos (Array): Joint positions to set the robot to
+
+ Returns:
+ bool: Whether the robot is in a valid state i.e. not in collision
+ """
+ robot_copy = context.robot_copy
+ robot_copy_type = context.robot_copy_type
+
+ arm_links = context.robot.manipulation_link_names
+ link_poses = context.fk_solver.get_link_poses(joint_pos, arm_links)
+
+ for link in arm_links:
+ pose = link_poses[link]
+ if link in robot_copy.meshes[robot_copy_type].keys():
+ for mesh_name, mesh in robot_copy.meshes[robot_copy_type][link].items():
+ relative_pose = robot_copy.relative_poses[robot_copy_type][link][mesh_name]
+ mesh_pose = T.pose_transform(*pose, *relative_pose)
+ translation = Gf.Vec3d(*np.array(mesh_pose[0], dtype=float))
+ mesh.GetAttribute("xformOp:translate").Set(translation)
+ orientation = np.array(mesh_pose[1], dtype=float)[[3, 0, 1, 2]]
+ mesh.GetAttribute("xformOp:orient").Set(Gf.Quatd(*orientation))
+
+
+ return detect_robot_collision(context)
+
+def detect_robot_collision(context):
+ """
+ Detects robot collisions
+
+ Args:
+ context (PlanningContext): Context to plan in that includes the robot copy
+
+ Returns:
+ bool: Whether the robot is in collision
+ """
+ robot_copy = context.robot_copy
+ robot_copy_type = context.robot_copy_type
+
+ # Define function for checking overlap
+ valid_hit = False
+ mesh_path = None
+
+ def overlap_callback(hit):
+ nonlocal valid_hit
+
+ valid_hit = hit.rigid_body not in context.disabled_collision_pairs_dict[mesh_path]
+
+ return not valid_hit
+
+ for meshes in robot_copy.meshes[robot_copy_type].values():
+ for mesh in meshes.values():
+ if valid_hit:
+ return valid_hit
+ mesh_path = mesh.GetPrimPath().pathString
+ mesh_id = PhysicsSchemaTools.encodeSdfPath(mesh_path)
+ if mesh.GetTypeName() == "Mesh":
+ og.sim.psqi.overlap_mesh(*mesh_id, reportFn=overlap_callback)
+ else:
+ og.sim.psqi.overlap_shape(*mesh_id, reportFn=overlap_callback)
+
+ return valid_hit
+
+def detect_robot_collision_in_sim(robot, filter_objs=None, ignore_obj_in_hand=True):
+ """
+ Detects robot collisions with the environment, but not with itself using the ContactBodies API
+
+ Args:
+ robot (BaseRobot): Robot object to detect collisions for
+ filter_objs (None or list of StatefulObject): Additional objects to ignore collisions with (None means no extra filters)
+ ignore_obj_in_hand (bool): Whether to ignore collisions with the object in the robot's hand
+
+ Returns:
+ bool: Whether the robot is in collision
+ """
+ filter_categories = ["floors"]
+ # Avoid mutating a shared default or the caller's list when appending the in-hand object below
+ filter_objs = list(filter_objs) if filter_objs is not None else []
+
+ obj_in_hand = robot._ag_obj_in_hand[robot.default_arm]
+ if obj_in_hand is not None and ignore_obj_in_hand:
+ filter_objs.append(obj_in_hand)
+
+ collision_prims = list(robot.states[ContactBodies].get_value(ignore_objs=tuple(filter_objs)))
+
+ # Iterate over a copy since elements are removed from collision_prims inside the loop
+ for col_prim in list(collision_prims):
+ tokens = col_prim.prim_path.split("/")
+ obj_prim_path = "/".join(tokens[:-1])
+ col_obj = og.sim.scene.object_registry("prim_path", obj_prim_path)
+ if col_obj.category in filter_categories:
+ collision_prims.remove(col_prim)
+ return len(collision_prims) > 0
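+
+ # Illustrative call sketch (robot handle is an assumption):
+ #   in_contact = detect_robot_collision_in_sim(robot, ignore_obj_in_hand=True)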
diff --git a/omnigibson/utils/object_state_utils.py b/omnigibson/utils/object_state_utils.py
index 0a19f5243..701cec533 100644
--- a/omnigibson/utils/object_state_utils.py
+++ b/omnigibson/utils/object_state_utils.py
@@ -6,7 +6,7 @@
from scipy.spatial import ConvexHull, distance_matrix
import omnigibson as og
-from omnigibson.macros import create_module_macros, Dict, gm
+from omnigibson.macros import create_module_macros, Dict, macros
from omnigibson.object_states.aabb import AABB
from omnigibson.object_states.contact_bodies import ContactBodies
from omnigibson.utils import sampling_utils
@@ -42,6 +42,48 @@
})
+def sample_cuboid_for_predicate(predicate, on_obj, bbox_extent):
+ if predicate == "onTop":
+ params = m.ON_TOP_RAY_CASTING_SAMPLING_PARAMS
+ elif predicate == "inside":
+ params = m.INSIDE_RAY_CASTING_SAMPLING_PARAMS
+ elif predicate == "under":
+ params = m.UNDER_RAY_CASTING_SAMPLING_PARAMS
+ else:
+ raise ValueError(f"predicate must be onTop, under or inside in order to use ray casting-based "
+ f"kinematic sampling, but instead got: {predicate}")
+
+ if predicate == "under":
+ start_points, end_points = sampling_utils.sample_raytest_start_end_symmetric_bimodal_distribution(
+ obj=on_obj,
+ num_samples=1,
+ axis_probabilities=[0, 0, 1],
+ **params,
+ )
+ return sampling_utils.sample_cuboid_on_object(
+ obj=None,
+ start_points=start_points,
+ end_points=end_points,
+ ignore_objs=[on_obj],
+ cuboid_dimensions=bbox_extent,
+ refuse_downwards=True,
+ undo_cuboid_bottom_padding=True,
+ max_angle_with_z_axis=0.17,
+ hit_proportion=0.0, # rays will NOT hit the object itself, but the surface below it.
+ )
+ else:
+ return sampling_utils.sample_cuboid_on_object_symmetric_bimodal_distribution(
+ on_obj,
+ num_samples=1,
+ axis_probabilities=[0, 0, 1],
+ cuboid_dimensions=bbox_extent,
+ refuse_downwards=True,
+ undo_cuboid_bottom_padding=True,
+ max_angle_with_z_axis=0.17,
+ **params,
+ )
+
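+ # Illustrative usage sketch (object handle and extents are assumptions):
+ #   results = sample_cuboid_for_predicate("onTop", table_obj, np.array([0.1, 0.1, 0.05]))
+ #   sampled_pos, sampled_quat = results[0][0], results[0][2]  # may be None if sampling fails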
+
def sample_kinematics(
predicate,
objA,
@@ -99,16 +141,6 @@ def sample_kinematics(
# This would slightly change because of the step_physics call.
old_pos, orientation = objA.get_position_orientation()
- if predicate == "onTop":
- params = m.ON_TOP_RAY_CASTING_SAMPLING_PARAMS
- elif predicate == "inside":
- params = m.INSIDE_RAY_CASTING_SAMPLING_PARAMS
- elif predicate == "under":
- params = m.UNDER_RAY_CASTING_SAMPLING_PARAMS
- else:
- raise ValueError(f"predicate must be onTop, under or inside in order to use ray casting-based "
- f"kinematic sampling, but instead got: {predicate}")
-
# Run import here to avoid circular imports
from omnigibson.objects.dataset_object import DatasetObject
if isinstance(objA, DatasetObject) and objA.prim_type == PrimType.RIGID:
@@ -122,36 +154,7 @@ def sample_kinematics(
parallel_bbox_orn = np.array([0.0, 0.0, 0.0, 1.0])
parallel_bbox_extents = aabb_upper - aabb_lower
- if predicate == "under":
- start_points, end_points = sampling_utils.sample_raytest_start_end_symmetric_bimodal_distribution(
- obj=objB,
- num_samples=1,
- axis_probabilities=[0, 0, 1],
- **params,
- )
- sampling_results = sampling_utils.sample_cuboid_on_object(
- obj=None,
- start_points=start_points,
- end_points=end_points,
- ignore_objs=[objB],
- cuboid_dimensions=parallel_bbox_extents,
- refuse_downwards=True,
- undo_cuboid_bottom_padding=True,
- max_angle_with_z_axis=0.17,
- hit_proportion=0.0, # rays will NOT hit the object itself, but the surface below it.
- )
- else:
- sampling_results = sampling_utils.sample_cuboid_on_object_symmetric_bimodal_distribution(
- objB,
- num_samples=1,
- axis_probabilities=[0, 0, 1],
- cuboid_dimensions=parallel_bbox_extents,
- refuse_downwards=True,
- undo_cuboid_bottom_padding=True,
- max_angle_with_z_axis=0.17,
- **params,
- )
-
+ sampling_results = sample_cuboid_for_predicate(predicate, objB, parallel_bbox_extents)
sampled_vector = sampling_results[0][0]
sampled_quaternion = sampling_results[0][2]
@@ -186,7 +189,7 @@ def sample_kinematics(
objA.keep_still()
success = len(objA.states[ContactBodies].get_value()) == 0
- if gm.DEBUG:
+ if macros.utils.sampling_utils.DEBUG_SAMPLING:
debug_breakpoint(f"sample_kinematics: {success}")
if success:
diff --git a/omnigibson/utils/sampling_utils.py b/omnigibson/utils/sampling_utils.py
index 9771e5392..a5377faac 100644
--- a/omnigibson/utils/sampling_utils.py
+++ b/omnigibson/utils/sampling_utils.py
@@ -20,6 +20,7 @@
# Create settings for this module
m = create_module_macros(module_path=__file__)
+m.DEBUG_SAMPLING = False
m.DEFAULT_AABB_OFFSET_FRACTION = 0.02
m.DEFAULT_PARALLEL_RAY_NORMAL_ANGLE_TOLERANCE = 1.0 # Around 60 degrees
m.DEFAULT_HIT_TO_PLANE_THRESHOLD = 0.05
@@ -46,7 +47,7 @@ def fit_plane(points, refusal_log):
- 3-array: (x,y,z) normal of the fitted plane
"""
if points.shape[0] < points.shape[1]:
- if gm.DEBUG:
+ if m.DEBUG_SAMPLING:
refusal_log.append(f"insufficient points to fit a 3D plane: needs 3, has {points.shape[0]}.")
return None, None
@@ -74,7 +75,7 @@ def check_distance_to_plane(points, plane_centroid, plane_normal, hit_to_plane_t
"""
distances = get_distance_to_plane(points, plane_centroid, plane_normal)
if np.any(distances > hit_to_plane_threshold):
- if gm.DEBUG:
+ if m.DEBUG_SAMPLING:
refusal_log.append("distances to plane: %r" % distances)
return False
return True
@@ -563,7 +564,7 @@ def sample_cuboid_on_object_symmetric_bimodal_distribution(
list of tuple: list of length num_samples elements where each element is a tuple in the form of
(cuboid_centroid, cuboid_up_vector, cuboid_rotation, {refusal_reason: [refusal_details...]}). Cuboid positions
are set to None when no successful sampling happens within the max number of attempts. Refusal details are only
- filled if the gm.DEBUG flag is globally set to True.
+ filled if the m.DEBUG_SAMPLING flag is globally set to True.
"""
start_points, end_points = sample_raytest_start_end_symmetric_bimodal_distribution(
obj,
@@ -651,7 +652,7 @@ def sample_cuboid_on_object_full_grid_topdown(
list of tuple: list of length num_samples elements where each element is a tuple in the form of
(cuboid_centroid, cuboid_up_vector, cuboid_rotation, {refusal_reason: [refusal_details...]}). Cuboid positions
are set to None when no successful sampling happens within the max number of attempts. Refusal details are only
- filled if the gm.DEBUG flag is globally set to True.
+ filled if the m.DEBUG_SAMPLING flag is globally set to True.
"""
start_points, end_points = sample_raytest_start_end_full_grid_topdown(
obj,
@@ -735,7 +736,7 @@ def sample_cuboid_on_object(
list of tuple: list of length num_samples elements where each element is a tuple in the form of
(cuboid_centroid, cuboid_up_vector, cuboid_rotation, {refusal_reason: [refusal_details...]}). Cuboid positions
are set to None when no successful sampling happens within the max number of attempts. Refusal details are only
- filled if the gm.DEBUG flag is globally set to True.
+ filled if the m.DEBUG_SAMPLING flag is globally set to True.
"""
assert start_points.shape == end_points.shape, \
@@ -904,7 +905,7 @@ def sample_cuboid_on_object(
results[i] = (cuboid_centroid, plane_normal, rotation.as_quat(), hit_link, refusal_reasons)
break
- if gm.DEBUG:
+ if m.DEBUG_SAMPLING:
og.log.debug("Sampling rejection reasons:")
counter = Counter()
@@ -934,7 +935,7 @@ def compute_rotation_from_grid_sample(two_d_grid, projected_hits, cuboid_centroi
generated hit plane. Otherwise, returns None
"""
if np.sum(hits) < 3:
- if gm.DEBUG:
+ if m.DEBUG_SAMPLING:
refusal_log.append(f"insufficient hits to compute the rotation of the grid: needs 3, has {np.sum(hits)}")
return None
@@ -976,7 +977,7 @@ def check_normal_similarity(center_hit_normal, hit_normals, tolerance, refusal_l
parallel_hit_normal_angles_to_hit_normal < tolerance
)
if not all_rays_hit_with_similar_normal:
- if gm.DEBUG:
+ if m.DEBUG_SAMPLING:
refusal_log.append("angles %r" % (np.rad2deg(parallel_hit_normal_angles_to_hit_normal),))
return False
@@ -1006,7 +1007,7 @@ def check_rays_hit_object(cast_results, threshold, refusal_log, body_names=None)
for ray_res in cast_results
]
if sum(ray_hits) / len(cast_results) < threshold:
- if gm.DEBUG:
+ if m.DEBUG_SAMPLING:
refusal_log.append(f"{sum(ray_hits)} / {len(cast_results)} < {threshold} hits: {[ray_res['rigidBody'] for ray_res in cast_results if ray_res['hit']]}")
return None
@@ -1029,7 +1030,7 @@ def check_hit_max_angle_from_z_axis(hit_normal, max_angle_with_z_axis, refusal_l
"""
hit_angle_with_z = np.arccos(np.clip(np.dot(hit_normal, np.array([0, 0, 1])), -1.0, 1.0))
if hit_angle_with_z > max_angle_with_z_axis:
- if gm.DEBUG:
+ if m.DEBUG_SAMPLING:
refusal_log.append("normal %r" % hit_normal)
return False
@@ -1097,7 +1098,7 @@ def check_cuboid_empty(hit_normal, bottom_corner_positions, this_cuboid_dimensio
Returns:
bool: True if the cuboid is empty, else False
"""
- if gm.DEBUG:
+ if m.DEBUG_SAMPLING:
draw_debug_markers(bottom_corner_positions)
# Compute top corners.
@@ -1119,7 +1120,7 @@ def check_cuboid_empty(hit_normal, bottom_corner_positions, this_cuboid_dimensio
all_pairs = np.array(top_to_bottom_pairs + bottom_pairs + top_pairs)
check_cast_results = raytest_batch(start_points=all_pairs[:, 0, :], end_points=all_pairs[:, 1, :])
if any(ray["hit"] for ray in check_cast_results):
- if gm.DEBUG:
+ if m.DEBUG_SAMPLING:
refusal_log.append("check ray info: %r" % (check_cast_results))
return False
diff --git a/omnigibson/utils/transform_utils.py b/omnigibson/utils/transform_utils.py
index 22d18a727..3c6d02e0f 100644
--- a/omnigibson/utils/transform_utils.py
+++ b/omnigibson/utils/transform_utils.py
@@ -371,7 +371,6 @@ def mat2pose(hmat):
Returns:
2-tuple:
-
- (np.array) (x,y,z) position array in cartesian coordinates
- (np.array) (x,y,z,w) orientation array in quaternion form
"""
@@ -534,7 +533,6 @@ def quat2euler(quat):
"""
return R.from_quat(quat).as_euler("xyz")
-
def pose_in_A_to_pose_in_B(pose_A, pose_A_in_B):
"""
Converts a homogenous matrix corresponding to a point C in frame A
@@ -596,6 +594,11 @@ def pose_transform(pos1, quat1, pos0, quat0):
quat1: (x,y,z,w) orientation to transform
pos0: (x,y,z) initial position
quat0: (x,y,z,w) initial orientation
+
+ Returns:
+ 2-tuple:
+ - (np.array) (x,y,z) position array in cartesian coordinates
+ - (np.array) (x,y,z,w) orientation array in quaternion form
"""
# Get poses
mat0 = pose2mat((pos0, quat0))
@@ -604,6 +607,25 @@ def pose_transform(pos1, quat1, pos0, quat0):
# Multiply and convert back to pos, quat
return mat2pose(mat1 @ mat0)
+def invert_pose_transform(pos, quat):
+ """
+ Inverts a pose transform
+
+ Args:
+ pos: (x,y,z) position to transform
+ quat: (x,y,z,w) orientation to transform
+
+ Returns:
+ 2-tuple:
+ - (np.array) (x,y,z) position array in cartesian coordinates
+ - (np.array) (x,y,z,w) orientation array in quaternion form
+ """
+ # Get pose
+ mat = pose2mat((pos, quat))
+
+ # Invert pose and convert back to pos, quat
+ return mat2pose(pose_inv(mat))
+
def relative_pose_transform(pos1, quat1, pos0, quat0):
"""
@@ -616,6 +638,11 @@ def relative_pose_transform(pos1, quat1, pos0, quat0):
quat1: (x,y,z,w) orientation to transform
pos0: (x,y,z) initial position
quat0: (x,y,z,w) initial orientation
+
+ Returns:
+ 2-tuple:
+ - (np.array) (x,y,z) position array in cartesian coordinates
+ - (np.array) (x,y,z,w) orientation array in quaternion form
"""
# Get poses
mat0 = pose2mat((pos0, quat0))
@@ -1110,4 +1137,15 @@ def check_quat_right_angle(quat, atol=5e-2):
Returns:
bool: Whether the quaternion is a right angle or not
"""
- return np.any(np.isclose(np.abs(quat).sum(), np.array([1.0, 1.414, 2.0]), atol=atol))
\ No newline at end of file
+ return np.any(np.isclose(np.abs(quat).sum(), np.array([1.0, 1.414, 2.0]), atol=atol))
+
+
+def z_angle_from_quat(quat):
+ """Get the angle around the Z axis produced by the quaternion."""
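+ # Rotate the world X axis by the quaternion and take the heading of its projection
+ # onto the XY plane; this extracts only the yaw component of the rotation.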
+ rotated_X_axis = R.from_quat(quat).apply([1, 0, 0])
+ return np.arctan2(rotated_X_axis[1], rotated_X_axis[0])
+
+
+def z_rotation_from_quat(quat):
+ """Get the quaternion for the rotation around the Z axis produced by the quaternion."""
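+ # Keeps only the yaw found by z_angle_from_quat: applying the returned quaternion to
+ # [1, 0, 0] gives the same heading as applying `quat`, with roll and pitch discarded.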
+ return R.from_euler("z", z_angle_from_quat(quat)).as_quat()
diff --git a/scripts/setup.sh b/scripts/setup.sh
index 238369233..0c6474f2b 100755
--- a/scripts/setup.sh
+++ b/scripts/setup.sh
@@ -28,7 +28,8 @@ echo -e "\nUsing [4m$conda_name[0m as the conda environment name\n"
# Get Python version from Isaac Sim
ISAAC_PYTHON_VERSION=$(${ISAAC_SIM_PATH}/python.sh -c "import platform; print(platform.python_version())")
-echo Using Python version [4m$ISAAC_PYTHON_VERSION[0m matching your current Isaac Sim version
+ISAAC_PYTHON_VERSION="${ISAAC_PYTHON_VERSION##*$'\n'}" # get rid of conda activation warnings
+echo Using Python version [4m$ISAAC_PYTHON_VERSION[0m matching your current Isaac Sim version
# Create a conda environment with the appropriate python version
source $(conda info --base)/etc/profile.d/conda.sh
diff --git a/setup.py b/setup.py
index 123ce37e2..59e357e5d 100644
--- a/setup.py
+++ b/setup.py
@@ -41,7 +41,8 @@
"termcolor",
"progressbar",
"pymeshlab",
- "click"
+ "click",
+ "aenum",
],
tests_require=[],
python_requires=">=3",
diff --git a/tests/test_primitives.py b/tests/test_primitives.py
new file mode 100644
index 000000000..03a3e6a48
--- /dev/null
+++ b/tests/test_primitives.py
@@ -0,0 +1,231 @@
+import numpy as np
+import omnigibson as og
+from omnigibson.macros import gm
+from omnigibson.action_primitives.starter_semantic_action_primitives import StarterSemanticActionPrimitives, StarterSemanticActionPrimitiveSet
+import omnigibson.utils.transform_utils as T
+from omnigibson.objects.dataset_object import DatasetObject
+
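+# Helper: step the environment through every action yielded by a primitive controller generator.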
+def execute_controller(ctrl_gen, env):
+ for action in ctrl_gen:
+ env.step(action)
+
+def set_start_pose(robot):
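+ # Hand-tuned Tiago joint configuration used as the reset pose for these tests.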
+ reset_pose_tiago = np.array([
+ -1.78029833e-04, 3.20231302e-05, -1.85759447e-07, -1.16488536e-07,
+ 4.55182843e-08, 2.36128806e-04, 1.50000000e-01, 9.40000000e-01,
+ -1.10000000e+00, 0.00000000e+00, -0.90000000e+00, 1.47000000e+00,
+ 0.00000000e+00, 2.10000000e+00, 2.71000000e+00, 1.50000000e+00,
+ 1.71000000e+00, 1.30000000e+00, -1.57000000e+00, -1.40000000e+00,
+ 1.39000000e+00, 0.00000000e+00, 0.00000000e+00, 4.50000000e-02,
+ 4.50000000e-02, 4.50000000e-02, 4.50000000e-02,
+ ])
+ robot.set_joint_positions(reset_pose_tiago)
+ og.sim.step()
+
+def primitive_tester(load_object_categories, objects, primitives, primitives_args):
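+ """
+ Build an Rs_int scene with a Tiago robot, import the given objects, then execute each
+ primitive with its arguments. Returns True if every primitive runs to completion
+ without raising, False otherwise (the sim is cleared in both cases).
+ """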
+ cfg = {
+ "scene": {
+ "type": "InteractiveTraversableScene",
+ "scene_model": "Rs_int",
+ "load_object_categories": load_object_categories,
+ },
+ "robots": [
+ {
+ "type": "Tiago",
+ "obs_modalities": ["scan", "rgb", "depth"],
+ "scale": 1.0,
+ "self_collisions": True,
+ "action_normalize": False,
+ "action_type": "continuous",
+ "grasping_mode": "sticky",
+ "rigid_trunk": False,
+ "default_arm_pose": "diagonal30",
+ "default_trunk_offset": 0.365,
+ "controller_config": {
+ "base": {
+ "name": "JointController",
+ "motor_type": "velocity"
+ },
+ "arm_left": {
+ "name": "JointController",
+ "motor_type": "position",
+ "command_input_limits": None,
+ "command_output_limits": None,
+ "use_delta_commands": False
+ },
+ "arm_right": {
+ "name": "JointController",
+ "motor_type": "position",
+ "command_input_limits": None,
+ "command_output_limits": None,
+ "use_delta_commands": False
+ },
+ "gripper_left": {
+ "name": "JointController",
+ "motor_type": "position",
+ "command_input_limits": [-1, 1],
+ "command_output_limits": None,
+ "use_delta_commands": True
+ },
+ "gripper_right": {
+ "name": "JointController",
+ "motor_type": "position",
+ "command_input_limits": [-1, 1],
+ "command_output_limits": None,
+ "use_delta_commands": True
+ },
+ "camera": {
+ "name": "JointController",
+ "motor_type": "position",
+ "command_input_limits": None,
+ "command_output_limits": None,
+ "use_delta_commands": False
+ }
+ }
+ }
+ ],
+ }
+
+ # Make sure sim is stopped
+ og.sim.stop()
+
+ # Enable object states, and keep GPU dynamics and flatcache disabled (not needed for these tests)
+ gm.ENABLE_OBJECT_STATES = True
+ gm.USE_GPU_DYNAMICS = False
+ gm.ENABLE_FLATCACHE = False
+
+ # Create the environment
+ env = og.Environment(configs=cfg, action_timestep=1 / 60., physics_timestep=1 / 60.)
+ robot = env.robots[0]
+ env.reset()
+
+ for obj in objects:
+ og.sim.import_object(obj['object'])
+ obj['object'].set_position_orientation(obj['position'], obj['orientation'])
+ og.sim.step()
+
+ controller = StarterSemanticActionPrimitives(env)
+ set_start_pose(robot)
+ for primitive, args in zip(primitives, primitives_args):
+ try:
+ execute_controller(controller.apply_ref(primitive, *args), env)
+ except Exception:
+ og.sim.clear()
+ return False
+
+ # Clear the sim
+ og.sim.clear()
+ return True
+
+def test_navigate():
+ categories = ["floors", "ceilings", "walls"]
+
+ objects = []
+ obj_1 = {
+ "object": DatasetObject(
+ name="cologne",
+ category="bottle_of_cologne",
+ model="lyipur"
+ ),
+ "position": [-0.3, -0.8, 0.5],
+ "orientation": [0, 0, 0, 1]
+ }
+ objects.append(obj_1)
+
+ primitives = [StarterSemanticActionPrimitiveSet.NAVIGATE_TO]
+ primitives_args = [(obj_1['object'],)]
+
+ assert primitive_tester(categories, objects, primitives, primitives_args)
+
+def test_grasp():
+ categories = ["floors", "ceilings", "walls", "coffee_table"]
+
+ objects = []
+ obj_1 = {
+ "object": DatasetObject(
+ name="cologne",
+ category="bottle_of_cologne",
+ model="lyipur"
+ ),
+ "position": [-0.3, -0.8, 0.5],
+ "orientation": [0, 0, 0, 1]
+ }
+ objects.append(obj_1)
+
+ primitives = [StarterSemanticActionPrimitiveSet.GRASP]
+ primitives_args = [(obj_1['object'],)]
+
+ assert primitive_tester(categories, objects, primitives, primitives_args)
+
+def test_place():
+ categories = ["floors", "ceilings", "walls", "coffee_table"]
+
+ objects = []
+ obj_1 = {
+ "object": DatasetObject(
+ name="table",
+ category="breakfast_table",
+ model="rjgmmy",
+ scale=[0.3, 0.3, 0.3]
+ ),
+ "position": [-0.7, 0.5, 0.2],
+ "orientation": [0, 0, 0, 1]
+ }
+ obj_2 = {
+ "object": DatasetObject(
+ name="cologne",
+ category="bottle_of_cologne",
+ model="lyipur"
+ ),
+ "position": [-0.3, -0.8, 0.5],
+ "orientation": [0, 0, 0, 1]
+ }
+ objects.append(obj_1)
+ objects.append(obj_2)
+
+ primitives = [StarterSemanticActionPrimitiveSet.GRASP, StarterSemanticActionPrimitiveSet.PLACE_ON_TOP]
+ primitives_args = [(obj_2['object'],), (obj_1['object'],)]
+
+ assert primitive_tester(categories, objects, primitives, primitives_args)
+
+def test_open_prismatic():
+ categories = ["floors"]
+
+ objects = []
+ obj_1 = {
+ "object": DatasetObject(
+ name="bottom_cabinet",
+ category="bottom_cabinet",
+ model="bamfsz",
+ scale=[0.7, 0.7, 0.7]
+ ),
+ "position": [-1.2, -0.4, 0.5],
+ "orientation": [0, 0, 0, 1]
+ }
+ objects.append(obj_1)
+
+ primitives = [StarterSemanticActionPrimitiveSet.OPEN]
+ primitives_args = [(obj_1['object'],)]
+
+ assert primitive_tester(categories, objects, primitives, primitives_args)
+
+def test_open_revolute():
+ categories = ["floors"]
+
+ objects = []
+ obj_1 = {
+ "object": DatasetObject(
+ name="fridge",
+ category="fridge",
+ model="dszchb",
+ scale=[0.7, 0.7, 0.7]
+ ),
+ "position": [-1.2, -0.4, 0.5],
+ "orientation": [0, 0, 0, 1]
+ }
+ objects.append(obj_1)
+
+ primitives = [StarterSemanticActionPrimitiveSet.OPEN]
+ primitives_args = [(obj_1['object'],)]
+
+ assert primitive_tester(categories, objects, primitives, primitives_args)
\ No newline at end of file
diff --git a/tests/test_symbolic_primitives.py b/tests/test_symbolic_primitives.py
new file mode 100644
index 000000000..77d845ded
--- /dev/null
+++ b/tests/test_symbolic_primitives.py
@@ -0,0 +1,303 @@
+import os
+import pytest
+import yaml
+
+from omnigibson.macros import gm
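+# Simulator macros are set before the environment is created; GPU dynamics is needed
+# for the fluid (water, mud) systems these tests rely on.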
+gm.USE_GPU_DYNAMICS = True
+gm.ENABLE_FLATCACHE = True
+
+import omnigibson as og
+from omnigibson import object_states
+from omnigibson.action_primitives.symbolic_semantic_action_primitives import SymbolicSemanticActionPrimitiveSet, SymbolicSemanticActionPrimitives
+from omnigibson.systems import get_system
+
+
+def start_env():
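+ """
+ Create a Wainscott_0_int kitchen environment with a Fetch robot plus a frying pan,
+ carving knife, apple and sponge for the symbolic-primitive tests.
+ """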
+ config = {
+ "env": {
+ "initial_pos_z_offset": 0.1
+ },
+ "render": {
+ "viewer_width": 1280,
+ "viewer_height": 720
+ },
+ "scene": {
+ "type": "InteractiveTraversableScene",
+ "scene_model": "Wainscott_0_int",
+ "load_object_categories": ["floors", "walls", "countertop", "fridge", "sink", "stove"],
+ "scene_source": "OG",
+ },
+ "robots": [
+ {
+ "type": "Fetch",
+ "obs_modalities": [
+ "scan",
+ "rgb",
+ "depth"
+ ],
+ "scale": 1,
+ "self_collisions": True,
+ "action_normalize": False,
+ "action_type": "continuous",
+ "grasping_mode": "sticky",
+ "disable_grasp_handling": True,
+ "rigid_trunk": False,
+ "default_trunk_offset": 0.365,
+ "default_arm_pose": "diagonal30",
+ "reset_joint_pos": "tuck",
+ "controller_config": {
+ "base": {
+ "name": "DifferentialDriveController"
+ },
+ "arm_0": {
+ "name": "JointController",
+ "motor_type": "position",
+ "command_input_limits": None,
+ "command_output_limits": None,
+ "use_delta_commands": False
+ },
+ "gripper_0": {
+ "name": "JointController",
+ "motor_type": "position",
+ "command_input_limits": [
+ -1,
+ 1
+ ],
+ "command_output_limits": None,
+ "use_delta_commands": True,
+ },
+ "camera": {
+ "name": "JointController",
+ "use_delta_commands": False
+ }
+ }
+ }
+ ],
+ "objects": [
+ {
+ "type": "DatasetObject",
+ "name": "pan",
+ "category": "frying_pan",
+ "model": "mhndon",
+ "position": [5.31, 10.75, 1.],
+ },
+ {
+ "type": "DatasetObject",
+ "name": "knife",
+ "category": "carving_knife",
+ "model": "awvoox",
+ "position": [5.31, 10.75, 1.2],
+ },
+ {
+ "type": "DatasetObject",
+ "name": "apple",
+ "category": "apple",
+ "model": "agveuv",
+ "position": [4.75, 10.75, 1.],
+ "bounding_box": [0.098, 0.098, 0.115]
+ },
+ {
+ "type": "DatasetObject",
+ "name": "sponge",
+ "category": "sponge",
+ "model": "qewotb",
+ "position": [4.75, 10.75, 1.],
+ },
+ ]
+ }
+
+ env = og.Environment(configs=config)
+
+ return env
+
+@pytest.fixture(scope="module")
+def shared_env():
+ """Load the environment just once using module scope."""
+ return start_env()
+
+@pytest.fixture(scope="function")
+def env(shared_env):
+ """Reset the environment before each test function."""
+ og.sim.scene.reset()
+ return shared_env
+
+@pytest.fixture
+def robot(env):
+ return env.robots[0]
+
+@pytest.fixture
+def prim_gen(env):
+ return SymbolicSemanticActionPrimitives(env)
+
+@pytest.fixture
+def countertop(env):
+ return next(iter(env.scene.object_registry("category", "countertop")))
+
+@pytest.fixture
+def fridge(env):
+ return next(iter(env.scene.object_registry("category", "fridge")))
+
+@pytest.fixture
+def stove(env):
+ return next(iter(env.scene.object_registry("category", "stove")))
+
+@pytest.fixture
+def sink(env):
+ return next(iter(env.scene.object_registry("category", "sink")))
+
+@pytest.fixture
+def pan(env):
+ return next(iter(env.scene.object_registry("category", "frying_pan")))
+
+@pytest.fixture
+def apple(env):
+ return next(iter(env.scene.object_registry("category", "apple")))
+
+@pytest.fixture
+def sponge(env):
+ return next(iter(env.scene.object_registry("category", "sponge")))
+
+@pytest.fixture
+def knife(env):
+ return next(iter(env.scene.object_registry("category", "carving_knife")))
+
+def test_in_hand_state(env, robot, prim_gen, apple):
+ assert not robot.states[object_states.IsGrasping].get_value(apple)
+ for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.GRASP, apple):
+ env.step(action)
+ assert robot.states[object_states.IsGrasping].get_value(apple)
+
+# def test_navigate():
+# pass
+
+def test_open(env, prim_gen, fridge):
+ assert not fridge.states[object_states.Open].get_value()
+ for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.OPEN, fridge):
+ env.step(action)
+ assert fridge.states[object_states.Open].get_value()
+
+def test_close(env, prim_gen, fridge):
+ fridge.states[object_states.Open].set_value(True)
+ assert fridge.states[object_states.Open].get_value()
+ for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.CLOSE, fridge):
+ env.step(action)
+ assert not fridge.states[object_states.Open].get_value()
+
+def test_place_inside(env, prim_gen, apple, fridge):
+ assert not apple.states[object_states.Inside].get_value(fridge)
+ assert not fridge.states[object_states.Open].get_value()
+ for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.OPEN, fridge):
+ env.step(action)
+ for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.GRASP, apple):
+ env.step(action)
+ for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.PLACE_INSIDE, fridge):
+ env.step(action)
+ assert apple.states[object_states.Inside].get_value(fridge)
+
+def test_place_ontop(env, prim_gen, apple, pan):
+ assert not apple.states[object_states.OnTop].get_value(pan)
+ for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.GRASP, apple):
+ env.step(action)
+ for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.PLACE_ON_TOP, pan):
+ env.step(action)
+ assert apple.states[object_states.OnTop].get_value(pan)
+
+def test_toggle_on(env, prim_gen, stove):
+ assert not stove.states[object_states.ToggledOn].get_value()
+ for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.TOGGLE_ON, stove):
+ env.step(action)
+ assert stove.states[object_states.ToggledOn].get_value()
+
+def test_soak_under(env, prim_gen, robot, sponge, sink):
+ water_system = get_system("water", force_active=True)
+ assert not sponge.states[object_states.Saturated].get_value(water_system)
+ assert not sink.states[object_states.ToggledOn].get_value()
+
+ # First toggle on the sink
+ for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.TOGGLE_ON, sink):
+ env.step(action)
+ assert sink.states[object_states.ToggledOn].get_value()
+
+ # Then grasp the sponge
+ for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.GRASP, sponge):
+ env.step(action)
+ assert robot.states[object_states.IsGrasping].get_value(sponge)
+
+ # Then soak the sponge under the water
+ for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.SOAK_UNDER, sink):
+ env.step(action)
+ assert sponge.states[object_states.Saturated].get_value(water_system)
+
+# def test_soak_inside():
+# pass
+
+def test_wipe(env, prim_gen, robot, sponge, sink, countertop):
+ # Some pre-assertions
+ water_system = get_system("water", force_active=True)
+ assert not sponge.states[object_states.Saturated].get_value(water_system)
+ assert not sink.states[object_states.ToggledOn].get_value()
+
+ # Dirty the countertop as the setup
+ mud_system = get_system("mud", force_active=True)
+ countertop.states[object_states.Covered].set_value(mud_system, True)
+ assert countertop.states[object_states.Covered].get_value(mud_system)
+
+ # First toggle on the sink
+ for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.TOGGLE_ON, sink):
+ env.step(action)
+ assert sink.states[object_states.ToggledOn].get_value()
+
+ # Then grasp the sponge
+ for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.GRASP, sponge):
+ env.step(action)
+ assert robot.states[object_states.IsGrasping].get_value(sponge)
+
+ # Then soak the sponge under the water
+ for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.SOAK_UNDER, sink):
+ env.step(action)
+ assert sponge.states[object_states.Saturated].get_value(water_system)
+
+ # Wipe the countertop with the sponge
+ for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.WIPE, countertop):
+ env.step(action)
+ assert not countertop.states[object_states.Covered].get_value(mud_system)
+
+def test_cut(env, prim_gen, apple, knife, countertop):
+ # assert not apple.states[object_states.Cut].get_value(knife)
+ print("Grasping knife")
+ for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.GRASP, knife):
+ env.step(action)
+ for _ in range(60): env.step(prim_gen._empty_action())
+ print("Cutting apple")
+ for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.CUT, apple):
+ env.step(action)
+ for _ in range(60): env.step(prim_gen._empty_action())
+ print("Putting knife back on countertop")
+ for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.PLACE_ON_TOP, countertop):
+ env.step(action)
+
+# def test_place_near_heating_element():
+# pass
+
+# def test_wait_for_cooked():
+# pass
+
+def main():
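+ # Manual entry point: runs the cut scenario interactively, outside of pytest.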
+ env = start_env()
+ prim_gen = SymbolicSemanticActionPrimitives(env)
+ apple = next(iter(env.scene.object_registry("category", "apple")))
+ knife = next(iter(env.scene.object_registry("category", "carving_knife")))
+ countertop = next(iter(env.scene.object_registry("category", "countertop")))
+
+ print("Will start in 3 seconds")
+ for _ in range(180): env.step(prim_gen._empty_action())
+
+ test_cut(env, prim_gen, apple, knife, countertop)
+ while True:
+ og.sim.step()
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file