Merge pull request #1051 from StanfordVL/feat/np-opt
Optional Numpy Compute Backend
cremebrule authored Dec 19, 2024
2 parents 4cbc359 + a1fbadd commit fefe56e
Showing 48 changed files with 2,488 additions and 577 deletions.
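The heart of the change is a swappable compute backend: controller and planner code now routes array math through `cb` (imported from omnigibson/utils/backend_utils as `_compute_backend`) rather than calling torch directly, so the control hot path can optionally run on NumPy, with torch kept as the interchange format at module boundaries via `cb.to_torch` / `cb.from_torch`. A minimal sketch of what the NumPy side of that backend plausibly provides, inferred only from the `cb.*` call sites visible in this diff (the real implementation in backend_utils.py will differ in detail):

import numpy as np
import torch as th


class _NumpyBackendSketch:
    # Array type used for isinstance() checks, e.g. in nums2array below
    arr_type = np.ndarray

    array = staticmethod(lambda x: np.asarray(x, dtype=np.float32))
    zeros = staticmethod(lambda *shape: np.zeros(shape, dtype=np.float32))
    ones = staticmethod(lambda *shape: np.ones(shape, dtype=np.float32))
    prod = staticmethod(np.prod)
    as_int = staticmethod(lambda x: np.asarray(x, dtype=np.int32))

    @staticmethod
    def to_torch(x):
        # Hand data back to torch-based consumers (transform utils, curobo, ...)
        return th.from_numpy(np.asarray(x, dtype=np.float32))

    @staticmethod
    def from_torch(x):
        return x.detach().cpu().numpy()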
6 changes: 5 additions & 1 deletion omnigibson/action_primitives/curobo.py
@@ -150,6 +150,7 @@ def __init__(
         motion_cfg_kwargs=None,
         batch_size=2,
         use_cuda_graph=True,
+        collision_activation_distance=0.005,
         debug=False,
         use_default_embodiment_only=False,
     ):
@@ -165,6 +166,9 @@ def __init__(
                 MotionGenConfig.load_from_robot_config(...)
             batch_size (int): Size of batches for computing trajectories. This must be FIXED
             use_cuda_graph (bool): Whether to use CUDA graph for motion generation or not
+            collision_activation_distance (float): Distance threshold at which a collision is detected.
+                Increasing this value will make the motion planner more conservative in its planning with respect
+                to the underlying sphere representation of the robot
             debug (bool): Whether to debug generation or not, setting this True will set use_cuda_graph to False implicitly
             use_default_embodiment_only (bool): Whether to use only the default embodiment for the robot or not
         """
@@ -227,7 +231,7 @@ def __init__(
             num_trajopt_seeds=4,
             num_graph_seeds=4,
             interpolation_dt=0.03,
-            collision_activation_distance=0.005,
+            collision_activation_distance=collision_activation_distance,
             self_collision_check=True,
             maximum_trajectory_dt=None,
             fixed_iters_trajopt=True,
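With `collision_activation_distance` exposed as a constructor argument (previously hard-coded to 0.005 further down in this file), callers can tune how conservatively CuRobo plans around near-collisions. A hypothetical usage sketch; the `CuRoboMotionGenerator` class name and the `robot` argument are assumed from this file's context, so check the class definition for the exact signature:

from omnigibson.action_primitives.curobo import CuRoboMotionGenerator

# `robot` stands in for an already-loaded OmniGibson robot instance
planner = CuRoboMotionGenerator(
    robot=robot,
    batch_size=2,  # must stay FIXED across calls, per the docstring above
    use_cuda_graph=True,
    collision_activation_distance=0.02,  # larger = more conservative than the 0.005 default
)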
@@ -36,7 +36,8 @@
 from omnigibson.robots.locomotion_robot import LocomotionRobot
 from omnigibson.robots.manipulation_robot import ManipulationRobot
 from omnigibson.tasks.behavior_task import BehaviorTask
-from omnigibson.utils.control_utils import FKSolver, IKSolver, orientation_error
+from omnigibson.utils.backend_utils import _compute_backend as cb
+from omnigibson.utils.control_utils import FKSolver, IKSolver
 from omnigibson.utils.grasping_planning_utils import get_grasp_poses_for_object_sticky, get_grasp_position_for_open
 from omnigibson.utils.motion_planning_utils import (
     detect_robot_collision_in_sim,
@@ -336,13 +337,13 @@ def __init__(
             arm = f"arm_{arm_name}"
             arm_ctrl = self.robot.controllers[arm]
             if isinstance(arm_ctrl, InverseKinematicsController):
-                pos_relative = control_dict[f"{eef}_pos_relative"]
-                quat_relative = control_dict[f"{eef}_quat_relative"]
+                pos_relative = cb.to_torch(control_dict[f"{eef}_pos_relative"])
+                quat_relative = cb.to_torch(control_dict[f"{eef}_quat_relative"])
                 quat_relative_axis_angle = T.quat2axisangle(quat_relative)
                 self._arm_targets[arm] = (pos_relative, quat_relative_axis_angle)
             else:

-                arm_target = control_dict["joint_position"][arm_ctrl.dof_idx]
+                arm_target = cb.to_torch(control_dict["joint_position"])[arm_ctrl.dof_idx]
                 self._arm_targets[arm] = arm_target

         self.robot_copy = self._load_robot_copy()
@@ -1472,7 +1473,7 @@ def _empty_action(self, follow_arm_targets=True):
                 )
                 delta_pos = target_pos - current_pos
                 if controller.mode == "pose_delta_ori":
-                    delta_orn = orientation_error(
+                    delta_orn = T.orientation_error(
                         T.quat2mat(T.axisangle2quat(target_orn_axisangle)), T.quat2mat(current_orn)
                     )
                     partial_action = th.cat((delta_pos, delta_orn))
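Two patterns recur in this file's changes: values read out of `control_dict` are normalized with `cb.to_torch` before they reach the torch-based transform utilities, and the standalone `orientation_error` helper from control_utils is dropped in favor of `T.orientation_error` in the transform utils. For reference, the orientation error used for `pose_delta_ori` commands is conventionally the cross-product rotation error between two 3x3 rotation matrices; a sketch of that formula, which is what `T.orientation_error` is expected to compute (the version in omnigibson's transform_utils is authoritative):

import torch as th


def orientation_error_sketch(desired: th.Tensor, current: th.Tensor) -> th.Tensor:
    # e = 0.5 * (c1 x d1 + c2 x d2 + c3 x d3), where ci / di are the column
    # vectors of the current / desired rotation matrices, respectively
    rc1, rc2, rc3 = current[:, 0], current[:, 1], current[:, 2]
    rd1, rd2, rd3 = desired[:, 0], desired[:, 1], desired[:, 2]
    return 0.5 * (
        th.linalg.cross(rc1, rd1) + th.linalg.cross(rc2, rd2) + th.linalg.cross(rc3, rd3)
    )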
63 changes: 45 additions & 18 deletions omnigibson/controllers/controller_base.py
@@ -4,6 +4,7 @@

 import torch as th

+from omnigibson.utils.backend_utils import _compute_backend as cb
 from omnigibson.utils.python_utils import Recreatable, Registerable, Serializable, assert_valid_key, classproperty

 # Global dicts that will contain mappings
@@ -112,11 +113,11 @@ def __init__(
         ]
         assert "has_limit" in control_limits, "Expected has_limit specified in control_limits, but does not exist."
         self._dof_has_limits = control_limits["has_limit"]
-        self._dof_idx = dof_idx.int()
+        self._dof_idx = cb.as_int(dof_idx)

         # Generate goal information
         self._goal_shapes = self._get_goal_shapes()
-        self._goal_dim = int(sum(th.prod(th.tensor(shape)) for shape in self._goal_shapes.values()))
+        self._goal_dim = int(sum(cb.prod(cb.array(shape)) for shape in self._goal_shapes.values()))

         # Initialize some other variables that will be filled in during runtime
         self._control = None
@@ -127,15 +128,12 @@ def __init__(

         # Standardize command input / output limits to be (min_array, max_array)
         command_input_limits = (
-            (-1.0, 1.0)
+            self._generate_default_command_input_limits()
             if type(command_input_limits) == str and command_input_limits == "default"
             else command_input_limits
         )
         command_output_limits = (
-            (
-                self._control_limits[self.control_type][0][self.dof_idx],
-                self._control_limits[self.control_type][1][self.dof_idx],
-            )
+            self._generate_default_command_output_limits()
             if type(command_output_limits) == str and command_output_limits == "default"
             else command_output_limits
         )
@@ -156,6 +154,31 @@ def __init__(
             )
         )

+    def _generate_default_command_input_limits(self):
+        """
+        Generates default command input limits based on the control limits
+
+        Returns:
+            2-tuple:
+                - int or array: min command input limits
+                - int or array: max command input limits
+        """
+        return (-1.0, 1.0)
+
+    def _generate_default_command_output_limits(self):
+        """
+        Generates default command output limits based on the control limits
+
+        Returns:
+            2-tuple:
+                - int or array: min command output limits
+                - int or array: max command output limits
+        """
+        return (
+            self._control_limits[self.control_type][0][self.dof_idx],
+            self._control_limits[self.control_type][1][self.dof_idx],
+        )
+
     def _preprocess_command(self, command):
         """
         Clips + scales inputted @command according to self.command_input_limits and self.command_output_limits.
@@ -169,7 +192,7 @@
             Array[float]: Processed command vector
         """
         # Make sure command is a th.tensor
-        command = th.tensor([command], dtype=th.float32) if type(command) in {int, float} else command
+        command = cb.array([command]) if type(command) in {int, float} else command
         # We only clip and / or scale if self.command_input_limits exists
         if self._command_input_limits is not None:
             # Clip
@@ -342,7 +365,7 @@ def compute_no_op_action(self, control_dict):
         if self._goal is None:
             self._goal = self.compute_no_op_goal(control_dict=control_dict)
         command = self._compute_no_op_action(control_dict=control_dict)
-        return self._reverse_preprocess_command(command)
+        return cb.to_torch(self._reverse_preprocess_command(command))

     def _compute_no_op_action(self, control_dict):
         """
@@ -354,18 +377,26 @@ def _dump_state(self):
         # Default is just the command
         return dict(
             goal_is_valid=self._goal is not None,
-            goal=self._goal,
+            goal=None if self._goal is None else {k: cb.to_torch(v) for k, v in self._goal.items()},
         )

     def _load_state(self, state):
-        # Make sure every entry in goal is a numpy array
-        self._goal = None if state["goal"] is None else {name: goal_state for name, goal_state in state["goal"].items()}
+        # Load goal
+        if state["goal"] is None:
+            self._goal = None
+        else:
+            self._goal = dict()
+            for name, goal_state in state["goal"].items():
+                if isinstance(goal_state, th.Tensor):
+                    self._goal[name] = cb.from_torch(goal_state)
+                else:
+                    self._goal[name] = goal_state

     def serialize(self, state):
         # Make sure size of the state is consistent, even if we have no goal
         goal_state_flattened = (
-            th.cat([goal_state.flatten() for goal_state in self._goal.values()])
+            th.cat([goal_state.flatten() for goal_state in state["goal"].values()])
             if state["goal_is_valid"]
             else th.zeros(self.goal_dim)
         )
@@ -419,12 +450,8 @@ def nums2array(nums, dim):
         # Else, input is a single value, so we map to a numpy array of correct size and return
         return (
             nums
-            if isinstance(nums, th.Tensor)
-            else (
-                th.tensor(nums, dtype=th.float32)
-                if isinstance(nums, Iterable)
-                else th.ones(dim, dtype=th.float32) * nums
-            )
+            if isinstance(nums, cb.arr_type)
+            else cb.array(nums) if isinstance(nums, Iterable) else cb.ones(dim) * nums
         )

     @property
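Factoring the hard-coded defaults out into `_generate_default_command_input_limits` / `_generate_default_command_output_limits` turns them into overridable hooks: a subclass can customize its default limits without re-implementing the limit-standardization logic in `__init__`. A hypothetical sketch (the subclass name and scaling are invented for illustration, and a real controller would still need to implement the remaining abstract methods):

from omnigibson.controllers.controller_base import BaseController


class HalfRangeController(BaseController):
    def _generate_default_command_output_limits(self):
        # Default to half of the control limits instead of the full range
        low, high = super()._generate_default_command_output_limits()
        return low * 0.5, high * 0.5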
9 changes: 4 additions & 5 deletions omnigibson/controllers/dd_controller.py
@@ -1,6 +1,5 @@
-import torch as th
-
 from omnigibson.controllers import ControlType, LocomotionController
+from omnigibson.utils.backend_utils import _compute_backend as cb


 class DifferentialDriveController(LocomotionController):
@@ -107,14 +106,14 @@ def compute_control(self, goal_dict, control_dict):
         right_wheel_joint_vel = (lin_vel + ang_vel * self._wheel_axle_halflength) / self._wheel_radius

         # Return desired velocities
-        return th.tensor([left_wheel_joint_vel, right_wheel_joint_vel])
+        return cb.array([left_wheel_joint_vel, right_wheel_joint_vel])

     def compute_no_op_goal(self, control_dict):
         # This is zero-vector, since we want zero linear / angular velocity
-        return dict(vel=th.zeros(2))
+        return dict(vel=cb.zeros(2))

     def _compute_no_op_action(self, control_dict):
-        return th.zeros(2, dtype=th.float32)
+        return cb.zeros(2)

     def _get_goal_shapes(self):
         # Add (2, )-array representing linear, angular velocity
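The wheel velocities above are standard differential-drive inverse kinematics: a desired base twist of linear velocity v and angular velocity w maps to wheel joint velocities via the wheel radius r and axle half-length d as v_left = (v - w * d) / r and v_right = (v + w * d) / r (the right-wheel form is visible in the hunk above; the left-wheel form differs only in the sign of the angular term). A standalone sketch:

def diff_drive_wheel_velocities(lin_vel, ang_vel, wheel_radius, wheel_axle_halflength):
    # Map a desired base twist (v, w) to (left, right) wheel joint velocities
    left_wheel_joint_vel = (lin_vel - ang_vel * wheel_axle_halflength) / wheel_radius
    right_wheel_joint_vel = (lin_vel + ang_vel * wheel_axle_halflength) / wheel_radius
    return left_wheel_joint_vel, right_wheel_joint_vel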