From 3123d1df397d139b433e879573ad18dbe781220a Mon Sep 17 00:00:00 2001 From: hang-yin Date: Mon, 4 Nov 2024 14:19:47 -0800 Subject: [PATCH 01/76] Remove OMPL and robot copy from primitives --- .../starter_semantic_action_primitives.py | 218 +----------------- 1 file changed, 1 insertion(+), 217 deletions(-) diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index 1b1aec0d0..9dbacc895 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -38,13 +38,6 @@ from omnigibson.tasks.behavior_task import BehaviorTask from omnigibson.utils.control_utils import FKSolver, IKSolver, orientation_error from omnigibson.utils.grasping_planning_utils import get_grasp_poses_for_object_sticky, get_grasp_position_for_open -from omnigibson.utils.motion_planning_utils import ( - detect_robot_collision_in_sim, - plan_arm_motion, - plan_arm_motion_ik, - plan_base_motion, - set_base_and_detect_collision, -) from omnigibson.utils.object_state_utils import sample_cuboid_for_predicate from omnigibson.utils.python_utils import multi_dim_linspace from omnigibson.utils.ui_utils import create_module_logger @@ -112,147 +105,6 @@ def indented_print(msg, *args, **kwargs): print(" " * len(inspect.stack()) + str(msg), *args, **kwargs) -class RobotCopy: - """A data structure for storing information about a robot copy, used for collision checking in planning.""" - - def __init__(self): - self.prims = {} - self.meshes = {} - self.relative_poses = {} - self.links_relative_poses = {} - self.reset_pose = { - "original": (th.tensor([0, 0, -5.0], dtype=th.float32), th.tensor([0, 0, 0, 1], dtype=th.float32)), - "simplified": (th.tensor([5, 0, -5.0], dtype=th.float32), th.tensor([0, 0, 0, 1], dtype=th.float32)), - } - - -class PlanningContext(object): - """ - A context manager that sets up a robot copy for collision checking in planning. - """ - - def __init__(self, env, robot, robot_copy, robot_copy_type="original"): - self.env = env - self.robot = robot - self.robot_copy = robot_copy - self.robot_copy_type = robot_copy_type if robot_copy_type in robot_copy.prims.keys() else "original" - self.disabled_collision_pairs_dict = {} - - # For now, the planning context only works with Fetch and Tiago - assert isinstance(self.robot, (Fetch, Tiago)), "PlanningContext only works with Fetch and Tiago." - - def __enter__(self): - self._assemble_robot_copy() - self._construct_disabled_collision_pairs() - return self - - def __exit__(self, *args): - self._set_prim_pose( - self.robot_copy.prims[self.robot_copy_type], self.robot_copy.reset_pose[self.robot_copy_type] - ) - - def _assemble_robot_copy(self): - if m.TIAGO_TORSO_FIXED: - fk_descriptor = "left_fixed" - else: - fk_descriptor = ( - "combined" if "combined" in self.robot.robot_arm_descriptor_yamls else self.robot.default_arm - ) - self.fk_solver = FKSolver( - robot_description_path=self.robot.robot_arm_descriptor_yamls[fk_descriptor], - robot_urdf_path=self.robot.urdf_path, - ) - - # TODO: Remove the need for this after refactoring the FK / descriptors / etc. - arm_links = self.robot.manipulation_link_names - - if m.TIAGO_TORSO_FIXED: - assert self.arm == "left", "Fixed torso mode only supports left arm!" 
- joint_control_idx = self.robot.arm_control_idx["left"] - joint_pos = self.robot.get_joint_positions()[joint_control_idx] - else: - joint_combined_idx = th.cat([self.robot.trunk_control_idx, self.robot.arm_control_idx[fk_descriptor]]) - joint_pos = self.robot.get_joint_positions()[joint_combined_idx] - link_poses = self.fk_solver.get_link_poses(joint_pos, arm_links) - - # Assemble robot meshes - for link_name, meshes in self.robot_copy.meshes[self.robot_copy_type].items(): - for mesh_name, copy_mesh in meshes.items(): - # Skip grasping frame (this is necessary for Tiago, but should be cleaned up in the future) - if "grasping_frame" in link_name: - continue - # Set poses of meshes relative to the robot to construct the robot - link_pose = ( - link_poses[link_name] - if link_name in arm_links - else self.robot_copy.links_relative_poses[self.robot_copy_type][link_name] - ) - mesh_copy_pose = T.pose_transform( - *link_pose, *self.robot_copy.relative_poses[self.robot_copy_type][link_name][mesh_name] - ) - self._set_prim_pose(copy_mesh, mesh_copy_pose) - - def _set_prim_pose(self, prim, pose): - translation = lazy.pxr.Gf.Vec3d(*pose[0].tolist()) - prim.GetAttribute("xformOp:translate").Set(translation) - orientation = pose[1][[3, 0, 1, 2]] - prim.GetAttribute("xformOp:orient").Set(lazy.pxr.Gf.Quatd(*orientation.tolist())) - - def _construct_disabled_collision_pairs(self): - robot_meshes_copy = self.robot_copy.meshes[self.robot_copy_type] - - # Filter out collision pairs of meshes part of the same link - for meshes in robot_meshes_copy.values(): - for mesh in meshes.values(): - self.disabled_collision_pairs_dict[mesh.GetPrimPath().pathString] = [ - m.GetPrimPath().pathString for m in meshes.values() - ] - - # Filter out all self-collisions - if self.robot_copy_type == "simplified": - all_meshes = [ - mesh.GetPrimPath().pathString - for link in robot_meshes_copy.keys() - for mesh in robot_meshes_copy[link].values() - ] - for link in robot_meshes_copy.keys(): - for mesh in robot_meshes_copy[link].values(): - self.disabled_collision_pairs_dict[mesh.GetPrimPath().pathString] += all_meshes - # Filter out collision pairs of meshes part of disabled collision pairs - else: - for pair in self.robot.disabled_collision_pairs: - link_1 = pair[0] - link_2 = pair[1] - if link_1 in robot_meshes_copy.keys() and link_2 in robot_meshes_copy.keys(): - for mesh in robot_meshes_copy[link_1].values(): - self.disabled_collision_pairs_dict[mesh.GetPrimPath().pathString] += [ - m.GetPrimPath().pathString for m in robot_meshes_copy[link_2].values() - ] - - for mesh in robot_meshes_copy[link_2].values(): - self.disabled_collision_pairs_dict[mesh.GetPrimPath().pathString] += [ - m.GetPrimPath().pathString for m in robot_meshes_copy[link_1].values() - ] - - # Filter out colliders all robot copy meshes should ignore - disabled_colliders = [] - - # Disable original robot colliders so copy can't collide with it - disabled_colliders += [link.prim_path for link in self.robot.links.values()] - filter_categories = ["floors", "carpet"] - for obj in self.env.scene.objects: - if obj.category in filter_categories: - disabled_colliders += [link.prim_path for link in obj.links.values()] - - # Disable object in hand - obj_in_hand = self.robot._ag_obj_in_hand[self.robot.default_arm] - if obj_in_hand is not None: - disabled_colliders += [link.prim_path for link in obj_in_hand.links.values()] - - for colliders in self.disabled_collision_pairs_dict.values(): - colliders += disabled_colliders - - class 
StarterSemanticActionPrimitiveSet(IntEnum): _init_ = "value __doc__" GRASP = auto(), "Grasp an object" @@ -345,8 +197,6 @@ def __init__( arm_target = control_dict["joint_position"][arm_ctrl.dof_idx] self._arm_targets[arm] = arm_target - self.robot_copy = self._load_robot_copy() - @property def arm(self): if not isinstance(self.robot, ManipulationRobot): @@ -380,73 +230,6 @@ def _postprocess_action(self, action): context = action_type + context_function return action, context - def _load_robot_copy(self): - """Loads a copy of the robot that can be manipulated into arbitrary configurations for collision checking in planning.""" - robot_copy = RobotCopy() - - robots_to_copy = {"original": {"robot": self.robot, "copy_path": self.robot.prim_path + "_copy"}} - - for robot_type, rc in robots_to_copy.items(): - copy_robot = None - copy_robot_meshes = {} - copy_robot_meshes_relative_poses = {} - copy_robot_links_relative_poses = {} - - # Create prim under which robot meshes are nested and set position - lazy.omni.usd.commands.CreatePrimCommand("Xform", rc["copy_path"]).do() - copy_robot = lazy.omni.isaac.core.utils.prims.get_prim_at_path(rc["copy_path"]) - reset_pose = robot_copy.reset_pose[robot_type] - translation = lazy.pxr.Gf.Vec3d(*reset_pose[0].tolist()) - copy_robot.GetAttribute("xformOp:translate").Set(translation) - orientation = reset_pose[1][[3, 0, 1, 2]] - copy_robot.GetAttribute("xformOp:orient").Set(lazy.pxr.Gf.Quatd(*orientation.tolist())) - - robot_to_copy = None - if robot_type == "simplified": - robot_to_copy = rc["robot"] - self.env.scene.add_object(robot_to_copy) - else: - robot_to_copy = rc["robot"] - - # Copy robot meshes - for link in robot_to_copy.links.values(): - link_name = link.prim_path.split("/")[-1] - for mesh_name, mesh in link.collision_meshes.items(): - split_path = mesh.prim_path.split("/") - # Do not copy grasping frame (this is necessary for Tiago, but should be cleaned up in the future) - if "grasping_frame" in link_name: - continue - - copy_mesh_path = rc["copy_path"] + "/" + link_name - copy_mesh_path += f"_{split_path[-1]}" if split_path[-1] != "collisions" else "" - lazy.omni.usd.commands.CopyPrimCommand(mesh.prim_path, path_to=copy_mesh_path).do() - copy_mesh = lazy.omni.isaac.core.utils.prims.get_prim_at_path(copy_mesh_path) - relative_pose = T.relative_pose_transform( - *mesh.get_position_orientation(), *link.get_position_orientation() - ) - relative_pose = (relative_pose[0], th.tensor([0, 0, 0, 1])) - if link_name not in copy_robot_meshes.keys(): - copy_robot_meshes[link_name] = {mesh_name: copy_mesh} - copy_robot_meshes_relative_poses[link_name] = {mesh_name: relative_pose} - else: - copy_robot_meshes[link_name][mesh_name] = copy_mesh - copy_robot_meshes_relative_poses[link_name][mesh_name] = relative_pose - - copy_robot_links_relative_poses[link_name] = T.relative_pose_transform( - *link.get_position_orientation(), *self.robot.get_position_orientation() - ) - - if robot_type == "simplified": - self.env.scene.remove_object(robot_to_copy) - - robot_copy.prims[robot_type] = copy_robot - robot_copy.meshes[robot_type] = copy_robot_meshes - robot_copy.relative_poses[robot_type] = copy_robot_meshes_relative_poses - robot_copy.links_relative_poses[robot_type] = copy_robot_links_relative_poses - - og.sim.step() - return robot_copy - def get_action_space(self): # TODO: Figure out how to implement what happens when the set of objects in scene changes. 
if self._task_relevant_objects_only: @@ -1626,6 +1409,7 @@ def _navigate_to_pose(self, pose_2d): def _draw_plan(self, plan): SEARCHED = [] trav_map = self.env.scene._trav_map + # TODO: implement this for curobo for q in plan: # The below code is useful for plotting the RRT tree. map_point = trav_map.world_to_map((q[0], q[1])) From 5871adc090af57f57caa5b63588f37176cbff0cb Mon Sep 17 00:00:00 2001 From: hang-yin Date: Tue, 5 Nov 2024 11:10:16 -0800 Subject: [PATCH 02/76] Refactor action primitives and replace OMPL with curobo for manipulation --- .../action_primitive_set_base.py | 20 +- .../starter_semantic_action_primitives.py | 326 +++++++----------- .../symbolic_semantic_action_primitives.py | 6 +- 3 files changed, 142 insertions(+), 210 deletions(-) diff --git a/omnigibson/action_primitives/action_primitive_set_base.py b/omnigibson/action_primitives/action_primitive_set_base.py index 15f77026a..a5cf53261 100644 --- a/omnigibson/action_primitives/action_primitive_set_base.py +++ b/omnigibson/action_primitives/action_primitive_set_base.py @@ -58,19 +58,13 @@ def __init_subclass__(cls, **kwargs): if not inspect.isabstract(cls): REGISTERED_PRIMITIVE_SETS[cls.__name__] = cls - def __init__(self, env): - self.env = env + def __init__(self, robot): + self.robot = robot - @property - def robot(self): - # Currently returns the first robot in the environment, but can be scaled to multiple robots - # by creating multiple action generators and passing in a robot index etc. - return self.env.robots[0] - - @abstractmethod - def get_action_space(self): - """Get the higher-level action space as an OpenAI Gym Space object.""" - pass + # @abstractmethod + # def get_action_space(self): + # """Get the higher-level action space as an OpenAI Gym Space object.""" + # pass @abstractmethod def apply(self, action): @@ -84,7 +78,7 @@ def apply(self, action): pass @abstractmethod - def apply_ref(self, action, *args): + def apply_ref(self, primitive, *args): """ Apply a primitive action by reference. 
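For orientation, the base-class changes above ripple through every call site: primitive sets are now constructed from a robot rather than an environment, and `apply_ref` takes the primitive by reference together with its arguments. A minimal usage sketch (mirroring the example scripts updated later in this series; the `env` object and scene contents are assumed to exist already):

    from omnigibson.action_primitives.starter_semantic_action_primitives import (
        StarterSemanticActionPrimitives,
        StarterSemanticActionPrimitiveSet,
    )

    # env is an already-constructed og.Environment containing a single supported robot
    robot = env.robots[0]
    controller = StarterSemanticActionPrimitives(robot, enable_head_tracking=False)

    # apply_ref is a generator: it yields one low-level action per step, and the
    # caller is responsible for stepping the environment with each yielded action
    grasp_obj = env.scene.object_registry("name", "cologne")
    for action in controller.apply_ref(StarterSemanticActionPrimitiveSet.GRASP, grasp_obj, attempts=1):
        env.step(action)
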
diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index 9dbacc895..522cbdc04 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -7,10 +7,8 @@ """ import inspect -import logging import math import random -from functools import cached_property import cv2 import gymnasium as gym @@ -27,11 +25,10 @@ ActionPrimitiveErrorGroup, BaseActionPrimitiveSet, ) +from omnigibson.action_primitives.curobo import CuroboEmbodimentSelection, CuRoboMotionGenerator from omnigibson.controllers import DifferentialDriveController, InverseKinematicsController, JointController -from omnigibson.controllers.controller_base import ControlType from omnigibson.macros import create_module_macros from omnigibson.objects.object_base import BaseObject -from omnigibson.objects.usd_object import USDObject from omnigibson.robots import * from omnigibson.robots.locomotion_robot import LocomotionRobot from omnigibson.robots.manipulation_robot import ManipulationRobot @@ -71,6 +68,8 @@ m.MAX_STEPS_FOR_SETTLING = 500 +m.MAX_STEPS_FOR_JOINT_MOTION = 500 + m.MAX_CARTESIAN_HAND_STEP = 0.002 m.MAX_STEPS_FOR_HAND_MOVE_JOINT = 500 m.MAX_STEPS_FOR_HAND_MOVE_IK = 1000 @@ -124,8 +123,7 @@ class StarterSemanticActionPrimitiveSet(IntEnum): class StarterSemanticActionPrimitives(BaseActionPrimitiveSet): def __init__( self, - env, - add_context=False, + robot, enable_head_tracking=True, always_track_eef=False, task_relevant_objects_only=False, @@ -134,8 +132,7 @@ def __init__( Initializes a StarterSemanticActionPrimitives generator. Args: - env (Environment): The environment that the primitives will run on. - add_context (bool): Whether to add text context to the return value. Defaults to False. + robot (BaseRobot): The robot that the primitives will run on. enable_head_tracking (bool): Whether to enable head tracking. Defaults to True. always_track_eef (bool, optional): Whether to always track the end effector, as opposed to switching between target object and end effector based on context. Defaults to False. @@ -146,7 +143,9 @@ def __init__( "The StarterSemanticActionPrimitive is a work-in-progress and is only provided as an example. " "It currently only works with Fetch and Tiago with their JointControllers set to delta mode." ) - super().__init__(env) + super().__init__(robot) + # TODO: Make this batch_size a parameter? + self._motion_generator = CuRoboMotionGenerator(robot=self.robot, batch_size=1) self.controller_functions = { StarterSemanticActionPrimitiveSet.GRASP: self._grasp, StarterSemanticActionPrimitiveSet.PLACE_ON_TOP: self._place_on_top, @@ -160,18 +159,10 @@ def __init__( } # Validate the robot if isinstance(self.robot, LocomotionRobot): - assert isinstance( - self.robot.controllers["base"], (JointController, DifferentialDriveController) - ), "StarterSemanticActionPrimitives only works with a JointController or DifferentialDriveController at the robot base." - if self._base_controller_is_joint: - assert not self.robot.controllers[ - "base" - ].use_delta_commands, ( - "StarterSemanticActionPrimitives only works with a base JointController with absolute mode." 
- ) - - self.robot_model = self.robot.model_name - self.add_context = add_context + base_controller = self.robot.controllers["base"] + assert ( + isinstance(base_controller, (JointController)) and not base_controller.use_delta_commands + ), "StarterSemanticActionPrimitives only works with a JointController with absolute mode at the robot base." self._task_relevant_objects_only = task_relevant_objects_only @@ -203,50 +194,29 @@ def arm(self): raise ValueError("Cannot use arm for non-manipulation robot") return self.robot.default_arm - @property - def _base_controller_is_joint(self): - return isinstance(self.robot.controllers["base"], JointController) - def _postprocess_action(self, action): - """Postprocesses action by applying head tracking and adding context if necessary.""" + """Postprocesses action by applying head tracking.""" if self._enable_head_tracking: action = self._overwrite_head_action(action) + return action - if not self.add_context: - return action - - stack = inspect.stack() - action_type = "manip:" - context_function = stack[1].function - - for frame_info in stack[1:]: - function_name = frame_info.function - # TODO: Make this stop at apply_ref - if function_name in ["_grasp", "_place_on_top", "_place_or_top", "_open_or_close"]: - break - if "nav" in function_name: - action_type = "nav" - - context = action_type + context_function - return action, context - - def get_action_space(self): - # TODO: Figure out how to implement what happens when the set of objects in scene changes. - if self._task_relevant_objects_only: - assert isinstance( - self.env.task, BehaviorTask - ), "Activity relevant objects can only be used for BEHAVIOR tasks" - self.addressable_objects = sorted(set(self.env.task.object_scope.values()), key=lambda obj: obj.name) - else: - self.addressable_objects = sorted(set(self.env.scene.objects_by_name.values()), key=lambda obj: obj.name) - - # Filter out the robots. - self.addressable_objects = [obj for obj in self.addressable_objects if not isinstance(obj, BaseRobot)] - - self.num_objects = len(self.addressable_objects) - return gym.spaces.Tuple( - [gym.spaces.Discrete(self.num_objects), gym.spaces.Discrete(len(StarterSemanticActionPrimitiveSet))] - ) + # def get_action_space(self): + # # TODO: Figure out how to implement what happens when the set of objects in scene changes. + # if self._task_relevant_objects_only: + # assert isinstance( + # self.env.task, BehaviorTask + # ), "Activity relevant objects can only be used for BEHAVIOR tasks" + # self.addressable_objects = sorted(set(self.env.task.object_scope.values()), key=lambda obj: obj.name) + # else: + # self.addressable_objects = sorted(set(self.env.scene.objects_by_name.values()), key=lambda obj: obj.name) + + # # Filter out the robots. + # self.addressable_objects = [obj for obj in self.addressable_objects if not isinstance(obj, BaseRobot)] + + # self.num_objects = len(self.addressable_objects) + # return gym.spaces.Tuple( + # [gym.spaces.Discrete(self.num_objects), gym.spaces.Discrete(len(StarterSemanticActionPrimitiveSet))] + # ) def get_action_from_primitive_and_object(self, primitive: StarterSemanticActionPrimitiveSet, obj: BaseObject): assert obj in self.addressable_objects @@ -274,12 +244,12 @@ def apply(self, action): action = StarterSemanticActionPrimitiveSet(action_idx) return self.apply_ref(action, target_obj) - def apply_ref(self, prim, *args, attempts=5): + def apply_ref(self, primitive, *args, attempts=5): """ Yields action for robot to execute the primitive with the given arguments. 
Args: - prim (StarterSemanticActionPrimitiveSet): Primitive to execute + primitive (StarterSemanticActionPrimitiveSet): Primitive to execute args: Arguments for the primitive attempts (int): Number of attempts to make before raising an error @@ -290,7 +260,7 @@ def apply_ref(self, prim, *args, attempts=5): ActionPrimitiveError: If primitive fails to execute """ assert attempts > 0, "Must make at least one attempt" - ctrl = self.controller_functions[prim] + ctrl = self.controller_functions[primitive] errors = [] for _ in range(attempts): @@ -758,7 +728,7 @@ def _ik_solver_cartesian_to_joint_space(self, relative_target_pose): return joint_pos - def _move_hand(self, target_pose, stop_if_stuck=False): + def _move_hand(self, target_pose, arm=None, stop_if_stuck=False): """ Yields action for the robot to move hand so the eef is in the target pose using the planner @@ -768,83 +738,67 @@ def _move_hand(self, target_pose, stop_if_stuck=False): Returns: th.tensor or None: Action array for one step for the robot to move hand or None if its at the target pose """ + if arm is None: + arm = self.arm yield from self._settle_robot() - controller_config = self.robot._controller_config["arm_" + self.arm] - if controller_config["name"] == "InverseKinematicsController": - target_pose_relative = self._get_pose_in_robot_frame(target_pose) - yield from self._move_hand_ik(target_pose_relative, stop_if_stuck=stop_if_stuck) - else: - joint_pos = self._convert_cartesian_to_joint_space(target_pose) - yield from self._move_hand_joint(joint_pos) - - def _move_hand_joint(self, joint_pos): - """ - Yields action for the robot to move arm to reach the specified joint positions using the planner - - Args: - joint_pos (th.tensor): Joint positions for the arm - - Returns: - th.tensor or None: Action array for one step for the robot to move arm or None if its at the joint positions - """ - with PlanningContext(self.env, self.robot, self.robot_copy, "original") as context: - plan = plan_arm_motion( - robot=self.robot, - end_conf=joint_pos, - context=context, - torso_fixed=m.TIAGO_TORSO_FIXED, - ) - - # plan = self._add_linearly_interpolated_waypoints(plan, 0.1) - if plan is None: + # curobo motion generator takes a pose but outputs joint positions + left_hand_pos, left_hand_quat = target_pose if arm == "left" else self.robot.get_eef_pose(arm="left") + right_hand_pos, right_hand_quat = target_pose if arm == "right" else self.robot.get_eef_pose(arm="right") + target_pos = { + self.robot.eef_link_names["left"]: left_hand_pos, + self.robot.eef_link_names["right"]: right_hand_pos, + } + target_quat = { + self.robot.eef_link_names["left"]: left_hand_quat, + self.robot.eef_link_names["right"]: right_hand_quat, + } + successes, traj_paths = self._motion_generator.compute_trajectories( + target_pos=target_pos, + target_quat=target_quat, + is_local=False, + max_attempts=5, + timeout=60.0, + ik_fail_return=5, + enable_finetune_trajopt=True, + finetune_attempts=1, + return_full_result=False, + success_ratio=1.0, + emb_sel=CuroboEmbodimentSelection.ARM, + ) + success, traj_path = successes[0], traj_paths[0] + if not success: raise ActionPrimitiveError( ActionPrimitiveError.Reason.PLANNING_ERROR, - "There is no accessible path from where you are to the desired joint position. Try again", + "There is no accessible path from where you are to the desired pose. Try again", ) - # Follow the plan to navigate. 
- indented_print(f"Plan has {len(plan)} steps") - for i, joint_pos in enumerate(plan): - indented_print(f"Executing arm movement plan step {i + 1}/{len(plan)}") - yield from self._move_hand_direct_joint(joint_pos, ignore_failure=True) - - def _move_hand_ik(self, eef_pose, stop_if_stuck=False): - """ - Yields action for the robot to move arm to reach the specified eef positions using the planner - - Args: - eef_pose (th.tensor): End Effector pose for the arm - - Returns: - th.tensor or None: Action array for one step for the robot to move arm or None if its at the joint positions - """ - eef_pos = eef_pose[0] - eef_ori = T.quat2axisangle(eef_pose[1]) - end_conf = th.cat((eef_pos, eef_ori)) - - with PlanningContext(self.env, self.robot, self.robot_copy, "original") as context: - plan = plan_arm_motion_ik( - robot=self.robot, - end_conf=end_conf, - context=context, - torso_fixed=m.TIAGO_TORSO_FIXED, - ) + q_traj = self._motion_generator.path_to_joint_trajectory(traj_path, CuroboEmbodimentSelection.ARM) + + # Follow the plan to move the eef + indented_print(f"Plan has {len(q_traj)} steps") + for i, joint_pos in enumerate(q_traj): + indented_print(f"Executing arm movement plan step {i + 1}/{len(q_traj)}") + yield from self._execute_joint_motion(joint_pos.cpu()) + + def _execute_joint_motion(self, q, ignore_failure=False): + # Convert target joint positions to command + action = [] + for controller in self.robot.controllers.values(): + action.append(q[controller.dof_idx]) + action = th.cat(action, dim=0) + assert action.shape[0] == self.robot.action_dim + + for i in range(m.MAX_STEPS_FOR_JOINT_MOTION): + current_joint_pos = self.robot.get_joint_positions() + joint_pos_diff = q - current_joint_pos + if th.max(th.abs(joint_pos_diff)).item() < m.JOINT_POS_DIFF_THRESHOLD: + return + yield self._postprocess_action(action) - # plan = self._add_linearly_interpolated_waypoints(plan, 0.1) - if plan is None: + if not ignore_failure: raise ActionPrimitiveError( - ActionPrimitiveError.Reason.PLANNING_ERROR, - "There is no accessible path from where you are to the desired joint position. Try again", - ) - - # Follow the plan to navigate. - indented_print(f"Plan has {len(plan)} steps") - for i, target_pose in enumerate(plan): - target_pos = target_pose[:3] - target_quat = T.axisangle2quat(target_pose[3:]) - indented_print(f"Executing grasp plan step {i + 1}/{len(plan)}") - yield from self._move_hand_direct_ik( - (target_pos, target_quat), ignore_failure=True, in_world_frame=False, stop_if_stuck=stop_if_stuck + ActionPrimitiveError.Reason.EXECUTION_ERROR, + "Could not reach the target joint positions. 
Try again", ) def _add_linearly_interpolated_waypoints(self, plan, max_inter_dist): @@ -1173,7 +1127,7 @@ def _overwrite_head_action(self, action): else: target_obj_pose = self._tracking_object.get_position_orientation() - assert self.robot_model == "Tiago", "Tracking object with camera is currently only supported for Tiago" + assert isinstance(self.robot, Tiago), "Tracking object with camera is currently only supported for Tiago" head_q = self._get_head_goal_q(target_obj_pose) head_idx = self.robot.controller_action_idx["camera"] @@ -1278,43 +1232,36 @@ def _empty_action(self, follow_arm_targets=True): action[action_idx] = partial_action return action - def _reset_hand(self): + def _reset_hand(self, arm=None): """ Yields action to move the hand to the position optimal for executing subsequent action primitives Returns: th.tensor or None: Action array for one step for the robot to reset its hand or None if it is done resetting """ - controller_config = self.robot._controller_config["arm_" + self.arm] - if controller_config["name"] == "InverseKinematicsController": - indented_print("Resetting hand") - reset_eef_pose = self._get_reset_eef_pose() - try: - yield from self._move_hand_ik(reset_eef_pose) - except ActionPrimitiveError: - indented_print("Could not do a planned reset of the hand - probably obj_in_hand collides with body") - yield from self._move_hand_direct_ik(reset_eef_pose, ignore_failure=True, in_world_frame=False) - else: - indented_print("Resetting hand") - reset_pose = self._get_reset_joint_pos()[self._manipulation_control_idx] - try: - yield from self._move_hand_joint(reset_pose) - except ActionPrimitiveError: - indented_print("Could not do a planned reset of the hand - probably obj_in_hand collides with body") - yield from self._move_hand_direct_joint(reset_pose, ignore_failure=True) + if arm is None: + arm = self.arm + indented_print("Resetting hand") + # TODO: make this work with both hands + reset_eef_pose = self._get_reset_eef_pose() + try: + yield from self._move_hand(reset_pose, arm=arm) + except ActionPrimitiveError: + indented_print("Could not do a planned reset of the hand - probably obj_in_hand collides with body") + yield from self._execute_joint_motion(reset_pose, ignore_failure=True) def _get_reset_eef_pose(self): # TODO: Add support for Fetch - if self.robot_model == "Tiago": + if isinstance(self.robot, Tiago): return th.tensor([0.28493954, 0.37450749, 1.1512334]), th.tensor( [-0.21533823, 0.05361032, -0.08631776, 0.97123871] ) - elif self.robot_model == "Fetch": + elif isinstance(self.robot, Fetch): return th.tensor([0.48688125, -0.12507881, 0.97888719]), th.tensor( [0.61324748, 0.61305553, -0.35266518, 0.35173529] ) else: - raise ValueError(f"Unsupported robot model: {self.robot_model}") + raise ValueError(f"Unsupported robot: {type(self.robot)}") def _get_reset_joint_pos(self): reset_pose_fetch = th.tensor( @@ -1367,12 +1314,12 @@ def _get_reset_joint_pos(self): 4.50000000e-02, ] ) - if self.robot_model == "Fetch": + if isinstance(self.robot, Fetch): return reset_pose_fetch - elif self.robot_model == "Tiago": + elif isinstance(self.robot, Tiago): return reset_pose_tiago else: - raise ValueError(f"Unsupported robot model: {self.robot_model}") + raise ValueError(f"Unsupported robot model: {type(self.robot)}") def _navigate_to_pose(self, pose_2d): """ @@ -1406,10 +1353,10 @@ def _navigate_to_pose(self, pose_2d): low_precision = True if i < len(plan) - 1 else False yield from self._navigate_to_pose_direct(pose_2d, low_precision=low_precision) + # TODO: implement 
this for curobo def _draw_plan(self, plan): SEARCHED = [] - trav_map = self.env.scene._trav_map - # TODO: implement this for curobo + trav_map = self.robot.scene._trav_map for q in plan: # The below code is useful for plotting the RRT tree. map_point = trav_map.world_to_map((q[0], q[1])) @@ -1497,20 +1444,17 @@ def _navigate_to_pose_direct(self, pose_2d, low_precision=False): yield from self._rotate_in_place(intermediate_pose, angle_threshold=m.DEFAULT_ANGLE_THRESHOLD) else: action = self._empty_action() - if self._base_controller_is_joint: - base_action_size = self.robot.controller_action_idx["base"].numel() - assert ( - base_action_size == 3 - ), "Currently, the action primitives only support [x, y, theta] joint controller" - direction_vec = ( - body_target_pose[0][:2] / th.norm(body_target_pose[0][:2]) * m.KP_LIN_VEL[type(self.robot)] - ) - base_action = th.tensor([direction_vec[0], direction_vec[1], 0.0], dtype=th.float32) - action[self.robot.controller_action_idx["base"]] = base_action - else: - # Diff drive controller - base_action = th.tensor([m.KP_LIN_VEL[type(self.robot)], 0.0], dtype=th.float32) - action[self.robot.controller_action_idx["base"]] = base_action + + base_action_size = self.robot.controller_action_idx["base"].numel() + assert ( + base_action_size == 3 + ), "Currently, the action primitives only support [x, y, theta] joint controller" + + base_action = th.tensor( + [body_target_pose[0][0], body_target_pose[0][1], body_target_pose[1]], dtype=th.float32 + ) + action[self.robot.controller_action_idx["base"]] = base_action + yield self._postprocess_action(action) body_target_pose = self._get_pose_in_robot_frame(end_pose) @@ -1544,25 +1488,19 @@ def _rotate_in_place(self, end_pose, angle_threshold=m.DEFAULT_ANGLE_THRESHOLD): action = self._empty_action() - direction = -1.0 if diff_yaw < 0.0 else 1.0 - ang_vel = m.KP_ANGLE_VEL[type(self.robot)] * direction - - if isinstance(self.robot, Locobot) or isinstance(self.robot, Freight): - # Locobot and Freight wheel joints are reversed - ang_vel = -ang_vel + # TODO: test to see if we still need this + # if isinstance(self.robot, Locobot) or isinstance(self.robot, Freight): + # # Locobot and Freight wheel joints are reversed + # ang_vel = -ang_vel base_action = action[self.robot.controller_action_idx["base"]] - if not self._base_controller_is_joint: - base_action[0] = 0.0 - base_action[1] = ang_vel - else: - assert ( - base_action.numel() == 3 - ), "Currently, the action primitives only support [x, y, theta] joint controller" - base_action[0] = 0.0 - base_action[1] = 0.0 - base_action[2] = ang_vel + assert ( + base_action.numel() == 3 + ), "Currently, the action primitives only support [x, y, theta] joint controller" + base_action[0] = 0.0 + base_action[1] = 0.0 + base_action[2] = T.quat2euler(body_target_pose[1])[2].item() action[self.robot.controller_action_idx["base"]] = base_action yield self._postprocess_action(action) @@ -1614,9 +1552,9 @@ def _sample_pose_near_object(self, obj, pose_on_obj=None, **kwargs): obj_rooms = ( obj.in_rooms if obj.in_rooms - else [self.env.scene._seg_map.get_room_instance_by_point(pose_on_obj[0][:2])] + else [self.robot.scene._seg_map.get_room_instance_by_point(pose_on_obj[0][:2])] ) - if self.env.scene._seg_map.get_room_instance_by_point(pose_2d[:2]) not in obj_rooms: + if self.robot.scene._seg_map.get_room_instance_by_point(pose_2d[:2]) not in obj_rooms: indented_print("Candidate position is in the wrong room.") continue diff --git 
a/omnigibson/action_primitives/symbolic_semantic_action_primitives.py b/omnigibson/action_primitives/symbolic_semantic_action_primitives.py index a30b95420..74d588ed3 100644 --- a/omnigibson/action_primitives/symbolic_semantic_action_primitives.py +++ b/omnigibson/action_primitives/symbolic_semantic_action_primitives.py @@ -59,12 +59,12 @@ def __init__(self, env): SymbolicSemanticActionPrimitiveSet.RELEASE: self._release, } - def apply_ref(self, prim, *args, attempts=3): + def apply_ref(self, primitive, *args, attempts=3): """ Yields action for robot to execute the primitive with the given arguments. Args: - prim (SymbolicSemanticActionPrimitiveSet): Primitive to execute + primitive (SymbolicSemanticActionPrimitiveSet): Primitive to execute args: Arguments for the primitive attempts (int): Number of attempts to make before raising an error @@ -75,7 +75,7 @@ def apply_ref(self, prim, *args, attempts=3): ActionPrimitiveError: If primitive fails to execute """ assert attempts > 0, "Must make at least one attempt" - ctrl = self.controller_functions[prim] + ctrl = self.controller_functions[primitive] if any(isinstance(arg, BaseRobot) for arg in args): raise ActionPrimitiveErrorGroup( From f902e868fb4ff7853307c04cd39c84560ba80b97 Mon Sep 17 00:00:00 2001 From: hang-yin Date: Wed, 6 Nov 2024 11:34:24 -0800 Subject: [PATCH 03/76] Minor primitives refactoring --- omnigibson/examples/action_primitives/rs_int_example.py | 2 +- omnigibson/examples/action_primitives/solve_simple_task.py | 2 +- .../examples/action_primitives/wip_solve_behavior_task.py | 2 +- omnigibson/robots/r1.py | 3 ++- tests/test_primitives.py | 2 +- tests/test_robot_states_flatcache.py | 6 +++--- 6 files changed, 9 insertions(+), 8 deletions(-) diff --git a/omnigibson/examples/action_primitives/rs_int_example.py b/omnigibson/examples/action_primitives/rs_int_example.py index fbecc2595..c9f856523 100644 --- a/omnigibson/examples/action_primitives/rs_int_example.py +++ b/omnigibson/examples/action_primitives/rs_int_example.py @@ -51,7 +51,7 @@ def main(): # Allow user to move camera more easily og.sim.enable_viewer_camera_teleoperation() - controller = StarterSemanticActionPrimitives(env, enable_head_tracking=False) + controller = StarterSemanticActionPrimitives(robot, enable_head_tracking=False) cabinet = scene.object_registry("name", "bottom_cabinet_slgzfc_0") apple = scene.object_registry("name", "apple") diff --git a/omnigibson/examples/action_primitives/solve_simple_task.py b/omnigibson/examples/action_primitives/solve_simple_task.py index 168e3d239..d20a16242 100644 --- a/omnigibson/examples/action_primitives/solve_simple_task.py +++ b/omnigibson/examples/action_primitives/solve_simple_task.py @@ -60,7 +60,7 @@ def main(): # Allow user to move camera more easily og.sim.enable_viewer_camera_teleoperation() - controller = StarterSemanticActionPrimitives(env, enable_head_tracking=False) + controller = StarterSemanticActionPrimitives(robot, enable_head_tracking=False) # Grasp of cologne grasp_obj = scene.object_registry("name", "cologne") diff --git a/omnigibson/examples/action_primitives/wip_solve_behavior_task.py b/omnigibson/examples/action_primitives/wip_solve_behavior_task.py index 3036fc42e..75202dfe7 100644 --- a/omnigibson/examples/action_primitives/wip_solve_behavior_task.py +++ b/omnigibson/examples/action_primitives/wip_solve_behavior_task.py @@ -52,7 +52,7 @@ def main(): # Allow user to move camera more easily og.sim.enable_viewer_camera_teleoperation() - controller = StarterSemanticActionPrimitives(env, 
enable_head_tracking=False) + controller = StarterSemanticActionPrimitives(robot, enable_head_tracking=False) # Grasp can of soda grasp_obj = env.task.object_scope["can__of__soda.n.01_2"] diff --git a/omnigibson/robots/r1.py b/omnigibson/robots/r1.py index 58d945e34..fe3a1b5d1 100644 --- a/omnigibson/robots/r1.py +++ b/omnigibson/robots/r1.py @@ -1,3 +1,4 @@ +import math import os import torch as th @@ -258,7 +259,7 @@ def urdf_path(self): @property def arm_workspace_range(self): - return {arm: [th.deg2rad(-45), th.deg2rad(45)] for arm in self.arm_names} + return {arm: th.tensor([th.deg2rad(th.tensor(-45)), th.deg2rad(th.tensor(45))]) for arm in self.arm_names} @property def eef_usd_path(self): diff --git a/tests/test_primitives.py b/tests/test_primitives.py index f77827c9d..8b20fc084 100644 --- a/tests/test_primitives.py +++ b/tests/test_primitives.py @@ -76,7 +76,7 @@ def primitive_tester(env, objects, primitives, primitives_args): obj["object"].set_position_orientation(position=obj["position"], orientation=obj["orientation"]) og.sim.step() - controller = StarterSemanticActionPrimitives(env, enable_head_tracking=False) + controller = StarterSemanticActionPrimitives(env.robots[0], enable_head_tracking=False) try: for primitive, args in zip(primitives, primitives_args): execute_controller(controller.apply_ref(primitive, *args, attempts=1), env) diff --git a/tests/test_robot_states_flatcache.py b/tests/test_robot_states_flatcache.py index 82d42db7f..40e142617 100644 --- a/tests/test_robot_states_flatcache.py +++ b/tests/test_robot_states_flatcache.py @@ -185,7 +185,7 @@ def test_robot_load_drive(): robot.reload_controllers(controller_config=controller_config) env.scene.update_initial_state() - action_primitives = StarterSemanticActionPrimitives(env) + action_primitives = StarterSemanticActionPrimitives(robot) eef_pos = env.robots[0].get_eef_position() eef_orn = env.robots[0].get_eef_orientation() @@ -200,7 +200,7 @@ def test_robot_load_drive(): # If this is a locomotion robot, we want to test driving if isinstance(robot, LocomotionRobot): - action_primitives = StarterSemanticActionPrimitives(env) + action_primitives = StarterSemanticActionPrimitives(robot) goal_location = th.tensor([0, 1, 0], dtype=th.float32) for action in action_primitives._navigate_to_pose_direct(goal_location): env.step(action) @@ -287,7 +287,7 @@ def object_is_in_hand(robot, obj): for _ in range(10): og.sim.step() - action_primitives = StarterSemanticActionPrimitives(env) + action_primitives = StarterSemanticActionPrimitives(robot) box_object = env.scene.object_registry("name", "box") target_eef_pos = box_object.get_position_orientation()[0] From 8f73b6f0eec4af81e5fd83f943847d25c4f07001 Mon Sep 17 00:00:00 2001 From: hang-yin Date: Wed, 6 Nov 2024 11:40:22 -0800 Subject: [PATCH 04/76] Completely remove OMPL planning context from action primitives plus refactoring --- .../starter_semantic_action_primitives.py | 242 ++++++++++-------- 1 file changed, 130 insertions(+), 112 deletions(-) diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index 522cbdc04..6024c486d 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -281,7 +281,8 @@ def apply_ref(self, primitive, *args, attempts=5): try: # Make sure we retract the arm after every step - yield from self._reset_hand() + for arm in self.robot.arm_names: + yield from 
self._reset_hand(arm) except ActionPrimitiveError: pass @@ -504,7 +505,10 @@ def _grasp(self, obj): # Since the grasp pose is slightly off the object, we want to move towards the object, around 5cm. # It's okay if we can't go all the way because we run into the object. indented_print("Performing grasp approach") - yield from self._move_hand_linearly_cartesian(approach_pose, stop_on_contact=True) + + # TODO: implement this linear motion better with curobo constrained planning + # yield from self._move_hand_linearly_cartesian(approach_pose, stop_on_contact=True) + yield from self._move_hand(approach_pose) # Step once to update empty_action = self._empty_action() @@ -674,7 +678,11 @@ def _target_in_reach_of_robot_relative(self, relative_target_pose): Returns: bool: Whether eef can the reach target pose """ - return self._ik_solver_cartesian_to_joint_space(relative_target_pose) is not None + # TODO: output may need to be a dictionary of arms and whether they can reach the target pose + for arm in self.robot.arm_names: + if self._ik_solver_cartesian_to_joint_space(relative_target_pose, arm=arm) is not None: + return True + return False @property def _manipulation_control_idx(self): @@ -691,17 +699,7 @@ def _manipulation_control_idx(self): # Otherwise just return the default arm control idx return self.robot.arm_control_idx[self.arm] - @property - def _manipulation_descriptor_path(self): - """The appropriate manipulation descriptor for the current settings.""" - if isinstance(self.robot, Tiago) and m.TIAGO_TORSO_FIXED: - assert self.arm == "left", "Fixed torso mode only supports left arm!" - return self.robot.robot_arm_descriptor_yamls["left_fixed"] - - # Otherwise just return the default arm control idx - return self.robot.robot_arm_descriptor_yamls[self.arm] - - def _ik_solver_cartesian_to_joint_space(self, relative_target_pose): + def _ik_solver_cartesian_to_joint_space(self, relative_target_pose, arm=None): """ Get joint positions for the arm so eef is at the target pose where the target pose is in the robot frame @@ -713,8 +711,10 @@ def _ik_solver_cartesian_to_joint_space(self, relative_target_pose): - th.tensor or None: Joint positions to reach target pose or None if impossible to reach the target pose - th.tensor: Indices for joints in the robot """ + if arm is None: + arm = self.arm ik_solver = IKSolver( - robot_description_path=self._manipulation_descriptor_path, + robot_description_path=self.robot.robot_arm_descriptor_yamls[arm], robot_urdf_path=self.robot.urdf_path, reset_joint_pos=self.robot.reset_joint_pos[self._manipulation_control_idx], eef_name=self.robot.eef_link_names[self.arm], @@ -752,18 +752,23 @@ def _move_hand(self, target_pose, arm=None, stop_if_stuck=False): self.robot.eef_link_names["left"]: left_hand_quat, self.robot.eef_link_names["right"]: right_hand_quat, } + yield from self._plan_and_execute_joint_motion(target_pos, target_quat, CuroboEmbodimentSelection.ARM) + + def _plan_and_execute_joint_motion( + self, target_pos, target_quat, embodiment_selection=CuroboEmbodimentSelection.DEFAULT, ignore_failure=False + ): successes, traj_paths = self._motion_generator.compute_trajectories( target_pos=target_pos, target_quat=target_quat, is_local=False, - max_attempts=5, + max_attempts=50, timeout=60.0, ik_fail_return=5, enable_finetune_trajopt=True, finetune_attempts=1, return_full_result=False, success_ratio=1.0, - emb_sel=CuroboEmbodimentSelection.ARM, + emb_sel=embodiment_selection, ) success, traj_path = successes[0], traj_paths[0] if not success: @@ -772,34 +777,48 @@ 
def _move_hand(self, target_pose, arm=None, stop_if_stuck=False):
                 "There is no accessible path from where you are to the desired pose. Try again",
             )
 
-        q_traj = self._motion_generator.path_to_joint_trajectory(traj_path, CuroboEmbodimentSelection.ARM)
+        q_traj = self._motion_generator.path_to_joint_trajectory(traj_path, embodiment_selection)
 
-        # Follow the plan to move the eef
+        # Follow the joint motion plan
         indented_print(f"Plan has {len(q_traj)} steps")
         for i, joint_pos in enumerate(q_traj):
-            indented_print(f"Executing arm movement plan step {i + 1}/{len(q_traj)}")
-            yield from self._execute_joint_motion(joint_pos.cpu())
-
-    def _execute_joint_motion(self, q, ignore_failure=False):
-        # Convert target joint positions to command
-        action = []
-        for controller in self.robot.controllers.values():
-            action.append(q[controller.dof_idx])
-        action = th.cat(action, dim=0)
-        assert action.shape[0] == self.robot.action_dim
-
-        for i in range(m.MAX_STEPS_FOR_JOINT_MOTION):
-            current_joint_pos = self.robot.get_joint_positions()
-            joint_pos_diff = q - current_joint_pos
-            if th.max(th.abs(joint_pos_diff)).item() < m.JOINT_POS_DIFF_THRESHOLD:
-                return
-            yield self._postprocess_action(action)
+            indented_print(f"Executing motion plan step {i + 1}/{len(q_traj)}")
+            desired_joint_pos = joint_pos.cpu()
+            # Convert target joint positions to command
+            action = []
+            for controller in self.robot.controllers.values():
+                action.append(desired_joint_pos[controller.dof_idx])
+            action = th.cat(action, dim=0)
+            assert action.shape[0] == self.robot.action_dim
+
+            base_target_reached = False
+            articulation_target_reached = False
+
+            for i in range(m.MAX_STEPS_FOR_JOINT_MOTION):
+                current_joint_pos = self.robot.get_joint_positions()
+                joint_pos_diff = desired_joint_pos - current_joint_pos
+                base_joint_diff = joint_pos_diff[self.robot.base_control_idx]
+                articulation_joint_diff = joint_pos_diff[~self.robot.base_control_idx]  # Gets all non-base joints
+                if th.max(th.abs(articulation_joint_diff)).item() < m.JOINT_POS_DIFF_THRESHOLD:
+                    articulation_target_reached = True
+                # TODO: generalize this to translation & rotation + high/low precision modes
+                if th.max(th.abs(base_joint_diff)).item() < m.DEFAULT_DIST_THRESHOLD:
+                    base_target_reached = True
+                if base_target_reached and articulation_target_reached:
+                    break
+                yield self._postprocess_action(action)
 
-        if not ignore_failure:
-            raise ActionPrimitiveError(
-                ActionPrimitiveError.Reason.EXECUTION_ERROR,
-                "Could not reach the target joint positions. Try again",
-            )
+            if not base_target_reached and not ignore_failure:
+                raise ActionPrimitiveError(
+                    ActionPrimitiveError.Reason.EXECUTION_ERROR,
+                    "Could not reach the target base joint positions. Try again",
+                )
+
+            if not articulation_target_reached and not ignore_failure:
+                raise ActionPrimitiveError(
+                    ActionPrimitiveError.Reason.EXECUTION_ERROR,
+                    "Could not reach the target articulation joint positions. Try again",
+                )
 
     def _add_linearly_interpolated_waypoints(self, plan, max_inter_dist):
         """
         Adds waypoints to the plan so the distance between values in the plan never exceeds the max_inter_dist.
@@ -1243,15 +1262,11 @@ def _reset_hand(self, arm=None):
             arm = self.arm
         indented_print("Resetting hand")
         # TODO: make this work with both hands
-        reset_eef_pose = self._get_reset_eef_pose()
-        try:
-            yield from self._move_hand(reset_pose, arm=arm)
-        except ActionPrimitiveError:
-            indented_print("Could not do a planned reset of the hand - probably obj_in_hand collides with body")
-            yield from self._execute_joint_motion(reset_pose, ignore_failure=True)
+        reset_eef_pose = self._get_reset_eef_pose()[arm]
+        yield from self._move_hand(reset_eef_pose, arm=arm)
 
     def _get_reset_eef_pose(self):
-        # TODO: Add support for Fetch
+        # TODO: Define this for all robots
         if isinstance(self.robot, Tiago):
             return th.tensor([0.28493954, 0.37450749, 1.1512334]), th.tensor(
                 [-0.21533823, 0.05361032, -0.08631776, 0.97123871]
             )
         elif isinstance(self.robot, Fetch):
             return th.tensor([0.48688125, -0.12507881, 0.97888719]), th.tensor(
                 [0.61324748, 0.61305553, -0.35266518, 0.35173529]
             )
+        elif isinstance(self.robot, R1):
+            return {
+                self.robot.arm_names[0]: (
+                    th.tensor([0.4923, 0.4144, 1.4077]),
+                    th.tensor([0.7071, 0.0002, 0.7072, -0.0001]),
+                ),
+                self.robot.arm_names[1]: (
+                    th.tensor([0.4926, -0.4141, 1.4081]),
+                    th.tensor([-0.7072, -0.0001, -0.7070, 0.0001]),
+                ),
+            }
         else:
             raise ValueError(f"Unsupported robot: {type(self.robot)}")
@@ -1331,29 +1357,18 @@ def _navigate_to_pose(self, pose_2d):
         Returns:
             th.tensor or None: Action array for one step for the robot to navigate or None if it is done navigating
         """
-        with PlanningContext(self.env, self.robot, self.robot_copy, "simplified") as context:
-            plan = plan_base_motion(
-                robot=self.robot,
-                end_conf=pose_2d,
-                context=context,
-            )
-
-        if plan is None:
-            # TODO: Would be great to produce a more informative error.
-            raise ActionPrimitiveError(
-                ActionPrimitiveError.Reason.PLANNING_ERROR,
-                "Could not make a navigation plan to get to the target position",
-            )
+        # TODO: Change curobo implementation so that base motion planning takes a 2D pose instead of 3D?
+        pose_3d = self._get_robot_pose_from_2d_pose(pose_2d)
+        # TODO: change default to 0.0? Why is it 0.1?
+        # TODO: do we still need high/low precision modes?
+        pose_3d[0][2] = 0.0
+        target_pos = {self.robot.base_footprint_link_name: pose_3d[0]}
+        target_quat = {self.robot.base_footprint_link_name: pose_3d[1]}
 
-        # Follow the plan to navigate.
-        # self._draw_plan(plan)
-        indented_print(f"Navigation plan has {len(plan)} steps")
-        for i, pose_2d in enumerate(plan):
-            indented_print(f"Executing navigation plan step {i + 1}/{len(plan)}")
-            low_precision = True if i < len(plan) - 1 else False
-            yield from self._navigate_to_pose_direct(pose_2d, low_precision=low_precision)
+        yield from self._plan_and_execute_joint_motion(target_pos, target_quat, CuroboEmbodimentSelection.BASE)
 
-    # TODO: implement this for curobo
+    # TODO: implement this for curobo?
def _draw_plan(self, plan): SEARCHED = [] trav_map = self.robot.scene._trav_map @@ -1530,49 +1545,48 @@ def _sample_pose_near_object(self, obj, pose_on_obj=None, **kwargs): - 3-array: (x,y,z) Position in the world frame - 4-array: (x,y,z,w) Quaternion orientation in the world frame """ - with PlanningContext(self.env, self.robot, self.robot_copy, "simplified") as context: - for _ in range(m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_NEAR_OBJECT): - if pose_on_obj is None: - pos_on_obj = self._sample_position_on_aabb_side(obj) - pose_on_obj = [pos_on_obj, th.tensor([0, 0, 0, 1])] - - distance_lo, distance_hi = 0.0, 5.0 - distance = (th.rand(1) * (distance_hi - distance_lo) + distance_lo).item() - yaw_lo, yaw_hi = -math.pi, math.pi - yaw = th.rand(1) * (yaw_hi - yaw_lo) + yaw_lo - avg_arm_workspace_range = th.mean(self.robot.arm_workspace_range[self.arm]) - pose_2d = th.cat( - [ - pose_on_obj[0][0] + distance * th.cos(yaw), - pose_on_obj[0][1] + distance * th.sin(yaw), - yaw + math.pi - avg_arm_workspace_range, - ] - ) - # Check room - obj_rooms = ( - obj.in_rooms - if obj.in_rooms - else [self.robot.scene._seg_map.get_room_instance_by_point(pose_on_obj[0][:2])] - ) - if self.robot.scene._seg_map.get_room_instance_by_point(pose_2d[:2]) not in obj_rooms: - indented_print("Candidate position is in the wrong room.") - continue + for _ in range(m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_NEAR_OBJECT): + if pose_on_obj is None: + pos_on_obj = self._sample_position_on_aabb_side(obj) + pose_on_obj = [pos_on_obj, th.tensor([0, 0, 0, 1])] + + distance_lo, distance_hi = 0.0, 5.0 + distance = (th.rand(1) * (distance_hi - distance_lo) + distance_lo).item() + yaw_lo, yaw_hi = -math.pi, math.pi + yaw = th.rand(1) * (yaw_hi - yaw_lo) + yaw_lo + avg_arm_workspace_range = th.mean(self.robot.arm_workspace_range[self.arm]) + pose_2d = th.cat( + [ + pose_on_obj[0][0] + distance * th.cos(yaw), + pose_on_obj[0][1] + distance * th.sin(yaw), + yaw + math.pi - avg_arm_workspace_range, + ] + ) + # Check room + obj_rooms = ( + obj.in_rooms + if obj.in_rooms + else [self.robot.scene._seg_map.get_room_instance_by_point(pose_on_obj[0][:2])] + ) + if self.robot.scene._seg_map.get_room_instance_by_point(pose_2d[:2]) not in obj_rooms: + indented_print("Candidate position is in the wrong room.") + continue - if not self._test_pose(pose_2d, context, pose_on_obj=pose_on_obj, **kwargs): - continue + if not self._test_pose(pose_2d, pose_on_obj=pose_on_obj, **kwargs): + continue - indented_print("Found valid position near object.") - return pose_2d + indented_print("Found valid position near object.") + return pose_2d - raise ActionPrimitiveError( - ActionPrimitiveError.Reason.SAMPLING_ERROR, - "Could not find valid position near object.", - { - "target object": obj.name, - "target pos": obj.get_position_orientation()[0], - "pose on target": pose_on_obj, - }, - ) + raise ActionPrimitiveError( + ActionPrimitiveError.Reason.SAMPLING_ERROR, + "Could not find valid position near object.", + { + "target object": obj.name, + "target pos": obj.get_position_orientation()[0], + "pose on target": pose_on_obj, + }, + ) @staticmethod def _sample_position_on_aabb_side(target_obj): @@ -1673,14 +1687,12 @@ def _sample_pose_with_object_and_predicate( {"target object": target_obj.name, "object in hand": held_obj.name, "relation": pred_map[predicate]}, ) - # TODO: Why do we need to pass in the context here? 
- def _test_pose(self, pose_2d, context, pose_on_obj=None): + def _test_pose(self, pose_2d, pose_on_obj=None): """ Determines whether the robot can reach the pose on the object and is not in collision at the specified 2d pose Args: pose_2d (Iterable): (x, y, yaw) 2d pose - context (Context): Planning context reference pose_on_obj (Iterable of arrays): Pose on the object in the world frame Returns: @@ -1692,7 +1704,13 @@ def _test_pose(self, pose_2d, context, pose_on_obj=None): if not self._target_in_reach_of_robot_relative(relative_pose): return False - if set_base_and_detect_collision(context, pose): + joint_pos = self.robot.get_joint_positions() + joint_pos[self.robot.base_idx[:3]] = pose[0] + joint_pos[self.robot.base_idx[3:]] = T.quat2euler(pose[1]) + + collision_results = self._motion_generator.check_collisions(joint_pos, check_self_collision=False) + + if collision_results[0].cpu().item(): indented_print("Candidate position failed collision test.") return False return True From 519f5e5c92b6b9f20dde904287e4ed986a09d3b9 Mon Sep 17 00:00:00 2001 From: hang-yin Date: Thu, 7 Nov 2024 16:23:36 -0800 Subject: [PATCH 05/76] Fix grasp pose bug --- omnigibson/utils/grasping_planning_utils.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/omnigibson/utils/grasping_planning_utils.py b/omnigibson/utils/grasping_planning_utils.py index 728303132..ffb9692e2 100644 --- a/omnigibson/utils/grasping_planning_utils.py +++ b/omnigibson/utils/grasping_planning_utils.py @@ -32,11 +32,13 @@ def get_grasp_poses_for_object_sticky(target_obj): visual=False ) - grasp_center_pos = bbox_center_in_world + th.tensor([0, 0, th.max(bbox_extent_in_base_frame) + 0.05]) + grasp_center_pos = bbox_center_in_world + th.tensor([0, 0, th.max(bbox_extent_in_base_frame) / 2 + 0.05]) towards_object_in_world_frame = bbox_center_in_world - grasp_center_pos towards_object_in_world_frame /= th.norm(towards_object_in_world_frame) - grasp_quat = T.euler2quat(th.tensor([0, math.pi / 2, 0], dtype=th.float32)) + # TODO: figure out why this was pi/2 in the y-axis + # grasp_quat = T.euler2quat(th.tensor([0, math.pi / 2, 0], dtype=th.float32)) + grasp_quat = T.euler2quat(th.tensor([0, 0, 0], dtype=th.float32)) grasp_pose = (grasp_center_pos, grasp_quat) grasp_candidate = [(grasp_pose, towards_object_in_world_frame)] From 0fbefe7979c82e08cc80e8b4e6a5a4b3c9e315c2 Mon Sep 17 00:00:00 2001 From: hang-yin Date: Thu, 7 Nov 2024 16:34:13 -0800 Subject: [PATCH 06/76] Primitives & curobo bug fixes - grasp mostly working --- omnigibson/action_primitives/curobo.py | 36 ++++ .../starter_semantic_action_primitives.py | 197 +++++++++++------- omnigibson/robots/r1.py | 4 +- 3 files changed, 158 insertions(+), 79 deletions(-) diff --git a/omnigibson/action_primitives/curobo.py b/omnigibson/action_primitives/curobo.py index 7b21ad6db..13c324cdb 100644 --- a/omnigibson/action_primitives/curobo.py +++ b/omnigibson/action_primitives/curobo.py @@ -383,6 +383,7 @@ def compute_trajectories( return_full_result=False, success_ratio=None, attached_obj=None, + motion_constraint=None, emb_sel=CuroboEmbodimentSelection.DEFAULT, ): """ @@ -460,6 +461,14 @@ def compute_trajectories( success_ratio=1.0 / self.batch_size if success_ratio is None else success_ratio, ) + # Add the pose cost metric + if motion_constraint is None: + motion_constraint = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0] + pose_cost_metric = lazy.curobo.wrap.reacher.motion_gen.PoseCostMetric( + hold_partial_pose=False, hold_vec_weight=self._tensor_args.to_device(motion_constraint) + ) + 
plan_cfg.pose_cost_metric = pose_cost_metric + # Refresh the collision state self.update_obstacles(ignore_paths=None, emb_sel=emb_sel) @@ -672,6 +681,33 @@ def convert_q_to_eef_traj(self, traj, return_axisangle=False, emb_sel=CuroboEmbo return th.concatenate([positions, orientations], dim=-1) + def solve_ik(self, target_pos, target_quat, emb_sel=CuroboEmbodimentSelection.ARM): + # # If target_pos and target_quat are torch tensors, it's assumed that they correspond to the default ee_link + # if isinstance(target_pos, th.Tensor): + # target_pos = {self.ee_link[emb_sel]: target_pos} + # if isinstance(target_quat, th.Tensor): + # target_quat = {self.ee_link[emb_sel]: target_quat} + + # # Refresh the collision state + # self.update_obstacles(ignore_paths=None, emb_sel=emb_sel) + + # assert target_pos.keys() == target_quat.keys(), "Expected target_pos and target_quat to have the same keys!" + + # # TODO: fill these in + # solve_state = self.mg[emb_sel]._get_solve_state(lazy.curobo.ReacherSolveType.GOALSET, plan_config, goal_pose, start_state) + + # ik_result = self.mg[emb_sel]._solve_ik_from_solve_state( + # goal_pose=, + # solve_state=solve_state, + # start_state=, + # use_nn_seed=False, + # partial_ik_opt=False, + # link_poses=, + # ) + + # breakpoint() + return + @property def tensor_args(self): """ diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index 6024c486d..a3c16d3a3 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -66,6 +66,8 @@ R1: 0.2, } +m.MAX_PLANNING_ATTEMPTS = 50 + m.MAX_STEPS_FOR_SETTLING = 500 m.MAX_STEPS_FOR_JOINT_MOTION = 500 @@ -92,7 +94,7 @@ m.LOW_PRECISION_ANGLE_THRESHOLD = 0.2 m.TIAGO_TORSO_FIXED = False -m.JOINT_POS_DIFF_THRESHOLD = 0.01 +m.JOINT_POS_DIFF_THRESHOLD = 0.005 m.JOINT_CONTROL_MIN_ACTION = 0.0 m.MAX_ALLOWED_JOINT_ERROR_FOR_LINEAR_MOTION = math.radians(45) m.TIME_BEFORE_JOINT_STUCK_CHECK = 1.0 @@ -487,8 +489,13 @@ def _grasp(self, obj): grasp_poses = get_grasp_poses_for_object_sticky(obj) grasp_pose, object_direction = random.choice(grasp_poses) + # Take into account the robot eef reset pose + reset_orientation = self._get_reset_eef_pose()[self.arm][1] + grasp_pose = (grasp_pose[0], T.quat_multiply(grasp_pose[1], reset_orientation)) + # Prepare data for the approach later. - approach_pos = grasp_pose[0] + object_direction * m.GRASP_APPROACH_DISTANCE + # TODO: fix this threshold + approach_pos = grasp_pose[0] + object_direction * 0.03 # m.GRASP_APPROACH_DISTANCE approach_pose = (approach_pos, grasp_pose[1]) # If the grasp pose is too far, navigate. @@ -496,23 +503,20 @@ def _grasp(self, obj): yield from self._navigate_if_needed(obj, pose_on_obj=grasp_pose) indented_print("Moving hand to grasp pose") - yield from self._move_hand(grasp_pose) + yield from self._move_hand(grasp_pose, motion_constraint=[1, 1, 0, 0, 0, 0]) - # We can pre-grasp in sticky grasping mode. - indented_print("Pregrasp squeeze") - yield from self._execute_grasp() + # # We can pre-grasp in sticky grasping mode. + # indented_print("Pregrasp squeeze") + # yield from self._execute_grasp() # Since the grasp pose is slightly off the object, we want to move towards the object, around 5cm. # It's okay if we can't go all the way because we run into the object. 
indented_print("Performing grasp approach") - # TODO: implement this linear motion better with curobo constrained planning - # yield from self._move_hand_linearly_cartesian(approach_pose, stop_on_contact=True) - yield from self._move_hand(approach_pose) + # TODO: implement linear cartesian motion with curobo constrained planning + yield from self._move_hand(approach_pose) # motion_constraint=[0, 0, 0, 0, 0, 1] - # Step once to update - empty_action = self._empty_action() - yield self._postprocess_action(empty_action) + yield from self._execute_grasp() indented_print("Checking grasp") if self._get_obj_in_hand() is None: @@ -523,7 +527,8 @@ def _grasp(self, obj): ) indented_print("Moving hand back") - yield from self._reset_hand() + # TODO: 1. default to attach in-hand object 2. this root link logic is bad, fix it + yield from self._reset_hand(attached_obj={self.robot.eef_link_names[self.arm]: obj.root_link}) indented_print("Done with grasp") @@ -728,7 +733,7 @@ def _ik_solver_cartesian_to_joint_space(self, relative_target_pose, arm=None): return joint_pos - def _move_hand(self, target_pose, arm=None, stop_if_stuck=False): + def _move_hand(self, target_pose, arm=None, stop_if_stuck=False, attached_obj=None, motion_constraint=None): """ Yields action for the robot to move hand so the eef is in the target pose using the planner @@ -740,7 +745,8 @@ def _move_hand(self, target_pose, arm=None, stop_if_stuck=False): """ if arm is None: arm = self.arm - yield from self._settle_robot() + # TODO: this settle robot does not work because empty action doesn't work with the current controller modes + # yield from self._settle_robot() # curobo motion generator takes a pose but outputs joint positions left_hand_pos, left_hand_quat = target_pose if arm == "left" else self.robot.get_eef_pose(arm="left") right_hand_pos, right_hand_quat = target_pose if arm == "right" else self.robot.get_eef_pose(arm="right") @@ -752,25 +758,45 @@ def _move_hand(self, target_pose, arm=None, stop_if_stuck=False): self.robot.eef_link_names["left"]: left_hand_quat, self.robot.eef_link_names["right"]: right_hand_quat, } - yield from self._plan_and_execute_joint_motion(target_pos, target_quat, CuroboEmbodimentSelection.ARM) - - def _plan_and_execute_joint_motion( - self, target_pos, target_quat, embodiment_selection=CuroboEmbodimentSelection.DEFAULT, ignore_failure=False - ): - successes, traj_paths = self._motion_generator.compute_trajectories( + yield from self._plan_and_execute_joint_motion( target_pos=target_pos, target_quat=target_quat, - is_local=False, - max_attempts=50, - timeout=60.0, - ik_fail_return=5, - enable_finetune_trajopt=True, - finetune_attempts=1, - return_full_result=False, - success_ratio=1.0, - emb_sel=embodiment_selection, + embodiment_selection=CuroboEmbodimentSelection.ARM, + attached_obj=attached_obj, + motion_constraint=motion_constraint, ) - success, traj_path = successes[0], traj_paths[0] + + def _plan_and_execute_joint_motion( + self, + target_pos, + target_quat, + embodiment_selection=CuroboEmbodimentSelection.DEFAULT, + attached_obj=None, + motion_constraint=None, + ignore_failure=False, + ): + planning_attempts = 0 + success = False + traj_path = None + while not success and planning_attempts < m.MAX_PLANNING_ATTEMPTS: + successes, traj_paths = self._motion_generator.compute_trajectories( + target_pos=target_pos, + target_quat=target_quat, + is_local=False, + max_attempts=1, + timeout=60.0, + ik_fail_return=5, + enable_finetune_trajopt=True, + finetune_attempts=1, + return_full_result=False, + 
success_ratio=1.0, + attached_obj=attached_obj, + motion_constraint=motion_constraint, + emb_sel=embodiment_selection, + ) + success, traj_path = successes[0].item(), traj_paths[0] + planning_attempts += 1 + if not success: raise ActionPrimitiveError( ActionPrimitiveError.Reason.PLANNING_ERROR, @@ -785,11 +811,7 @@ def _plan_and_execute_joint_motion( indented_print(f"Executing motion plan step {i + 1}/{len(q_traj)}") desired_joint_pos = joint_pos.cpu() # Convert target joint positions to command - action = [] - for controller in self.robot.controllers.values(): - action.append(desired_joint_pos[controller.dof_idx]) - action = th.cat(action, dim=0) - assert action.shape[0] == self.robot.action_dim + action = self._q_to_action(desired_joint_pos) base_target_reached = False articulation_target_reached = False @@ -820,6 +842,14 @@ def _plan_and_execute_joint_motion( "Could not reach the target articulation joint positions. Try again", ) + def _q_to_action(self, q): + action = [] + for controller in self.robot.controllers.values(): + action.append(q[controller.dof_idx]) + action = th.cat(action, dim=0) + assert action.shape[0] == self.robot.action_dim + return action + def _add_linearly_interpolated_waypoints(self, plan, max_inter_dist): """ Adds waypoints to the plan so the distance between values in the plan never exceeds the max_inter_dist. @@ -1081,43 +1111,53 @@ def _move_hand_linearly_cartesian( "Your hand was obstructed from moving to the desired world position", ) - def _execute_grasp(self): + def _move_fingers_to_limit(self, limit_type): """ - Yields action for the robot to grasp + Helper function to move the robot's fingers to their limit positions. - Returns: - th.tensor or None: Action array for one step for the robot to grasp or None if its done grasping - """ - for _ in range(m.MAX_STEPS_FOR_GRASP_OR_RELEASE): - joint_position = self.robot.get_joint_positions()[self.robot.gripper_control_idx[self.arm]] - joint_lower_limit = self.robot.joint_lower_limits[self.robot.gripper_control_idx[self.arm]] + Args: + limit_type (str): Either 'lower' for grasping or 'upper' for releasing. - if th.allclose(joint_position, joint_lower_limit, atol=0.01): - break + Yields: + th.tensor or None: Action array for one step for the robot to move fingers or None if done. + """ + q = self.robot.get_joint_positions() + joint_names = list(self.robot.joints.keys()) + for finger_joints in self.robot.finger_joints.values(): + for finger_joint in finger_joints: + idx = joint_names.index(finger_joint.joint_name) + q[idx] = getattr(finger_joint, f"{limit_type}_limit") + action = self._q_to_action(q) + finger_joint_limits = getattr(self.robot, f"joint_{limit_type}_limits")[ + self.robot.gripper_control_idx[self.arm] + ] - action = self._empty_action() - controller_name = "gripper_{}".format(self.arm) - action[self.robot.controller_action_idx[controller_name]] = -1.0 + for _ in range(m.MAX_STEPS_FOR_GRASP_OR_RELEASE): + finger_joint_positions = self.robot.get_joint_positions()[self.robot.gripper_control_idx[self.arm]] + if th.allclose(finger_joint_positions, finger_joint_limits, atol=0.005): + break + elif limit_type == "lower" and self._get_obj_in_hand() is not None: + # If we are grasping an object, we should stop when object is detected in hand + break yield self._postprocess_action(action) - def _execute_release(self): + def _execute_grasp(self): """ - Yields action for the robot to release its grasp + Yields action for the robot to grasp. 
Returns: - th.tensor or None: Action array for one step for the robot to release or None if its done releasing + th.tensor or None: Action array for one step for the robot to grasp or None if done grasping. """ - for _ in range(m.MAX_STEPS_FOR_GRASP_OR_RELEASE): - joint_position = self.robot.get_joint_positions()[self.robot.gripper_control_idx[self.arm]] - joint_upper_limit = self.robot.joint_upper_limits[self.robot.gripper_control_idx[self.arm]] + yield from self._move_fingers_to_limit("lower") - if th.allclose(joint_position, joint_upper_limit, atol=0.01): - break + def _execute_release(self): + """ + Yields action for the robot to release its grasp. - action = self._empty_action() - controller_name = "gripper_{}".format(self.arm) - action[self.robot.controller_action_idx[controller_name]] = 1.0 - yield self._postprocess_action(action) + Returns: + th.tensor or None: Action array for one step for the robot to release or None if done releasing. + """ + yield from self._move_fingers_to_limit("upper") if self._get_obj_in_hand() is not None: raise ActionPrimitiveError( @@ -1251,7 +1291,7 @@ def _empty_action(self, follow_arm_targets=True): action[action_idx] = partial_action return action - def _reset_hand(self, arm=None): + def _reset_hand(self, arm=None, attached_obj=None): """ Yields action to move the hand to the position optimal for executing subsequent action primitives @@ -1263,29 +1303,32 @@ def _reset_hand(self, arm=None): indented_print("Resetting hand") # TODO: make this work with both hands reset_eef_pose = self._get_reset_eef_pose()[arm] - yield from self._move_hand(reset_eef_pose, arm=arm) + yield from self._move_hand(reset_eef_pose, arm=arm, attached_obj=attached_obj) + @property def _get_reset_eef_pose(self): - # TODO: Define this for all robots - if isinstance(self.robot, Tiago): - return th.tensor([0.28493954, 0.37450749, 1.1512334]), th.tensor( - [-0.21533823, 0.05361032, -0.08631776, 0.97123871] - ) - elif isinstance(self.robot, Fetch): - return th.tensor([0.48688125, -0.12507881, 0.97888719]), th.tensor( - [0.61324748, 0.61305553, -0.35266518, 0.35173529] - ) + if isinstance(self.robot, Fetch): + return { + self.arm: ( + th.tensor([0.48688125, -0.12507881, 0.97888719]), + th.tensor([0.61324748, 0.61305553, -0.35266518, 0.35173529]), + ) + } elif isinstance(self.robot, R1): return { self.robot.arm_names[0]: ( - th.tensor([0.4923, 0.4144, 1.4077]), - th.tensor([0.7071, 0.0002, 0.7072, -0.0001]), + th.tensor([0.43, 0.2, 1.2]), + th.tensor([1.0, 0.0, 0.0, 0.0]), ), self.robot.arm_names[1]: ( - th.tensor([0.4926, -0.4141, 1.4081]), - th.tensor([-0.7072, -0.0001, -0.7070, 0.0001]), + th.tensor([0.43, -0.2, 1.2]), + th.tensor([-1.0, 0.0, 0.0, 0.0]), ), } + # TODO: Define this for Tiago + # th.tensor([0.28493954, 0.37450749, 1.1512334]), th.tensor( + # [-0.21533823, 0.05361032, -0.08631776, 0.97123871] + # ) else: raise ValueError(f"Unsupported robot: {type(self.robot)}") @@ -1550,7 +1593,7 @@ def _sample_pose_near_object(self, obj, pose_on_obj=None, **kwargs): pos_on_obj = self._sample_position_on_aabb_side(obj) pose_on_obj = [pos_on_obj, th.tensor([0, 0, 0, 1])] - distance_lo, distance_hi = 0.0, 5.0 + distance_lo, distance_hi = 0.0, 1.0 distance = (th.rand(1) * (distance_hi - distance_lo) + distance_lo).item() yaw_lo, yaw_hi = -math.pi, math.pi yaw = th.rand(1) * (yaw_hi - yaw_lo) + yaw_lo diff --git a/omnigibson/robots/r1.py b/omnigibson/robots/r1.py index e5abd1d53..b7364b7b2 100644 --- a/omnigibson/robots/r1.py +++ b/omnigibson/robots/r1.py @@ -170,8 +170,8 @@ def 
untucked_default_joint_pos(self): pos = th.zeros(self.n_dof) # Keep the current joint positions for the base joints pos[self.base_idx] = self.get_joint_positions()[self.base_idx] - for arm in self.arm_names: - pos[self.arm_control_idx[arm]] = th.tensor([0.0, 1.906, -0.991, 1.571, 0.915, -1.571]) + pos[self.arm_control_idx["left"]] = th.tensor([-0.0464, 2.6172, -1.4584, -0.0433, 1.5899, -1.1587]) + pos[self.arm_control_idx["right"]] = th.tensor([0.0464, 2.6168, -1.4570, 0.0418, -1.5896, 1.1593]) return pos @property From 749d2cf8401981942dd269623f7ba024a344745f Mon Sep 17 00:00:00 2001 From: hang-yin Date: Fri, 8 Nov 2024 13:44:59 -0800 Subject: [PATCH 07/76] Refactor primitives and get grasp working --- .../starter_semantic_action_primitives.py | 255 +++++++++++------- 1 file changed, 164 insertions(+), 91 deletions(-) diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index a3c16d3a3..04d5de353 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -35,6 +35,7 @@ from omnigibson.tasks.behavior_task import BehaviorTask from omnigibson.utils.control_utils import FKSolver, IKSolver, orientation_error from omnigibson.utils.grasping_planning_utils import get_grasp_poses_for_object_sticky, get_grasp_position_for_open +from omnigibson.utils.motion_planning_utils import detect_robot_collision_in_sim from omnigibson.utils.object_state_utils import sample_cuboid_for_predicate from omnigibson.utils.python_utils import multi_dim_linspace from omnigibson.utils.ui_utils import create_module_logger @@ -465,6 +466,12 @@ def _grasp(self, obj): Returns: th.tensor or None: Action array for one step for the robot to grasp or None if grasp completed """ + if obj is None or not isinstance(obj, BaseObject): + raise ActionPrimitiveError( + ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, + "You need to provide an object to grasp", + {"provided object": obj}, + ) # Update the tracking to track the object. self._tracking_object = obj @@ -490,12 +497,12 @@ def _grasp(self, obj): grasp_pose, object_direction = random.choice(grasp_poses) # Take into account the robot eef reset pose - reset_orientation = self._get_reset_eef_pose()[self.arm][1] + reset_orientation = self._get_reset_eef_pose("world")[self.arm][1] grasp_pose = (grasp_pose[0], T.quat_multiply(grasp_pose[1], reset_orientation)) # Prepare data for the approach later. # TODO: fix this threshold - approach_pos = grasp_pose[0] + object_direction * 0.03 # m.GRASP_APPROACH_DISTANCE + approach_pos = grasp_pose[0] + object_direction * 0.04 # m.GRASP_APPROACH_DISTANCE approach_pose = (approach_pos, grasp_pose[1]) # If the grasp pose is too far, navigate. @@ -505,30 +512,40 @@ def _grasp(self, obj): indented_print("Moving hand to grasp pose") yield from self._move_hand(grasp_pose, motion_constraint=[1, 1, 0, 0, 0, 0]) - # # We can pre-grasp in sticky grasping mode. - # indented_print("Pregrasp squeeze") - # yield from self._execute_grasp() - - # Since the grasp pose is slightly off the object, we want to move towards the object, around 5cm. - # It's okay if we can't go all the way because we run into the object. 
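        # What the planner-free approach below boils down to (a sketch; `arm_idx` is
        # hypothetical shorthand for this robot's arm control indices):
        #   arm_q = self._convert_cartesian_to_joint_space(approach_pose, arm=self.arm)  # IK only
        #   q = self.robot.get_joint_positions()
        #   q[arm_idx] = arm_q
        #   # executed as a single waypoint, stopping early if the hand contacts the object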
- indented_print("Performing grasp approach") - - # TODO: implement linear cartesian motion with curobo constrained planning - yield from self._move_hand(approach_pose) # motion_constraint=[0, 0, 0, 0, 0, 1] - - yield from self._execute_grasp() + # Pre-grasp in sticky grasping mode. + if self.robot.grasping_mode == "sticky": + indented_print("Pregrasp squeeze") + yield from self._execute_grasp() + + # Since the grasp pose is slightly off the object, we want to move towards the object, around 5cm. + # It's okay if we can't go all the way because we run into the object. + indented_print("Performing grasp approach") + # Use direct IK to move the hand to the approach pose. + yield from self._move_hand(approach_pose, plan_motion=False) + elif self.robot.grasping_mode == "assisted": + indented_print("Performing grasp approach") + # TODO: implement linear cartesian motion with curobo constrained planning + yield from self._move_hand(approach_pose) # motion_constraint=[0, 0, 0, 0, 0, 1] + + yield from self._execute_grasp() + + # Step once to update + empty_action = self._q_to_action(self.robot.get_joint_positions()) + yield self._postprocess_action(empty_action) indented_print("Checking grasp") - if self._get_obj_in_hand() is None: + obj_in_hand = self._get_obj_in_hand() + if obj_in_hand is None: raise ActionPrimitiveError( ActionPrimitiveError.Reason.POST_CONDITION_ERROR, "Grasp completed, but no object detected in hand after executing grasp", {"target object": obj.name}, ) + # TODO: reset density back when releasing + obj_in_hand.root_link.density = 1.0 indented_print("Moving hand back") - # TODO: 1. default to attach in-hand object 2. this root link logic is bad, fix it - yield from self._reset_hand(attached_obj={self.robot.eef_link_names[self.arm]: obj.root_link}) + yield from self._reset_hand() indented_print("Done with grasp") @@ -605,6 +622,12 @@ def _place_with_predicate(self, obj, predicate): Returns: th.tensor or None: Action array for one step for the robot to place or None if place completed """ + if obj is None or not isinstance(obj, BaseObject): + raise ActionPrimitiveError( + ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, + "You need to provide an object to place the object in your hand on", + {"provided object": obj}, + ) # Update the tracking to track the object. 
self._tracking_object = obj @@ -637,9 +660,9 @@ def _place_with_predicate(self, obj, predicate): {"dropped object": obj_in_hand.name, "target object": obj.name}, ) - yield from self._move_hand_upward() + yield from self._reset_hand() - def _convert_cartesian_to_joint_space(self, target_pose): + def _convert_cartesian_to_joint_space(self, target_pose, arm=None): """ Gets joint positions for the arm so eef is at the target pose @@ -651,8 +674,8 @@ def _convert_cartesian_to_joint_space(self, target_pose): - th.tensor or None: Joint positions to reach target pose or None if impossible to reach target pose - th.tensor: Indices for joints in the robot """ - relative_target_pose = self._get_pose_in_robot_frame(target_pose) - joint_pos = self._ik_solver_cartesian_to_joint_space(relative_target_pose) + relative_target_pose = self._world_pose_to_robot_pose(target_pose) + joint_pos = self._ik_solver_cartesian_to_joint_space(relative_target_pose, arm=arm) if joint_pos is None: raise ActionPrimitiveError( ActionPrimitiveError.Reason.PLANNING_ERROR, @@ -670,7 +693,7 @@ def _target_in_reach_of_robot(self, target_pose): Returns: bool: Whether eef can reach the target pose """ - relative_target_pose = self._get_pose_in_robot_frame(target_pose) + relative_target_pose = self._world_pose_to_robot_pose(target_pose) return self._target_in_reach_of_robot_relative(relative_target_pose) def _target_in_reach_of_robot_relative(self, relative_target_pose): @@ -727,13 +750,15 @@ def _ik_solver_cartesian_to_joint_space(self, relative_target_pose, arm=None): # Grab the joint positions in order to reach the desired pose target joint_pos = ik_solver.solve( target_pos=relative_target_pose[0], - target_quat=relative_target_pose[1], - max_iterations=100, + # target_quat=relative_target_pose[1], + max_iterations=200, + initial_joint_pos=self.robot.get_joint_positions()[self._manipulation_control_idx], + tolerance_pos=0.005, ) return joint_pos - def _move_hand(self, target_pose, arm=None, stop_if_stuck=False, attached_obj=None, motion_constraint=None): + def _move_hand(self, target_pose, plan_motion=True, arm=None, attached_obj=None, motion_constraint=None): """ Yields action for the robot to move hand so the eef is in the target pose using the planner @@ -745,35 +770,50 @@ def _move_hand(self, target_pose, arm=None, stop_if_stuck=False, attached_obj=No """ if arm is None: arm = self.arm - # TODO: this settle robot does not work because empty action doesn't work with the current controller modes - # yield from self._settle_robot() - # curobo motion generator takes a pose but outputs joint positions - left_hand_pos, left_hand_quat = target_pose if arm == "left" else self.robot.get_eef_pose(arm="left") - right_hand_pos, right_hand_quat = target_pose if arm == "right" else self.robot.get_eef_pose(arm="right") - target_pos = { - self.robot.eef_link_names["left"]: left_hand_pos, - self.robot.eef_link_names["right"]: right_hand_pos, - } - target_quat = { - self.robot.eef_link_names["left"]: left_hand_quat, - self.robot.eef_link_names["right"]: right_hand_quat, - } - yield from self._plan_and_execute_joint_motion( - target_pos=target_pos, - target_quat=target_quat, - embodiment_selection=CuroboEmbodimentSelection.ARM, - attached_obj=attached_obj, - motion_constraint=motion_constraint, - ) + if plan_motion: + # If an object is grasped, we need to pass it to the motion planner + obj_in_hand = self._get_obj_in_hand() + if obj_in_hand is not None: + # TODO: this root link logic is bad, fix it + attached_obj = 
{self.robot.eef_link_names[arm]: obj_in_hand.root_link} + # TODO: this settle robot does not work because empty action doesn't work with the current controller modes + # yield from self._settle_robot() + # curobo motion generator takes a pose but outputs joint positions + left_hand_pos, left_hand_quat = target_pose if arm == "left" else self.robot.get_eef_pose(arm="left") + right_hand_pos, right_hand_quat = target_pose if arm == "right" else self.robot.get_eef_pose(arm="right") + target_pos = { + self.robot.eef_link_names["left"]: left_hand_pos, + self.robot.eef_link_names["right"]: right_hand_pos, + } + target_quat = { + self.robot.eef_link_names["left"]: left_hand_quat, + self.robot.eef_link_names["right"]: right_hand_quat, + } + q_traj = self._plan_joint_motion( + target_pos=target_pos, + target_quat=target_quat, + embodiment_selection=CuroboEmbodimentSelection.ARM, + attached_obj=attached_obj, + motion_constraint=motion_constraint, + ) + else: + # Move EEF directly without collsion checking + arm_joint_pos = self._convert_cartesian_to_joint_space(target_pose, arm=arm) + single_traj = self.robot.get_joint_positions() + single_traj[self.robot.arm_control_idx[arm]] = arm_joint_pos + q_traj = [single_traj] + indented_print(f"Plan has {len(q_traj)} steps") + for i, joint_pos in enumerate(q_traj): + indented_print(f"Executing arm motion plan step {i + 1}/{len(q_traj)}") + yield from self._execute_joint_motion(joint_pos.cpu(), stop_on_contact=plan_motion is False) - def _plan_and_execute_joint_motion( + def _plan_joint_motion( self, target_pos, target_quat, embodiment_selection=CuroboEmbodimentSelection.DEFAULT, attached_obj=None, motion_constraint=None, - ignore_failure=False, ): planning_attempts = 0 success = False @@ -795,6 +835,9 @@ def _plan_and_execute_joint_motion( emb_sel=embodiment_selection, ) success, traj_path = successes[0].item(), traj_paths[0] + success = success and len(traj_path) != 4 + if traj_path is not None and len(traj_path) == 4: + breakpoint() planning_attempts += 1 if not success: @@ -803,40 +846,37 @@ def _plan_and_execute_joint_motion( "There is no accessible path from where you are to the desired pose. 
Try again", ) - q_traj = self._motion_generator.path_to_joint_trajectory(traj_path, embodiment_selection) - - # Follow the joint motion plan - indented_print(f"Plan has {len(q_traj)} steps") - for i, joint_pos in enumerate(q_traj): - indented_print(f"Executing motion plan step {i + 1}/{len(q_traj)}") - desired_joint_pos = joint_pos.cpu() - # Convert target joint positions to command - action = self._q_to_action(desired_joint_pos) - - base_target_reached = False - articulation_target_reached = False - - for i in range(m.MAX_STEPS_FOR_JOINT_MOTION): - current_joint_pos = self.robot.get_joint_positions() - joint_pos_diff = desired_joint_pos - current_joint_pos - base_joint_diff = joint_pos_diff[self.robot.base_control_idx] - articulation_joint_diff = joint_pos_diff[~self.robot.base_control_idx] # Gets all non-base joints - if th.max(th.abs(articulation_joint_diff)).item() < m.JOINT_POS_DIFF_THRESHOLD: - articulation_target_reached = True - # TODO: genralize this to transaltion&rotation + high/low precision modes - if th.max(th.abs(base_joint_diff)).item() < m.DEFAULT_DIST_THRESHOLD: - base_target_reached = True - if base_target_reached and articulation_target_reached: - break - yield self._postprocess_action(action) + return self._motion_generator.path_to_joint_trajectory(traj_path, embodiment_selection) + + def _execute_joint_motion(self, joint_pos, stop_on_contact=False, ignore_failure=False): + # Convert target joint positions to command + action = self._q_to_action(joint_pos) + + base_target_reached = False + articulation_target_reached = False + for _ in range(m.MAX_STEPS_FOR_JOINT_MOTION): + current_joint_pos = self.robot.get_joint_positions() + joint_pos_diff = joint_pos - current_joint_pos + base_joint_diff = joint_pos_diff[self.robot.base_control_idx] + articulation_joint_diff = joint_pos_diff[~self.robot.base_control_idx] # Gets all non-base joints + if th.max(th.abs(articulation_joint_diff)).item() < m.JOINT_POS_DIFF_THRESHOLD: + articulation_target_reached = True + # TODO: genralize this to transaltion&rotation + high/low precision modes + if th.max(th.abs(base_joint_diff)).item() < m.DEFAULT_DIST_THRESHOLD: + base_target_reached = True + if base_target_reached and articulation_target_reached: + break + if stop_on_contact and detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=False): + break + yield self._postprocess_action(action) - if not base_target_reached and not ignore_failure: + if not stop_on_contact and not ignore_failure: + if not base_target_reached: raise ActionPrimitiveError( ActionPrimitiveError.Reason.EXECUTION_ERROR, "Could not reach the target base joint positions. Try again", ) - - if not articulation_target_reached and not ignore_failure: + if not articulation_target_reached: raise ActionPrimitiveError( ActionPrimitiveError.Reason.EXECUTION_ERROR, "Could not reach the target articulation joint positions. 
Try again", @@ -952,7 +992,7 @@ def _move_hand_direct_ik( ), "Controller must be InverseKinematicsController" assert controller_config["mode"] == "pose_absolute_ori", "Controller must be in pose_absolute_ori mode" if in_world_frame: - target_pose = self._get_pose_in_robot_frame(target_pose) + target_pose = self._world_pose_to_robot_pose(target_pose) target_pos = target_pose[0] target_orn = target_pose[1] target_orn_axisangle = T.quat2axisangle(target_pose[1]) @@ -964,7 +1004,7 @@ def _move_hand_direct_ik( self._arm_targets[controller_name] = (target_pos, target_orn_axisangle) for i in range(m.MAX_STEPS_FOR_HAND_MOVE_IK): - current_pose = self._get_pose_in_robot_frame( + current_pose = self._world_pose_to_robot_pose( (self.robot.get_eef_position(self.arm), self.robot.get_eef_orientation(self.arm)) ) current_pos = current_pose[0] @@ -1263,7 +1303,7 @@ def _empty_action(self, follow_arm_targets=True): if isinstance(controller, InverseKinematicsController): arm = name.replace("arm_", "") target_pos, target_orn_axisangle = self._arm_targets[name] - current_pos, current_orn = self._get_pose_in_robot_frame( + current_pos, current_orn = self._world_pose_to_robot_pose( (self.robot.get_eef_position(arm), self.robot.get_eef_orientation(arm)) ) delta_pos = target_pos - current_pos @@ -1302,20 +1342,28 @@ def _reset_hand(self, arm=None, attached_obj=None): arm = self.arm indented_print("Resetting hand") # TODO: make this work with both hands - reset_eef_pose = self._get_reset_eef_pose()[arm] + reset_eef_pose = self._get_reset_eef_pose("world")[arm] yield from self._move_hand(reset_eef_pose, arm=arm, attached_obj=attached_obj) - @property - def _get_reset_eef_pose(self): + def _get_reset_eef_pose(self, frame="robot"): + """ + Get the reset eef pose for the robot + + Args: + frame (str): The frame in which the reset eef pose is specified, one of "robot" or "world" + + Returns: + dict of th.tensor: The reset eef pose for each robot arm + """ if isinstance(self.robot, Fetch): - return { + pose = { self.arm: ( th.tensor([0.48688125, -0.12507881, 0.97888719]), th.tensor([0.61324748, 0.61305553, -0.35266518, 0.35173529]), ) } elif isinstance(self.robot, R1): - return { + pose = { self.robot.arm_names[0]: ( th.tensor([0.43, 0.2, 1.2]), th.tensor([1.0, 0.0, 0.0, 0.0]), @@ -1331,6 +1379,12 @@ def _get_reset_eef_pose(self): # ) else: raise ValueError(f"Unsupported robot: {type(self.robot)}") + if frame == "robot": + return pose + elif frame == "world": + return {arm: self._robot_pose_to_world_pose(pose[arm]) for arm in pose} + else: + raise ValueError(f"Unsupported frame: {frame}") def _get_reset_joint_pos(self): reset_pose_fetch = th.tensor( @@ -1409,7 +1463,10 @@ def _navigate_to_pose(self, pose_2d): target_pos = {self.robot.base_footprint_link_name: pose_3d[0]} target_quat = {self.robot.base_footprint_link_name: pose_3d[1]} - yield from self._plan_and_execute_joint_motion(target_pos, target_quat, CuroboEmbodimentSelection.BASE) + q_traj = self._plan_joint_motion(target_pos, target_quat, CuroboEmbodimentSelection.BASE) + for i, joint_pos in enumerate(q_traj): + indented_print(f"Executing base motion plan step {i + 1}/{len(q_traj)}") + yield from self._execute_joint_motion(joint_pos.cpu(), stop_on_contact=False) # TODO: implement this for curobo? 
def _draw_plan(self, plan): @@ -1485,7 +1542,7 @@ def _navigate_to_pose_direct(self, pose_2d, low_precision=False): angle_threshold = m.LOW_PRECISION_ANGLE_THRESHOLD if low_precision else m.DEFAULT_ANGLE_THRESHOLD end_pose = self._get_robot_pose_from_2d_pose(pose_2d) - body_target_pose = self._get_pose_in_robot_frame(end_pose) + body_target_pose = self._world_pose_to_robot_pose(end_pose) for _ in range(m.MAX_STEPS_FOR_WAYPOINT_NAVIGATION): if th.norm(body_target_pose[0][:2]) < dist_threshold: @@ -1496,7 +1553,7 @@ def _navigate_to_pose_direct(self, pose_2d, low_precision=False): end_pose[0], T.euler2quat(th.tensor([0, 0, math.atan2(diff_pos[1], diff_pos[0])], dtype=th.float32)), ) - body_intermediate_pose = self._get_pose_in_robot_frame(intermediate_pose) + body_intermediate_pose = self._world_pose_to_robot_pose(intermediate_pose) diff_yaw = T.quat2euler(body_intermediate_pose[1])[2].item() if abs(diff_yaw) > m.DEFAULT_ANGLE_THRESHOLD: yield from self._rotate_in_place(intermediate_pose, angle_threshold=m.DEFAULT_ANGLE_THRESHOLD) @@ -1515,7 +1572,7 @@ def _navigate_to_pose_direct(self, pose_2d, low_precision=False): yield self._postprocess_action(action) - body_target_pose = self._get_pose_in_robot_frame(end_pose) + body_target_pose = self._world_pose_to_robot_pose(end_pose) else: raise ActionPrimitiveError( ActionPrimitiveError.Reason.EXECUTION_ERROR, @@ -1537,7 +1594,7 @@ def _rotate_in_place(self, end_pose, angle_threshold=m.DEFAULT_ANGLE_THRESHOLD): Returns: th.tensor or None: Action array for one step for the robot to rotate or None if it is done rotating """ - body_target_pose = self._get_pose_in_robot_frame(end_pose) + body_target_pose = self._world_pose_to_robot_pose(end_pose) diff_yaw = T.quat2euler(body_target_pose[1])[2].item() for _ in range(m.MAX_STEPS_FOR_WAYPOINT_NAVIGATION): @@ -1563,7 +1620,7 @@ def _rotate_in_place(self, end_pose, angle_threshold=m.DEFAULT_ANGLE_THRESHOLD): action[self.robot.controller_action_idx["base"]] = base_action yield self._postprocess_action(action) - body_target_pose = self._get_pose_in_robot_frame(end_pose) + body_target_pose = self._world_pose_to_robot_pose(end_pose) diff_yaw = T.quat2euler(body_target_pose[1])[2].item() else: raise ActionPrimitiveError( @@ -1774,7 +1831,7 @@ def _get_robot_pose_from_2d_pose(pose_2d): orn = T.euler2quat(th.tensor([0, 0, pose_2d[2]], dtype=th.float32)) return pos, orn - def _get_pose_in_robot_frame(self, pose): + def _world_pose_to_robot_pose(self, pose): """ Converts the pose in the world frame to the robot frame @@ -1789,6 +1846,22 @@ def _get_pose_in_robot_frame(self, pose): body_pose = self.robot.get_position_orientation() return T.relative_pose_transform(*pose, *body_pose) + def _robot_pose_to_world_pose(self, pose): + """ + Converts the pose in the robot frame to the world frame + + Args: + pose_2d (Iterable): (x, y, yaw) 2d pose + + Returns: + 2-tuple: + - 3-array: (x,y,z) Position in the world frame + - 4-array: (x,y,z,w) Quaternion orientation in the world frame + """ + body_pose = self.robot.get_position_orientation() + inverse_body_pose = T.invert_pose_transform(body_pose[0], body_pose[1]) + return T.relative_pose_transform(*pose, *inverse_body_pose) + def _get_hand_pose_for_object_pose(self, desired_pose): """ Gets the pose of the hand for the desired object pose From c06029c033a2e67b522e0ef905489e6284ded14c Mon Sep 17 00:00:00 2001 From: hang-yin Date: Mon, 11 Nov 2024 11:58:35 -0800 Subject: [PATCH 08/76] Make primitives more robust and fast; add debug marker --- 
omnigibson/action_primitives/curobo.py | 10 +- .../starter_semantic_action_primitives.py | 180 +++++++++++------- 2 files changed, 119 insertions(+), 71 deletions(-) diff --git a/omnigibson/action_primitives/curobo.py b/omnigibson/action_primitives/curobo.py index 51f92b987..b66f2b751 100644 --- a/omnigibson/action_primitives/curobo.py +++ b/omnigibson/action_primitives/curobo.py @@ -313,6 +313,7 @@ def check_collisions( self, q, check_self_collision=True, + skip_obstacle_update=False, ): """ Checks collisions between the sphere representation of the robot and the rest of the current scene @@ -330,7 +331,8 @@ def check_collisions( emb_sel = CuroboEmbodimentSelection.DEFAULT # Update obstacles - self.update_obstacles(ignore_paths=None, emb_sel=emb_sel) + if not skip_obstacle_update: + self.update_obstacles(ignore_paths=None, emb_sel=emb_sel) q_pos = self.robot.get_joint_positions().unsqueeze(0) cu_joint_state = lazy.curobo.types.state.JointState( @@ -398,6 +400,7 @@ def compute_trajectories( success_ratio=None, attached_obj=None, motion_constraint=None, + skip_obstacle_update=False, emb_sel=CuroboEmbodimentSelection.DEFAULT, ): """ @@ -483,8 +486,9 @@ def compute_trajectories( ) plan_cfg.pose_cost_metric = pose_cost_metric - # Refresh the collision state - self.update_obstacles(ignore_paths=None, emb_sel=emb_sel) + if not skip_obstacle_update: + # Refresh the collision state + self.update_obstacles(ignore_paths=None, emb_sel=emb_sel) for link_name in target_pos.keys(): target_pos_link = target_pos[link_name] diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index 04d5de353..c39b5ec53 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -71,7 +71,7 @@ m.MAX_STEPS_FOR_SETTLING = 500 -m.MAX_STEPS_FOR_JOINT_MOTION = 500 +m.MAX_STEPS_FOR_JOINT_MOTION = 100 m.MAX_CARTESIAN_HAND_STEP = 0.002 m.MAX_STEPS_FOR_HAND_MOVE_JOINT = 500 @@ -81,7 +81,7 @@ m.MAX_ATTEMPTS_FOR_OPEN_CLOSE = 20 m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_WITH_OBJECT_AND_PREDICATE = 20 -m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_NEAR_OBJECT = 200 +m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_NEAR_OBJECT = 100 m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_IN_ROOM = 60 m.PREDICATE_SAMPLING_Z_OFFSET = 0.02 @@ -95,7 +95,7 @@ m.LOW_PRECISION_ANGLE_THRESHOLD = 0.2 m.TIAGO_TORSO_FIXED = False -m.JOINT_POS_DIFF_THRESHOLD = 0.005 +m.JOINT_POS_DIFF_THRESHOLD = 0.008 m.JOINT_CONTROL_MIN_ACTION = 0.0 m.MAX_ALLOWED_JOINT_ERROR_FOR_LINEAR_MOTION = math.radians(45) m.TIME_BEFORE_JOINT_STUCK_CHECK = 1.0 @@ -130,6 +130,7 @@ def __init__( enable_head_tracking=True, always_track_eef=False, task_relevant_objects_only=False, + debug_visual_marker=None, ): """ Initializes a StarterSemanticActionPrimitives generator. @@ -190,6 +191,7 @@ def __init__( arm_target = control_dict["joint_position"][arm_ctrl.dof_idx] self._arm_targets[arm] = arm_target + self.debug_visual_marker = debug_visual_marker @property def arm(self): @@ -262,7 +264,6 @@ def apply_ref(self, primitive, *args, attempts=5): Raises: ActionPrimitiveError: If primitive fails to execute """ - assert attempts > 0, "Must make at least one attempt" ctrl = self.controller_functions[primitive] errors = [] @@ -275,12 +276,12 @@ def apply_ref(self, primitive, *args, attempts=5): except ActionPrimitiveError as e: errors.append(e) - try: - # If we're not holding anything, release the hand so it doesn't stick to anything else. 
- if not self._get_obj_in_hand(): - yield from self._execute_release() - except ActionPrimitiveError: - pass + # # try: + # # # If we're not holding anything, release the hand so it doesn't stick to anything else. + # # if not self._get_obj_in_hand(): + # # yield from self._execute_release() + # # except ActionPrimitiveError: + # # pass try: # Make sure we retract the arm after every step @@ -502,7 +503,7 @@ def _grasp(self, obj): # Prepare data for the approach later. # TODO: fix this threshold - approach_pos = grasp_pose[0] + object_direction * 0.04 # m.GRASP_APPROACH_DISTANCE + approach_pos = grasp_pose[0] + object_direction * 0.055 # m.GRASP_APPROACH_DISTANCE approach_pose = (approach_pos, grasp_pose[1]) # If the grasp pose is too far, navigate. @@ -510,7 +511,7 @@ def _grasp(self, obj): yield from self._navigate_if_needed(obj, pose_on_obj=grasp_pose) indented_print("Moving hand to grasp pose") - yield from self._move_hand(grasp_pose, motion_constraint=[1, 1, 0, 0, 0, 0]) + yield from self._move_hand(grasp_pose, motion_constraint=[0, 0, 0, 0, 0, 0]) # Pre-grasp in sticky grasping mode. if self.robot.grasping_mode == "sticky": @@ -526,16 +527,15 @@ def _grasp(self, obj): indented_print("Performing grasp approach") # TODO: implement linear cartesian motion with curobo constrained planning yield from self._move_hand(approach_pose) # motion_constraint=[0, 0, 0, 0, 0, 1] - yield from self._execute_grasp() - # Step once to update - empty_action = self._q_to_action(self.robot.get_joint_positions()) - yield self._postprocess_action(empty_action) + # Step a few times to update + yield from self._settle_robot() indented_print("Checking grasp") obj_in_hand = self._get_obj_in_hand() if obj_in_hand is None: + breakpoint() raise ActionPrimitiveError( ActionPrimitiveError.Reason.POST_CONDITION_ERROR, "Grasp completed, but no object detected in hand after executing grasp", @@ -639,11 +639,29 @@ def _place_with_predicate(self, obj, predicate): ) # Sample location to place object - obj_pose = self._sample_pose_with_object_and_predicate(predicate, obj_in_hand, obj) - hand_pose = self._get_hand_pose_for_object_pose(obj_pose) + pose_candidates = [] + directly_move_hand_pose = None + while len(pose_candidates) < 20: + obj_pose = self._sample_pose_with_object_and_predicate(predicate, obj_in_hand, obj) + hand_pose = self._get_hand_pose_for_object_pose(obj_pose) + if self._target_in_reach_of_robot(hand_pose): + directly_move_hand_pose = hand_pose + break + else: + pose_candidates.append(hand_pose) + + if directly_move_hand_pose is not None: + yield from self._move_hand(directly_move_hand_pose) + else: + for candidate in pose_candidates: + valid_navigation_pose = self._sample_pose_near_object(obj, pose_on_obj=candidate) + if valid_navigation_pose is None: + continue + else: + yield from self._navigate_to_pose(valid_navigation_pose) + yield from self._move_hand(candidate) + break - yield from self._navigate_if_needed(obj, pose_on_obj=hand_pose) - yield from self._move_hand(hand_pose) yield from self._execute_release() if self._get_obj_in_hand() is not None: @@ -768,6 +786,8 @@ def _move_hand(self, target_pose, plan_motion=True, arm=None, attached_obj=None, Returns: th.tensor or None: Action array for one step for the robot to move hand or None if its at the target pose """ + if self.debug_visual_marker is not None: + self.debug_visual_marker.set_position_orientation(*target_pose) if arm is None: arm = self.arm if plan_motion: @@ -776,8 +796,7 @@ def _move_hand(self, target_pose, plan_motion=True, arm=None, 
attached_obj=None, if obj_in_hand is not None: # TODO: this root link logic is bad, fix it attached_obj = {self.robot.eef_link_names[arm]: obj_in_hand.root_link} - # TODO: this settle robot does not work because empty action doesn't work with the current controller modes - # yield from self._settle_robot() + yield from self._settle_robot() # curobo motion generator takes a pose but outputs joint positions left_hand_pos, left_hand_quat = target_pose if arm == "left" else self.robot.get_eef_pose(arm="left") right_hand_pos, right_hand_quat = target_pose if arm == "right" else self.robot.get_eef_pose(arm="right") @@ -795,7 +814,7 @@ def _move_hand(self, target_pose, plan_motion=True, arm=None, attached_obj=None, embodiment_selection=CuroboEmbodimentSelection.ARM, attached_obj=attached_obj, motion_constraint=motion_constraint, - ) + ).cpu() else: # Move EEF directly without collsion checking arm_joint_pos = self._convert_cartesian_to_joint_space(target_pose, arm=arm) @@ -803,9 +822,7 @@ def _move_hand(self, target_pose, plan_motion=True, arm=None, attached_obj=None, single_traj[self.robot.arm_control_idx[arm]] = arm_joint_pos q_traj = [single_traj] indented_print(f"Plan has {len(q_traj)} steps") - for i, joint_pos in enumerate(q_traj): - indented_print(f"Executing arm motion plan step {i + 1}/{len(q_traj)}") - yield from self._execute_joint_motion(joint_pos.cpu(), stop_on_contact=plan_motion is False) + yield from self._execute_motion_plan(q_traj, stop_on_contact=plan_motion is False) def _plan_joint_motion( self, @@ -818,6 +835,7 @@ def _plan_joint_motion( planning_attempts = 0 success = False traj_path = None + self._motion_generator.update_obstacles() while not success and planning_attempts < m.MAX_PLANNING_ATTEMPTS: successes, traj_paths = self._motion_generator.compute_trajectories( target_pos=target_pos, @@ -832,6 +850,7 @@ def _plan_joint_motion( success_ratio=1.0, attached_obj=attached_obj, motion_constraint=motion_constraint, + skip_obstacle_update=True, emb_sel=embodiment_selection, ) success, traj_path = successes[0].item(), traj_paths[0] @@ -848,39 +867,48 @@ def _plan_joint_motion( return self._motion_generator.path_to_joint_trajectory(traj_path, embodiment_selection) - def _execute_joint_motion(self, joint_pos, stop_on_contact=False, ignore_failure=False): - # Convert target joint positions to command - action = self._q_to_action(joint_pos) - - base_target_reached = False - articulation_target_reached = False - for _ in range(m.MAX_STEPS_FOR_JOINT_MOTION): - current_joint_pos = self.robot.get_joint_positions() - joint_pos_diff = joint_pos - current_joint_pos - base_joint_diff = joint_pos_diff[self.robot.base_control_idx] - articulation_joint_diff = joint_pos_diff[~self.robot.base_control_idx] # Gets all non-base joints - if th.max(th.abs(articulation_joint_diff)).item() < m.JOINT_POS_DIFF_THRESHOLD: - articulation_target_reached = True - # TODO: genralize this to transaltion&rotation + high/low precision modes - if th.max(th.abs(base_joint_diff)).item() < m.DEFAULT_DIST_THRESHOLD: - base_target_reached = True - if base_target_reached and articulation_target_reached: - break - if stop_on_contact and detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=False): - break - yield self._postprocess_action(action) + def _execute_motion_plan(self, q_traj, stop_on_contact=False, ignore_failure=False): + for i, joint_pos in enumerate(q_traj): + indented_print(f"Executing arm motion plan step {i + 1}/{len(q_traj)}") + # Convert target joint positions to command + action = 
self._q_to_action(joint_pos) + + base_target_reached = False + articulation_target_reached = False + collision_detected = False + for _ in range(m.MAX_STEPS_FOR_JOINT_MOTION): + current_joint_pos = self.robot.get_joint_positions() + joint_pos_diff = joint_pos - current_joint_pos + base_joint_diff = joint_pos_diff[self.robot.base_control_idx] + articulation_joint_diff = joint_pos_diff[~self.robot.base_control_idx] # Gets all non-base joints + if th.max(th.abs(articulation_joint_diff)).item() < m.JOINT_POS_DIFF_THRESHOLD: + articulation_target_reached = True + # TODO: genralize this to transaltion&rotation + high/low precision modes + if th.max(th.abs(base_joint_diff)).item() < m.DEFAULT_DIST_THRESHOLD: + base_target_reached = True + if base_target_reached and articulation_target_reached: + break + collision_detected = detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=False) + if stop_on_contact and collision_detected: + break + yield self._postprocess_action(action) - if not stop_on_contact and not ignore_failure: - if not base_target_reached: - raise ActionPrimitiveError( - ActionPrimitiveError.Reason.EXECUTION_ERROR, - "Could not reach the target base joint positions. Try again", - ) - if not articulation_target_reached: - raise ActionPrimitiveError( - ActionPrimitiveError.Reason.EXECUTION_ERROR, - "Could not reach the target articulation joint positions. Try again", - ) + if not stop_on_contact and not ignore_failure: + if not base_target_reached: + breakpoint() + raise ActionPrimitiveError( + ActionPrimitiveError.Reason.EXECUTION_ERROR, + "Could not reach the target base joint positions. Try again", + ) + if not articulation_target_reached: + breakpoint() + raise ActionPrimitiveError( + ActionPrimitiveError.Reason.EXECUTION_ERROR, + "Could not reach the target articulation joint positions. Try again", + ) + # elif collision_detected: + # # TODO: figure out the logic between collision and failure + # break def _q_to_action(self, q): action = [] @@ -1343,6 +1371,8 @@ def _reset_hand(self, arm=None, attached_obj=None): indented_print("Resetting hand") # TODO: make this work with both hands reset_eef_pose = self._get_reset_eef_pose("world")[arm] + if self.debug_visual_marker is not None: + self.debug_visual_marker.set_position_orientation(*reset_eef_pose) yield from self._move_hand(reset_eef_pose, arm=arm, attached_obj=attached_obj) def _get_reset_eef_pose(self, frame="robot"): @@ -1460,13 +1490,13 @@ def _navigate_to_pose(self, pose_2d): # TODO: change defualt to 0.0? Why is it 0.1? # TODO: do we still need high/low precision modes? pose_3d[0][2] = 0.0 + if self.debug_visual_marker is not None: + self.debug_visual_marker.set_position_orientation(*pose_3d) target_pos = {self.robot.base_footprint_link_name: pose_3d[0]} target_quat = {self.robot.base_footprint_link_name: pose_3d[1]} - q_traj = self._plan_joint_motion(target_pos, target_quat, CuroboEmbodimentSelection.BASE) - for i, joint_pos in enumerate(q_traj): - indented_print(f"Executing base motion plan step {i + 1}/{len(q_traj)}") - yield from self._execute_joint_motion(joint_pos.cpu(), stop_on_contact=False) + q_traj = self._plan_joint_motion(target_pos, target_quat, CuroboEmbodimentSelection.BASE).cpu() + yield from self._execute_motion_plan(q_traj, stop_on_contact=True) # TODO: implement this for curobo? 
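The pose-sampling loop below leans on the new skip_obstacle_update flag: the scene's
obstacles are synced to curobo once, and every candidate base pose is then checked
against that cached world. A distilled sketch (`candidate_q` is a hypothetical list of
full joint-position vectors, one per sampled 2D pose):

    motion_generator.update_obstacles()
    for q in candidate_q:
        in_collision = motion_generator.check_collisions(
            q, check_self_collision=False, skip_obstacle_update=True
        )[0].cpu().item()
        if not in_collision:
            break  # collision-free candidate found

Skipping the per-query obstacle sync is what makes sampling up to
MAX_ATTEMPTS_FOR_SAMPLING_POSE_NEAR_OBJECT candidate poses tolerable.
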
def _draw_plan(self, plan): @@ -1645,7 +1675,12 @@ def _sample_pose_near_object(self, obj, pose_on_obj=None, **kwargs): - 3-array: (x,y,z) Position in the world frame - 4-array: (x,y,z,w) Quaternion orientation in the world frame """ - for _ in range(m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_NEAR_OBJECT): + for _ in range(3): + og.sim.step() + self._motion_generator.update_obstacles() + attempt = 0 + while attempt < m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_NEAR_OBJECT: + # for _ in range(m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_NEAR_OBJECT): if pose_on_obj is None: pos_on_obj = self._sample_position_on_aabb_side(obj) pose_on_obj = [pos_on_obj, th.tensor([0, 0, 0, 1])] @@ -1662,6 +1697,12 @@ def _sample_pose_near_object(self, obj, pose_on_obj=None, **kwargs): yaw + math.pi - avg_arm_workspace_range, ] ) + + if self.debug_visual_marker is not None: + pose_3d = self._get_robot_pose_from_2d_pose(pose_2d) + self.debug_visual_marker.set_position_orientation(*pose_3d) + og.sim.step() + # Check room obj_rooms = ( obj.in_rooms @@ -1669,10 +1710,11 @@ def _sample_pose_near_object(self, obj, pose_on_obj=None, **kwargs): else [self.robot.scene._seg_map.get_room_instance_by_point(pose_on_obj[0][:2])] ) if self.robot.scene._seg_map.get_room_instance_by_point(pose_2d[:2]) not in obj_rooms: - indented_print("Candidate position is in the wrong room.") + # indented_print("Candidate position is in the wrong room.") continue if not self._test_pose(pose_2d, pose_on_obj=pose_on_obj, **kwargs): + attempt += 1 continue indented_print("Found valid position near object.") @@ -1805,10 +1847,12 @@ def _test_pose(self, pose_2d, pose_on_obj=None): return False joint_pos = self.robot.get_joint_positions() - joint_pos[self.robot.base_idx[:3]] = pose[0] + joint_pos[self.robot.base_idx[:2]] = pose[0][:2] joint_pos[self.robot.base_idx[3:]] = T.quat2euler(pose[1]) - collision_results = self._motion_generator.check_collisions(joint_pos, check_self_collision=False) + collision_results = self._motion_generator.check_collisions( + joint_pos, check_self_collision=False, skip_obstacle_update=True + ) if collision_results[0].cpu().item(): indented_print("Candidate position failed collision test.") @@ -1899,11 +1943,11 @@ def _settle_robot(self): th.tensor or None: Action array for one step for the robot to do nothing """ for _ in range(30): - empty_action = self._empty_action() + empty_action = self._q_to_action(self.robot.get_joint_positions()) yield self._postprocess_action(empty_action) for _ in range(m.MAX_STEPS_FOR_SETTLING): if th.norm(self.robot.get_linear_velocity()) < 0.01: break - empty_action = self._empty_action() + empty_action = self._q_to_action(self.robot.get_joint_positions()) yield self._postprocess_action(empty_action) From c1b4986d9ccef90393e5f77f4915c224af10c1f8 Mon Sep 17 00:00:00 2001 From: hang-yin Date: Tue, 12 Nov 2024 10:59:15 -0800 Subject: [PATCH 09/76] Adapt curobo primitives to Tiago; call curobo planning and collision checks in batched mode; generalize primitives to dual-arm setting --- omnigibson/action_primitives/curobo.py | 11 +- .../starter_semantic_action_primitives.py | 306 ++++++++++-------- 2 files changed, 181 insertions(+), 136 deletions(-) diff --git a/omnigibson/action_primitives/curobo.py b/omnigibson/action_primitives/curobo.py index 3e45dcc9f..eedee511a 100644 --- a/omnigibson/action_primitives/curobo.py +++ b/omnigibson/action_primitives/curobo.py @@ -337,7 +337,6 @@ def check_collisions( self, q, check_self_collision=True, - skip_obstacle_update=False, ): """ Checks collisions between the sphere 
representation of the robot and the rest of the current scene @@ -355,8 +354,7 @@ def check_collisions( emb_sel = CuroboEmbodimentSelection.DEFAULT # Update obstacles - if not skip_obstacle_update: - self.update_obstacles() + self.update_obstacles() q_pos = self.robot.get_joint_positions().unsqueeze(0) cu_joint_state = lazy.curobo.types.state.JointState( @@ -424,7 +422,6 @@ def compute_trajectories( success_ratio=None, attached_obj=None, motion_constraint=None, - skip_obstacle_update=False, emb_sel=CuroboEmbodimentSelection.DEFAULT, ): """ @@ -510,9 +507,7 @@ def compute_trajectories( ) plan_cfg.pose_cost_metric = pose_cost_metric - if not skip_obstacle_update: - # Refresh the collision state - self.update_obstacles() + self.update_obstacles() for link_name in target_pos.keys(): target_pos_link = target_pos[link_name] @@ -584,7 +579,7 @@ def compute_trajectories( object_names=obj_paths, ee_pose=ee_pose, link_name=self.robot.curobo_attached_object_link_names[ee_link_name], - scale=0.7, + scale=0.99, ) all_rollout_fns = [ diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index c39b5ec53..684635b97 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -71,7 +71,7 @@ m.MAX_STEPS_FOR_SETTLING = 500 -m.MAX_STEPS_FOR_JOINT_MOTION = 100 +m.MAX_STEPS_FOR_JOINT_MOTION = 500 m.MAX_CARTESIAN_HAND_STEP = 0.002 m.MAX_STEPS_FOR_HAND_MOVE_JOINT = 500 @@ -95,7 +95,7 @@ m.LOW_PRECISION_ANGLE_THRESHOLD = 0.2 m.TIAGO_TORSO_FIXED = False -m.JOINT_POS_DIFF_THRESHOLD = 0.008 +m.JOINT_POS_DIFF_THRESHOLD = 0.01 m.JOINT_CONTROL_MIN_ACTION = 0.0 m.MAX_ALLOWED_JOINT_ERROR_FOR_LINEAR_MOTION = math.radians(45) m.TIME_BEFORE_JOINT_STUCK_CHECK = 1.0 @@ -128,8 +128,11 @@ def __init__( self, robot, enable_head_tracking=True, - always_track_eef=False, + # TODO: fix this later + always_track_eef=True, task_relevant_objects_only=False, + planning_batch_size=5, + collision_check_batch_size=5, debug_visual_marker=None, ): """ @@ -148,8 +151,7 @@ def __init__( "It currently only works with Fetch and Tiago with their JointControllers set to delta mode." ) super().__init__(robot) - # TODO: Make this batch_size a parameter? - self._motion_generator = CuRoboMotionGenerator(robot=self.robot, batch_size=1) + self._motion_generator = CuRoboMotionGenerator(robot=self.robot, batch_size=planning_batch_size) self.controller_functions = { StarterSemanticActionPrimitiveSet.GRASP: self._grasp, StarterSemanticActionPrimitiveSet.PLACE_ON_TOP: self._place_on_top, @@ -191,6 +193,8 @@ def __init__( arm_target = control_dict["joint_position"][arm_ctrl.dof_idx] self._arm_targets[arm] = arm_target + + self._collision_check_batch_size = collision_check_batch_size self.debug_visual_marker = debug_visual_marker @property @@ -518,11 +522,12 @@ def _grasp(self, obj): indented_print("Pregrasp squeeze") yield from self._execute_grasp() - # Since the grasp pose is slightly off the object, we want to move towards the object, around 5cm. - # It's okay if we can't go all the way because we run into the object. - indented_print("Performing grasp approach") - # Use direct IK to move the hand to the approach pose. - yield from self._move_hand(approach_pose, plan_motion=False) + if self._get_obj_in_hand() is None: + # Since the grasp pose is slightly off the object, we want to move towards the object, around 5cm. 
+ # It's okay if we can't go all the way because we run into the object. + indented_print("Performing grasp approach") + # Use direct IK to move the hand to the approach pose. + yield from self._move_hand(approach_pose, avoid_collision=False) elif self.robot.grasping_mode == "assisted": indented_print("Performing grasp approach") # TODO: implement linear cartesian motion with curobo constrained planning @@ -535,7 +540,6 @@ def _grasp(self, obj): indented_print("Checking grasp") obj_in_hand = self._get_obj_in_hand() if obj_in_hand is None: - breakpoint() raise ActionPrimitiveError( ActionPrimitiveError.Reason.POST_CONDITION_ERROR, "Grasp completed, but no object detected in hand after executing grasp", @@ -544,8 +548,10 @@ def _grasp(self, obj): # TODO: reset density back when releasing obj_in_hand.root_link.density = 1.0 - indented_print("Moving hand back") - yield from self._reset_hand() + indented_print("Moving hands back") + # TODO: reset both hands with one call + for arm in self.robot.arm_names: + yield from self._reset_hand(arm) indented_print("Done with grasp") @@ -644,7 +650,7 @@ def _place_with_predicate(self, obj, predicate): while len(pose_candidates) < 20: obj_pose = self._sample_pose_with_object_and_predicate(predicate, obj_in_hand, obj) hand_pose = self._get_hand_pose_for_object_pose(obj_pose) - if self._target_in_reach_of_robot(hand_pose): + if self._target_in_reach_of_robot(hand_pose)[self.arm]: directly_move_hand_pose = hand_pose break else: @@ -709,7 +715,7 @@ def _target_in_reach_of_robot(self, target_pose): target_pose (Iterable of array): Position and orientation arrays in an iterable for the pose for the eef Returns: - bool: Whether eef can reach the target pose + dict: Whether each eef can reach the target pose """ relative_target_pose = self._world_pose_to_robot_pose(target_pose) return self._target_in_reach_of_robot_relative(relative_target_pose) @@ -722,28 +728,33 @@ def _target_in_reach_of_robot_relative(self, relative_target_pose): target_pose (Iterable of array): Position and orientation arrays in an iterable for pose for the eef Returns: - bool: Whether eef can the reach target pose + dict: Whether each eef can reach the target pose """ # TODO: output may need to be a dictionary of arms and whether they can reach the target pose + target_in_reach = dict() for arm in self.robot.arm_names: - if self._ik_solver_cartesian_to_joint_space(relative_target_pose, arm=arm) is not None: - return True - return False + target_in_reach[arm] = self._ik_solver_cartesian_to_joint_space(relative_target_pose, arm=arm) is not None + return target_in_reach - @property - def _manipulation_control_idx(self): + def _manipulation_control_idx(self, arm=None): """The appropriate manipulation control idx for the current settings.""" + if arm is None: + arm = self.arm + # TODO: look into this if isinstance(self.robot, Tiago): - if m.TIAGO_TORSO_FIXED: - assert self.arm == "left", "Fixed torso mode only supports left arm!" 
- return self.robot.arm_control_idx["left"] + if arm == "left": + return ( + self.robot.arm_control_idx["left"] + if m.TIAGO_TORSO_FIXED + else th.cat([self.robot.trunk_control_idx, self.robot.arm_control_idx["left"]]) + ) else: - return th.cat([self.robot.trunk_control_idx, self.robot.arm_control_idx[self.arm]]) - elif isinstance(self.robot, Fetch): + return self.robot.arm_control_idx["right"] + if isinstance(self.robot, Fetch): return th.cat([self.robot.trunk_control_idx, self.robot.arm_control_idx[self.arm]]) # Otherwise just return the default arm control idx - return self.robot.arm_control_idx[self.arm] + return self.robot.arm_control_idx[arm] def _ik_solver_cartesian_to_joint_space(self, relative_target_pose, arm=None): """ @@ -762,21 +773,21 @@ def _ik_solver_cartesian_to_joint_space(self, relative_target_pose, arm=None): ik_solver = IKSolver( robot_description_path=self.robot.robot_arm_descriptor_yamls[arm], robot_urdf_path=self.robot.urdf_path, - reset_joint_pos=self.robot.reset_joint_pos[self._manipulation_control_idx], + reset_joint_pos=self.robot.reset_joint_pos[self._manipulation_control_idx(arm)], eef_name=self.robot.eef_link_names[self.arm], ) # Grab the joint positions in order to reach the desired pose target joint_pos = ik_solver.solve( target_pos=relative_target_pose[0], - # target_quat=relative_target_pose[1], + target_quat=relative_target_pose[1], max_iterations=200, - initial_joint_pos=self.robot.get_joint_positions()[self._manipulation_control_idx], + initial_joint_pos=self.robot.get_joint_positions()[self._manipulation_control_idx(arm)], tolerance_pos=0.005, ) return joint_pos - def _move_hand(self, target_pose, plan_motion=True, arm=None, attached_obj=None, motion_constraint=None): + def _move_hand(self, target_pose, avoid_collision=True, arm=None, attached_obj=None, motion_constraint=None): """ Yields action for the robot to move hand so the eef is in the target pose using the planner @@ -790,7 +801,7 @@ def _move_hand(self, target_pose, plan_motion=True, arm=None, attached_obj=None, self.debug_visual_marker.set_position_orientation(*target_pose) if arm is None: arm = self.arm - if plan_motion: + if avoid_collision: # If an object is grasped, we need to pass it to the motion planner obj_in_hand = self._get_obj_in_hand() if obj_in_hand is not None: @@ -798,16 +809,26 @@ def _move_hand(self, target_pose, plan_motion=True, arm=None, attached_obj=None, attached_obj = {self.robot.eef_link_names[arm]: obj_in_hand.root_link} yield from self._settle_robot() # curobo motion generator takes a pose but outputs joint positions - left_hand_pos, left_hand_quat = target_pose if arm == "left" else self.robot.get_eef_pose(arm="left") - right_hand_pos, right_hand_quat = target_pose if arm == "right" else self.robot.get_eef_pose(arm="right") - target_pos = { - self.robot.eef_link_names["left"]: left_hand_pos, - self.robot.eef_link_names["right"]: right_hand_pos, - } - target_quat = { - self.robot.eef_link_names["left"]: left_hand_quat, - self.robot.eef_link_names["right"]: right_hand_quat, - } + if isinstance(self.robot, Tiago) and not m.TIAGO_TORSO_FIXED: + target_pos = { + self.robot.eef_link_names[self.arm]: target_pose[0], + } + target_quat = { + self.robot.eef_link_names[self.arm]: target_pose[1], + } + else: + left_hand_pos, left_hand_quat = target_pose if arm == "left" else self.robot.get_eef_pose(arm="left") + right_hand_pos, right_hand_quat = ( + target_pose if arm == "right" else self.robot.get_eef_pose(arm="right") + ) + target_pos = { + self.robot.eef_link_names["left"]: 
left_hand_pos, + self.robot.eef_link_names["right"]: right_hand_pos, + } + target_quat = { + self.robot.eef_link_names["left"]: left_hand_quat, + self.robot.eef_link_names["right"]: right_hand_quat, + } q_traj = self._plan_joint_motion( target_pos=target_pos, target_quat=target_quat, @@ -819,10 +840,10 @@ def _move_hand(self, target_pose, plan_motion=True, arm=None, attached_obj=None, # Move EEF directly without collsion checking arm_joint_pos = self._convert_cartesian_to_joint_space(target_pose, arm=arm) single_traj = self.robot.get_joint_positions() - single_traj[self.robot.arm_control_idx[arm]] = arm_joint_pos + single_traj[self._manipulation_control_idx(arm)] = arm_joint_pos q_traj = [single_traj] indented_print(f"Plan has {len(q_traj)} steps") - yield from self._execute_motion_plan(q_traj, stop_on_contact=plan_motion is False) + yield from self._execute_motion_plan(q_traj, stop_on_contact=avoid_collision is False) def _plan_joint_motion( self, @@ -835,13 +856,18 @@ def _plan_joint_motion( planning_attempts = 0 success = False traj_path = None - self._motion_generator.update_obstacles() + # aggregate target_pos and target_quat to match batch_size + target_pos = {k: th.stack([v for _ in range(self._motion_generator.batch_size)]) for k, v in target_pos.items()} + target_quat = { + k: th.stack([v for _ in range(self._motion_generator.batch_size)]) for k, v in target_quat.items() + } + # TODO: call curobo with batch_size > 1 instead of iterating while not success and planning_attempts < m.MAX_PLANNING_ATTEMPTS: successes, traj_paths = self._motion_generator.compute_trajectories( target_pos=target_pos, target_quat=target_quat, is_local=False, - max_attempts=1, + max_attempts=5, timeout=60.0, ik_fail_return=5, enable_finetune_trajopt=True, @@ -850,15 +876,15 @@ def _plan_joint_motion( success_ratio=1.0, attached_obj=attached_obj, motion_constraint=motion_constraint, - skip_obstacle_update=True, emb_sel=embodiment_selection, ) - success, traj_path = successes[0].item(), traj_paths[0] - success = success and len(traj_path) != 4 - if traj_path is not None and len(traj_path) == 4: - breakpoint() - planning_attempts += 1 - + # Grab the first successful trajectory, if not found, then continue planning + success_idx = th.where(successes)[0].cpu() + if len(success_idx) > 0: + success = True + traj_path = traj_paths[success_idx[0]] + else: + planning_attempts += self._motion_generator.batch_size if not success: raise ActionPrimitiveError( ActionPrimitiveError.Reason.PLANNING_ERROR, @@ -869,39 +895,44 @@ def _plan_joint_motion( def _execute_motion_plan(self, q_traj, stop_on_contact=False, ignore_failure=False): for i, joint_pos in enumerate(q_traj): - indented_print(f"Executing arm motion plan step {i + 1}/{len(q_traj)}") + indented_print(f"Executing motion plan step {i + 1}/{len(q_traj)}") # Convert target joint positions to command action = self._q_to_action(joint_pos) base_target_reached = False articulation_target_reached = False collision_detected = False + articulation_control_idx = th.cat( + (self.robot.arm_control_idx["left"], self.robot.arm_control_idx["right"], self.robot.trunk_control_idx) + ) for _ in range(m.MAX_STEPS_FOR_JOINT_MOTION): current_joint_pos = self.robot.get_joint_positions() joint_pos_diff = joint_pos - current_joint_pos base_joint_diff = joint_pos_diff[self.robot.base_control_idx] - articulation_joint_diff = joint_pos_diff[~self.robot.base_control_idx] # Gets all non-base joints + articulation_joint_diff = joint_pos_diff[articulation_control_idx] # Gets all non-base joints 
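
Editor's note: the `_plan_joint_motion` hunk above tiles each per-link target across the planner's batch dimension before calling curobo. A minimal standalone sketch of that tiling, assuming torch tensors (the helper name is illustrative, not part of the patch):

    import torch as th

    def tile_targets(targets, batch_size):
        # Repeat each link's target so the batched planner solves batch_size
        # copies of the same goal, mirroring the th.stack calls above.
        return {link: th.stack([pose] * batch_size) for link, pose in targets.items()}
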
                if th.max(th.abs(articulation_joint_diff)).item() < m.JOINT_POS_DIFF_THRESHOLD:
                    articulation_target_reached = True
                # TODO: generalize this to translation & rotation + high/low precision modes
-                if th.max(th.abs(base_joint_diff)).item() < m.DEFAULT_DIST_THRESHOLD:
+                distance_threshold = (
+                    m.LOW_PRECISION_DIST_THRESHOLD if isinstance(self.robot, Tiago) else m.DEFAULT_DIST_THRESHOLD
+                )
+                if th.max(th.abs(base_joint_diff)).item() < distance_threshold:
                     base_target_reached = True
                 if base_target_reached and articulation_target_reached:
                     break
-                collision_detected = detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=False)
-                if stop_on_contact and collision_detected:
-                    break
+                # TODO: bring this back once we sort out the controller overshoot problem
+                # collision_detected = detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=False)
+                # if stop_on_contact and collision_detected:
+                #     break
                 yield self._postprocess_action(action)

             if not stop_on_contact and not ignore_failure:
                 if not base_target_reached:
-                    breakpoint()
                     raise ActionPrimitiveError(
                         ActionPrimitiveError.Reason.EXECUTION_ERROR,
                         "Could not reach the target base joint positions. Try again",
                     )
                 if not articulation_target_reached:
-                    breakpoint()
                     raise ActionPrimitiveError(
                         ActionPrimitiveError.Reason.EXECUTION_ERROR,
                         "Could not reach the target articulation joint positions. Try again",
                     )
-            # elif collision_detected:
-            #     # TODO: figure out the logic between collision and failure
-            #     break
@@ -959,7 +990,7 @@ def _move_hand_direct_joint(self, joint_pos, stop_on_contact=False, ignore_failu
         self._arm_targets[controller_name] = joint_pos

         for i in range(m.MAX_STEPS_FOR_HAND_MOVE_JOINT):
-            current_joint_pos = self.robot.get_joint_positions()[self._manipulation_control_idx]
+            current_joint_pos = self.robot.get_joint_positions()[self._manipulation_control_idx(self.arm)]
             diff_joint_pos = joint_pos - current_joint_pos
             if th.max(th.abs(diff_joint_pos)).item() < m.JOINT_POS_DIFF_THRESHOLD:
                 return
@@ -1142,11 +1173,11 @@ def _move_hand_linearly_cartesian(

         for joint_pos in joint_space_data:
             # Check if the movement can be done roughly linearly.
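
Editor's note: a compact sketch of the convergence test the `_execute_motion_plan` loop above implements, with separate tolerances for the base and the articulated joints (the function name and default tolerances are illustrative, not the module's macros):

    import torch as th

    def target_reached(target_q, current_q, base_idx, articulation_idx,
                       base_tol=0.05, articulation_tol=0.01):
        # Largest absolute per-joint error, split into base DOFs and the rest.
        diff = (target_q - current_q).abs()
        return (diff[base_idx].max().item() < base_tol
                and diff[articulation_idx].max().item() < articulation_tol)
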
- current_joint_positions = self.robot.get_joint_positions()[self._manipulation_control_idx] + current_joint_positions = self.robot.get_joint_positions()[self._manipulation_control_idx(self.arm)] failed_joints = [] for joint_idx, target_joint_pos, current_joint_pos in zip( - self._manipulation_control_idx, joint_pos, current_joint_positions + self._manipulation_control_idx(self.arm), joint_pos, current_joint_positions ): if th.abs(target_joint_pos - current_joint_pos) > m.MAX_ALLOWED_JOINT_ERROR_FOR_LINEAR_MOTION: failed_joints.append(joints[joint_idx].joint_name) @@ -1348,7 +1379,7 @@ def _empty_action(self, follow_arm_targets=True): raise ValueError("Unexpected IK control mode") else: target_joint_pos = self._arm_targets[name] - current_joint_pos = self.robot.get_joint_positions()[self._manipulation_control_idx] + current_joint_pos = self.robot.get_joint_positions()[self._manipulation_control_idx(self.arm)] if controller.use_delta_commands: partial_action = target_joint_pos - current_joint_pos else: @@ -1403,10 +1434,17 @@ def _get_reset_eef_pose(self, frame="robot"): th.tensor([-1.0, 0.0, 0.0, 0.0]), ), } - # TODO: Define this for Tiago - # th.tensor([0.28493954, 0.37450749, 1.1512334]), th.tensor( - # [-0.21533823, 0.05361032, -0.08631776, 0.97123871] - # ) + elif isinstance(self.robot, Tiago): + pose = { + self.robot.arm_names[0]: ( + th.tensor([0.4997, 0.2497, 0.6357]), + th.tensor([-0.5609, 0.5617, 0.4299, 0.4302]), + ), + self.robot.arm_names[1]: ( + th.tensor([0.4978, -0.2521, 0.6357]), + th.tensor([-0.5609, -0.5617, 0.4299, -0.4302]), + ), + } else: raise ValueError(f"Unsupported robot: {type(self.robot)}") if frame == "robot": @@ -1535,10 +1573,10 @@ def _navigate_if_needed(self, obj, pose_on_obj=None, **kwargs): th.tensor or None: Action array for one step for the robot to navigate or None if it is done navigating """ if pose_on_obj is not None: - if self._target_in_reach_of_robot(pose_on_obj): + if self._target_in_reach_of_robot(pose_on_obj)[self.arm]: # No need to navigate. 
return - elif self._target_in_reach_of_robot(obj.get_position_orientation()): + elif self._target_in_reach_of_robot(obj.get_position_orientation())[self.arm]: return yield from self._navigate_to_obj(obj, pose_on_obj=pose_on_obj, **kwargs) @@ -1675,50 +1713,50 @@ def _sample_pose_near_object(self, obj, pose_on_obj=None, **kwargs): - 3-array: (x,y,z) Position in the world frame - 4-array: (x,y,z,w) Quaternion orientation in the world frame """ - for _ in range(3): - og.sim.step() - self._motion_generator.update_obstacles() + # TODO: make this a macro + distance_lo, distance_hi = 0.0, 1.0 + yaw_lo, yaw_hi = -math.pi, math.pi + avg_arm_workspace_range = th.mean(self.robot.arm_workspace_range[self.arm]) + + target_pose = ( + (self._sample_position_on_aabb_side(obj), th.tensor([0, 0, 0, 1])) if pose_on_obj is None else pose_on_obj + ) + + obj_rooms = ( + obj.in_rooms if obj.in_rooms else [self.robot.scene._seg_map.get_room_instance_by_point(target_pose[0][:2])] + ) + attempt = 0 while attempt < m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_NEAR_OBJECT: - # for _ in range(m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_NEAR_OBJECT): - if pose_on_obj is None: - pos_on_obj = self._sample_position_on_aabb_side(obj) - pose_on_obj = [pos_on_obj, th.tensor([0, 0, 0, 1])] - - distance_lo, distance_hi = 0.0, 1.0 - distance = (th.rand(1) * (distance_hi - distance_lo) + distance_lo).item() - yaw_lo, yaw_hi = -math.pi, math.pi - yaw = th.rand(1) * (yaw_hi - yaw_lo) + yaw_lo - avg_arm_workspace_range = th.mean(self.robot.arm_workspace_range[self.arm]) - pose_2d = th.cat( - [ - pose_on_obj[0][0] + distance * th.cos(yaw), - pose_on_obj[0][1] + distance * th.sin(yaw), - yaw + math.pi - avg_arm_workspace_range, - ] - ) - if self.debug_visual_marker is not None: - pose_3d = self._get_robot_pose_from_2d_pose(pose_2d) - self.debug_visual_marker.set_position_orientation(*pose_3d) - og.sim.step() + candidate_poses = [] + for _ in range(self._collision_check_batch_size): + while True: + distance = (th.rand(1) * (distance_hi - distance_lo) + distance_lo).item() + yaw = th.rand(1) * (yaw_hi - yaw_lo) + yaw_lo + candidate_2d_pose = th.cat( + [ + target_pose[0][0] + distance * th.cos(yaw), + target_pose[0][1] + distance * th.sin(yaw), + yaw + math.pi - avg_arm_workspace_range, + ] + ) - # Check room - obj_rooms = ( - obj.in_rooms - if obj.in_rooms - else [self.robot.scene._seg_map.get_room_instance_by_point(pose_on_obj[0][:2])] - ) - if self.robot.scene._seg_map.get_room_instance_by_point(pose_2d[:2]) not in obj_rooms: - # indented_print("Candidate position is in the wrong room.") - continue + # Check room + if self.robot.scene._seg_map.get_room_instance_by_point(candidate_2d_pose[:2]) in obj_rooms: + # indented_print("Candidate position is in the wrong room.") + break + candidate_poses.append(candidate_2d_pose) - if not self._test_pose(pose_2d, pose_on_obj=pose_on_obj, **kwargs): - attempt += 1 - continue + result = self._validate_poses(candidate_poses, pose_on_obj=target_pose, **kwargs) + + # If anything in result is true, return the pose + for i, res in enumerate(result): + if res: + indented_print("Found valid position near object.") + return candidate_poses[i] - indented_print("Found valid position near object.") - return pose_2d + attempt += self._collision_check_batch_size raise ActionPrimitiveError( ActionPrimitiveError.Reason.SAMPLING_ERROR, @@ -1829,35 +1867,46 @@ def _sample_pose_with_object_and_predicate( {"target object": target_obj.name, "object in hand": held_obj.name, "relation": pred_map[predicate]}, ) - def _test_pose(self, 
pose_2d, pose_on_obj=None): + def _validate_poses(self, candidate_poses, pose_on_obj=None, arm=None): """ - Determines whether the robot can reach the pose on the object and is not in collision at the specified 2d pose + Determines whether the robot can reach the pose on the object and is not in collision at the specified 2d poses Args: - pose_2d (Iterable): (x, y, yaw) 2d pose + list of arrays: Candidate 2d poses (x, y, yaw) pose_on_obj (Iterable of arrays): Pose on the object in the world frame Returns: - bool: True if the robot is in a valid pose, False otherwise + list of bool: Whether the robot can reach the pose on the object and is not in collision at the specified 2d poses """ - pose = self._get_robot_pose_from_2d_pose(pose_2d) - if pose_on_obj is not None: - relative_pose = T.relative_pose_transform(*pose_on_obj, *pose) - if not self._target_in_reach_of_robot_relative(relative_pose): - return False + if arm is None: + arm = self.arm - joint_pos = self.robot.get_joint_positions() - joint_pos[self.robot.base_idx[:2]] = pose[0][:2] - joint_pos[self.robot.base_idx[3:]] = T.quat2euler(pose[1]) + # result = [False] * len(candidate_poses) + candidate_joint_positions = [] + current_joint_pos = self.robot.get_joint_positions() + for pose in candidate_poses: + joint_pos = current_joint_pos.clone() + joint_pos[self.robot.base_idx[:2]] = pose[:2] + joint_pos[self.robot.base_idx[3:]] = th.tensor([0.0, 0.0, pose[2]]) + candidate_joint_positions.append(joint_pos) + candidate_joint_positions = th.stack(candidate_joint_positions) + invalid_results = self._motion_generator.check_collisions( + candidate_joint_positions, check_self_collision=False + ).cpu() + + # Grab the candidates that passed that collision check and check if they are in reach + # TODO: iteratively call target_in_reach with LULA for now; change to curobo batched checking later for speed + for i in range(len(candidate_poses)): + if invalid_results[i].item(): + continue - collision_results = self._motion_generator.check_collisions( - joint_pos, check_self_collision=False, skip_obstacle_update=True - ) + if pose_on_obj is not None: + pose = self._get_robot_pose_from_2d_pose(candidate_poses[i]) + relative_pose = T.relative_pose_transform(*pose_on_obj, *pose) + if not self._target_in_reach_of_robot_relative(relative_pose)[arm]: + invalid_results[i] = True - if collision_results[0].cpu().item(): - indented_print("Candidate position failed collision test.") - return False - return True + return ~invalid_results @staticmethod def _get_robot_pose_from_2d_pose(pose_2d): @@ -1942,6 +1991,7 @@ def _settle_robot(self): Returns: th.tensor or None: Action array for one step for the robot to do nothing """ + # TODO: fix empty action for _ in range(30): empty_action = self._q_to_action(self.robot.get_joint_positions()) yield self._postprocess_action(empty_action) From 671db194f2336b67225f4d7858a13ef70670f1f5 Mon Sep 17 00:00:00 2001 From: hang-yin Date: Tue, 12 Nov 2024 15:06:31 -0800 Subject: [PATCH 10/76] Tune batch size to avoid gpu memory error --- .../starter_semantic_action_primitives.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index 684635b97..7800be840 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -131,7 +131,7 @@ def __init__( # TODO: fix this later 
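
Editor's note: `_validate_poses` above replaces the one-pose-at-a-time `_test_pose` with a single batched collision query followed by per-candidate reachability checks. A minimal sketch of that pattern, with hypothetical callables standing in for the curobo and reach APIs:

    import torch as th

    def validate_candidates(candidates, current_q, base_xy_idx, base_rpy_idx,
                            batched_collision_check, reachable):
        qs = []
        for x, y, yaw in candidates:
            q = current_q.clone()
            q[base_xy_idx] = th.tensor([x, y])
            q[base_rpy_idx] = th.tensor([0.0, 0.0, yaw])
            qs.append(q)
        invalid = batched_collision_check(th.stack(qs))  # one query for all candidates
        # Only collision-free candidates get the slower reachability check.
        return [(not bad) and reachable((x, y, yaw))
                for bad, (x, y, yaw) in zip(invalid.tolist(), candidates)]
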
always_track_eef=True, task_relevant_objects_only=False, - planning_batch_size=5, + planning_batch_size=3, collision_check_batch_size=5, debug_visual_marker=None, ): @@ -550,8 +550,9 @@ def _grasp(self, obj): indented_print("Moving hands back") # TODO: reset both hands with one call - for arm in self.robot.arm_names: - yield from self._reset_hand(arm) + yield from self._reset_hand(self.arm) + # for arm in self.robot.arm_names: + # yield from self._reset_hand(arm) indented_print("Done with grasp") From b3fd62b6e041c68712918adc40f89e12e5f7fcc2 Mon Sep 17 00:00:00 2001 From: hang-yin Date: Tue, 12 Nov 2024 15:06:56 -0800 Subject: [PATCH 11/76] Add integral term to joint controller --- omnigibson/controllers/joint_controller.py | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/omnigibson/controllers/joint_controller.py b/omnigibson/controllers/joint_controller.py index 4ee046cd6..26710b24f 100644 --- a/omnigibson/controllers/joint_controller.py +++ b/omnigibson/controllers/joint_controller.py @@ -20,6 +20,8 @@ # Create settings for this module m = create_module_macros(module_path=__file__) m.DEFAULT_JOINT_POS_KP = 50.0 +m.DEFAULT_JOINT_POS_KI = 0.1 +m.DEFAULT_JOINT_MAX_INTEGRAL_ERROR = 1.0 m.DEFAULT_JOINT_POS_DAMPING_RATIO = 1.0 # critically damped m.DEFAULT_JOINT_VEL_KP = 2.0 @@ -45,6 +47,8 @@ def __init__( command_output_limits="default", pos_kp=None, pos_damping_ratio=None, + pos_ki=None, + max_integral_error=None, vel_kp=None, use_impedances=False, use_gravity_compensation=False, @@ -102,6 +106,10 @@ def __init__( if self._motor_type == "position": pos_kp = m.DEFAULT_JOINT_POS_KP if pos_kp is None else pos_kp pos_damping_ratio = m.DEFAULT_JOINT_POS_DAMPING_RATIO if pos_damping_ratio is None else pos_damping_ratio + pos_ki = m.DEFAULT_JOINT_POS_KI if pos_ki is None else pos_ki + max_integral_error = ( + m.DEFAULT_JOINT_MAX_INTEGRAL_ERROR if max_integral_error is None else max_integral_error + ) elif self._motor_type == "velocity": vel_kp = m.DEFAULT_JOINT_VEL_KP if vel_kp is None else vel_kp assert ( @@ -113,6 +121,9 @@ def __init__( assert vel_kp is None, "Cannot set vel_kp for JointController with motor_type=effort!" 
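
Editor's note: the gains wired in above feed the control law changed in the next hunk. A self-contained sketch of that anti-windup update, with illustrative names rather than the controller's actual interface:

    import torch as th

    class AntiWindupPid:
        def __init__(self, kp, kd, ki, max_integral, control_freq):
            self.kp, self.kd, self.ki = kp, kd, ki
            self.max_integral = max_integral
            self.dt = 1.0 / control_freq
            self.integral = th.zeros(1)

        def step(self, pos_error, joint_vel):
            # Accumulate the position error over time, then clamp it so a
            # persistent offset cannot wind the effort up without bound.
            self.integral = (self.integral + pos_error * self.dt).clip(
                -self.max_integral, self.max_integral
            )
            # Damping acts on measured velocity, matching vel_pos_error = -velocity.
            return pos_error * self.kp - joint_vel * self.kd + self.integral * self.ki
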
self.pos_kp = pos_kp self.pos_kd = None if pos_kp is None or pos_damping_ratio is None else 2 * math.sqrt(pos_kp) * pos_damping_ratio + self.pos_ki = pos_ki + self._integral_error = th.zeros_like(dof_idx, dtype=th.float32) + self.max_integral_error = max_integral_error self.vel_kp = vel_kp self._use_impedances = use_impedances self._use_gravity_compensation = use_gravity_compensation @@ -204,7 +215,11 @@ def compute_control(self, goal_dict, control_dict): # Run impedance controller -- effort = pos_err * kp + vel_err * kd position_error = target - base_value vel_pos_error = -control_dict[f"joint_velocity"][self.dof_idx] - u = position_error * self.pos_kp + vel_pos_error * self.pos_kd + + # Update integral error + self._integral_error += position_error * (1.0 / self.control_freq) # Integrate error over time + self._integral_error = self._integral_error.clip(-self.max_integral_error, self.max_integral_error) + u = position_error * self.pos_kp + vel_pos_error * self.pos_kd + self._integral_error * self.pos_ki elif self._motor_type == "velocity": # Compute command torques via PI velocity controller plus gravity compensation torques velocity_error = target - base_value From 81647d3c95c457a8a93dd91c4fea3f3aec4bd8c1 Mon Sep 17 00:00:00 2001 From: hang-yin Date: Tue, 12 Nov 2024 15:15:44 -0800 Subject: [PATCH 12/76] Move hand upwards by a little after grasp to avoid getting stuck --- .../starter_semantic_action_primitives.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index 7800be840..29604cb12 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -517,8 +517,8 @@ def _grasp(self, obj): indented_print("Moving hand to grasp pose") yield from self._move_hand(grasp_pose, motion_constraint=[0, 0, 0, 0, 0, 0]) - # Pre-grasp in sticky grasping mode. if self.robot.grasping_mode == "sticky": + # Pre-grasp in sticky grasping mode. 
indented_print("Pregrasp squeeze") yield from self._execute_grasp() @@ -534,6 +534,11 @@ def _grasp(self, obj): yield from self._move_hand(approach_pose) # motion_constraint=[0, 0, 0, 0, 0, 1] yield from self._execute_grasp() + # Move hand upwards a little to avoid getting stuck + indented_print("Moving hand upwards") + upward_pose = (self.robot.get_eef_position() - object_direction * 0.05, self.robot.get_eef_orientation()) + yield from self._move_hand(upward_pose) + # Step a few times to update yield from self._settle_robot() From 071baa31bf0ff1f1572e6bbded068c61115129bb Mon Sep 17 00:00:00 2001 From: hang-yin Date: Tue, 12 Nov 2024 15:44:17 -0800 Subject: [PATCH 13/76] Add reset robot for arms and torso --- .../starter_semantic_action_primitives.py | 32 +++++++++++++++---- 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index 29604cb12..e1ebb9cab 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -288,9 +288,8 @@ def apply_ref(self, primitive, *args, attempts=5): # # pass try: - # Make sure we retract the arm after every step - for arm in self.robot.arm_names: - yield from self._reset_hand(arm) + # Make sure we retract the arms after every step + yield from self._reset_robot() except ActionPrimitiveError: pass @@ -553,11 +552,8 @@ def _grasp(self, obj): # TODO: reset density back when releasing obj_in_hand.root_link.density = 1.0 - indented_print("Moving hands back") - # TODO: reset both hands with one call + indented_print("Moving hand back") yield from self._reset_hand(self.arm) - # for arm in self.robot.arm_names: - # yield from self._reset_hand(arm) indented_print("Done with grasp") @@ -1396,6 +1392,28 @@ def _empty_action(self, follow_arm_targets=True): action[action_idx] = partial_action return action + def _reset_robot(self, attached_obj=None): + """ + Yields action to move both hands to the position optimal for executing subsequent action primitives + + Returns: + th.tensor or None: Action array for one step for the robot to reset its hands or None if it is done resetting + """ + target_pos = dict() + target_quat = dict() + for arm in self.robot.arm_names: + reset_eef_pose = self._get_reset_eef_pose("world")[arm] + target_pos[self.robot.eef_link_names[arm]] = reset_eef_pose[0] + target_quat[self.robot.eef_link_names[arm]] = reset_eef_pose[1] + q_traj = self._plan_joint_motion( + target_pos=target_pos, + target_quat=target_quat, + embodiment_selection=CuroboEmbodimentSelection.ARM, + attached_obj=attached_obj, + ).cpu() + indented_print(f"Plan has {len(q_traj)} steps") + yield from self._execute_motion_plan(q_traj) + def _reset_hand(self, arm=None, attached_obj=None): """ Yields action to move the hand to the position optimal for executing subsequent action primitives From 7ba80540ba9f656ead90ba2a3347d12f9474d356 Mon Sep 17 00:00:00 2001 From: hang-yin Date: Wed, 13 Nov 2024 14:25:25 -0800 Subject: [PATCH 14/76] Fix head tracking --- .../action_primitives/starter_semantic_action_primitives.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index e1ebb9cab..a85a241ae 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ 
b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -1345,7 +1345,7 @@ def _get_head_goal_q(self, target_obj_pose): head1_joint_goal = default_head_pos[0] head2_joint_goal = default_head_pos[1] - return [head1_joint_goal, head2_joint_goal] + return th.tensor([head1_joint_goal, head2_joint_goal]) def _empty_action(self, follow_arm_targets=True): """ From 6b8b6703312bb57c486f3ec8bb0a60ac76e31b26 Mon Sep 17 00:00:00 2001 From: hang-yin Date: Wed, 13 Nov 2024 16:19:21 -0800 Subject: [PATCH 15/76] Disable eef tracking by default; add low precision mode for motion plan execution --- .../starter_semantic_action_primitives.py | 29 ++++++++++++++----- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index a85a241ae..8a27cb980 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -96,6 +96,7 @@ m.TIAGO_TORSO_FIXED = False m.JOINT_POS_DIFF_THRESHOLD = 0.01 +m.LOW_PRECISION_JOINT_POS_DIFF_THRESHOLD = 0.1 m.JOINT_CONTROL_MIN_ACTION = 0.0 m.MAX_ALLOWED_JOINT_ERROR_FOR_LINEAR_MOTION = math.radians(45) m.TIME_BEFORE_JOINT_STUCK_CHECK = 1.0 @@ -129,7 +130,7 @@ def __init__( robot, enable_head_tracking=True, # TODO: fix this later - always_track_eef=True, + always_track_eef=False, task_relevant_objects_only=False, planning_batch_size=3, collision_check_batch_size=5, @@ -536,7 +537,7 @@ def _grasp(self, obj): # Move hand upwards a little to avoid getting stuck indented_print("Moving hand upwards") upward_pose = (self.robot.get_eef_position() - object_direction * 0.05, self.robot.get_eef_orientation()) - yield from self._move_hand(upward_pose) + yield from self._move_hand(upward_pose, low_precision=True) # Step a few times to update yield from self._settle_robot() @@ -789,7 +790,15 @@ def _ik_solver_cartesian_to_joint_space(self, relative_target_pose, arm=None): return joint_pos - def _move_hand(self, target_pose, avoid_collision=True, arm=None, attached_obj=None, motion_constraint=None): + def _move_hand( + self, + target_pose, + avoid_collision=True, + arm=None, + attached_obj=None, + motion_constraint=None, + low_precision=False, + ): """ Yields action for the robot to move hand so the eef is in the target pose using the planner @@ -845,7 +854,9 @@ def _move_hand(self, target_pose, avoid_collision=True, arm=None, attached_obj=N single_traj[self._manipulation_control_idx(arm)] = arm_joint_pos q_traj = [single_traj] indented_print(f"Plan has {len(q_traj)} steps") - yield from self._execute_motion_plan(q_traj, stop_on_contact=avoid_collision is False) + yield from self._execute_motion_plan( + q_traj, stop_on_contact=avoid_collision is False, low_precision=low_precision + ) def _plan_joint_motion( self, @@ -895,7 +906,7 @@ def _plan_joint_motion( return self._motion_generator.path_to_joint_trajectory(traj_path, embodiment_selection) - def _execute_motion_plan(self, q_traj, stop_on_contact=False, ignore_failure=False): + def _execute_motion_plan(self, q_traj, stop_on_contact=False, ignore_failure=False, low_precision=False): for i, joint_pos in enumerate(q_traj): indented_print(f"Executing motion plan step {i + 1}/{len(q_traj)}") # Convert target joint positions to command @@ -912,6 +923,9 @@ def _execute_motion_plan(self, q_traj, stop_on_contact=False, ignore_failure=Fal joint_pos_diff = joint_pos - current_joint_pos base_joint_diff = 
joint_pos_diff[self.robot.base_control_idx]
                 articulation_joint_diff = joint_pos_diff[articulation_control_idx]  # Gets all non-base joints
+                articulation_threshold = (
+                    m.JOINT_POS_DIFF_THRESHOLD if not low_precision else m.LOW_PRECISION_JOINT_POS_DIFF_THRESHOLD
+                )
                 if th.max(th.abs(articulation_joint_diff)).item() < m.JOINT_POS_DIFF_THRESHOLD:
                     articulation_target_reached = True
                 # TODO: generalize this to translation & rotation + high/low precision modes
@@ -1412,7 +1426,7 @@ def _reset_robot(self, attached_obj=None):
             attached_obj=attached_obj,
         ).cpu()
         indented_print(f"Plan has {len(q_traj)} steps")
-        yield from self._execute_motion_plan(q_traj)
+        yield from self._execute_motion_plan(q_traj, low_precision=True)

     def _reset_hand(self, arm=None, attached_obj=None):
         """
@@ -1428,7 +1442,7 @@ def _reset_hand(self, arm=None, attached_obj=None):
         reset_eef_pose = self._get_reset_eef_pose("world")[arm]
         if self.debug_visual_marker is not None:
             self.debug_visual_marker.set_position_orientation(*reset_eef_pose)
-        yield from self._move_hand(reset_eef_pose, arm=arm, attached_obj=attached_obj)
+        yield from self._move_hand(reset_eef_pose, arm=arm, attached_obj=attached_obj, low_precision=True)

     def _get_reset_eef_pose(self, frame="robot"):
         """
@@ -1550,7 +1564,6 @@ def _navigate_to_pose(self, pose_2d):
         # TODO: Change curobo implementation so that base motion planning takes a 2D pose instead of 3D?
         pose_3d = self._get_robot_pose_from_2d_pose(pose_2d)
         # TODO: change default to 0.0? Why is it 0.1?
-        # TODO: do we still need high/low precision modes?
         pose_3d[0][2] = 0.0
         if self.debug_visual_marker is not None:
             self.debug_visual_marker.set_position_orientation(*pose_3d)

From ec5d68b17081fb0190c587a12a9bd5112495581c Mon Sep 17 00:00:00 2001
From: hang-yin
Date: Fri, 15 Nov 2024 13:40:27 -0800
Subject: [PATCH 16/76] Support for arm setting

---
 .../starter_semantic_action_primitives.py    | 42 ++++++++++++-------
 1 file changed, 27 insertions(+), 15 deletions(-)

diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py
index 8a27cb980..9114a1201 100644
--- a/omnigibson/action_primitives/starter_semantic_action_primitives.py
+++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py
@@ -42,7 +42,8 @@
 m = create_module_macros(module_path=__file__)

-m.DEFAULT_BODY_OFFSET_FROM_FLOOR = 0.01
+# TODO: figure out why this was 0.01
+m.DEFAULT_BODY_OFFSET_FROM_FLOOR = 0.0

 m.KP_LIN_VEL = {
     Tiago: 0.3,
@@ -176,6 +177,7 @@ def __init__(
         self._enable_head_tracking = enable_head_tracking
         self._always_track_eef = always_track_eef
         self._tracking_object = None
+        self._arm = self.robot.default_arm

         # Store the current position of the arm as the arm target
         control_dict = self.robot.get_control_dict()
@@ -202,7 +204,15 @@ def __init__(
     def arm(self):
         if not isinstance(self.robot, ManipulationRobot):
             raise ValueError("Cannot use arm for non-manipulation robot")
-        return self.robot.default_arm
+        return self._arm
+
+    @arm.setter
+    def arm(self, value):
+        if not isinstance(self.robot, ManipulationRobot):
+            raise ValueError("Cannot use arm for non-manipulation robot")
+        if value not in self.robot.arm_names:
+            raise ValueError(f"Invalid arm name: {value}.
Must be one of {self.robot.arm_names}") + self._arm = value def _postprocess_action(self, action): """Postprocesses action by applying head tracking.""" @@ -1623,8 +1633,8 @@ def _navigate_to_obj(self, obj, pose_on_obj=None, **kwargs): Yields action to navigate the robot to be in range of the pose Args: - obj (StatefulObject): object to be in range of - pose_on_obj (Iterable): (pos, quat) pose + obj (StatefulObject or list of StatefulObject): object(s) to be in range of + pose_on_obj (Iterable or list of Iterable): (pos, quat) Pose(s) Returns: th.tensor or None: Action array for one step for the robot to navigate in range or None if it is done navigating @@ -1742,8 +1752,9 @@ def _sample_pose_near_object(self, obj, pose_on_obj=None, **kwargs): Returns a 2d pose for the robot within in the range of the object and where the robot is not in collision with anything Args: - obj (StatefulObject): Object to sample a 2d pose near - pose_on_obj (Iterable of arrays or None): The pose to sample near + obj (StatefulObject or list of StatefulObject): object(s) to sample a 2d pose near + pose_on_obj (Iterable or list of Iterable): (pos, quat) Pose(s) to sample near. + If provided, must match the length of obj list if obj is a list Returns: 2-tuple: @@ -1751,7 +1762,7 @@ def _sample_pose_near_object(self, obj, pose_on_obj=None, **kwargs): - 4-array: (x,y,z,w) Quaternion orientation in the world frame """ # TODO: make this a macro - distance_lo, distance_hi = 0.0, 1.0 + distance_lo, distance_hi = 0.0, 1.5 yaw_lo, yaw_hi = -math.pi, math.pi avg_arm_workspace_range = th.mean(self.robot.arm_workspace_range[self.arm]) @@ -1765,7 +1776,6 @@ def _sample_pose_near_object(self, obj, pose_on_obj=None, **kwargs): attempt = 0 while attempt < m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_NEAR_OBJECT: - candidate_poses = [] for _ in range(self._collision_check_batch_size): while True: @@ -1906,19 +1916,21 @@ def _sample_pose_with_object_and_predicate( def _validate_poses(self, candidate_poses, pose_on_obj=None, arm=None): """ - Determines whether the robot can reach the pose on the object and is not in collision at the specified 2d poses + Determines whether the robot can reach all poses on the objects and is not in collision at the specified 2d poses Args: - list of arrays: Candidate 2d poses (x, y, yaw) - pose_on_obj (Iterable of arrays): Pose on the object in the world frame + candidate_poses (list of arrays): Candidate 2d poses (x, y, yaw) + pose_on_obj (Iterable of arrays or list of Iterables): Pose(s) on the object(s) in the world frame. + Can be a single pose or list of poses. 
Returns: - list of bool: Whether the robot can reach the pose on the object and is not in collision at the specified 2d poses + list of bool: Whether any robot arm can reach all poses on the objects and is not in collision + at the specified 2d poses """ if arm is None: arm = self.arm - # result = [False] * len(candidate_poses) + # First check collisions for all candidate poses candidate_joint_positions = [] current_joint_pos = self.robot.get_joint_positions() for pose in candidate_poses: @@ -1926,13 +1938,13 @@ def _validate_poses(self, candidate_poses, pose_on_obj=None, arm=None): joint_pos[self.robot.base_idx[:2]] = pose[:2] joint_pos[self.robot.base_idx[3:]] = th.tensor([0.0, 0.0, pose[2]]) candidate_joint_positions.append(joint_pos) + candidate_joint_positions = th.stack(candidate_joint_positions) invalid_results = self._motion_generator.check_collisions( candidate_joint_positions, check_self_collision=False ).cpu() - # Grab the candidates that passed that collision check and check if they are in reach - # TODO: iteratively call target_in_reach with LULA for now; change to curobo batched checking later for speed + # For each candidate that passed collision check, verify reachability for i in range(len(candidate_poses)): if invalid_results[i].item(): continue From 4466ee5bf81e44b2898c65bc167327ff38c2669d Mon Sep 17 00:00:00 2001 From: hang-yin Date: Mon, 18 Nov 2024 15:45:48 -0800 Subject: [PATCH 17/76] Robustify grasp primitive --- .../starter_semantic_action_primitives.py | 165 ++++++++++-------- omnigibson/utils/grasping_planning_utils.py | 5 +- 2 files changed, 92 insertions(+), 78 deletions(-) diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index 9114a1201..0f3c502c0 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -86,7 +86,7 @@ m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_IN_ROOM = 60 m.PREDICATE_SAMPLING_Z_OFFSET = 0.02 -m.GRASP_APPROACH_DISTANCE = 0.2 +m.GRASP_APPROACH_DISTANCE = 0.01 m.OPEN_GRASP_APPROACH_DISTANCE = 0.4 m.HAND_DIST_THRESHOLD = 0.002 @@ -511,13 +511,17 @@ def _grasp(self, obj): grasp_poses = get_grasp_poses_for_object_sticky(obj) grasp_pose, object_direction = random.choice(grasp_poses) - # Take into account the robot eef reset pose + grasp_offset_in_z = self.robot.finger_lengths[self.arm] + m.GRASP_APPROACH_DISTANCE + + # Adjust grasp pose with reset orientation and finger length offset reset_orientation = self._get_reset_eef_pose("world")[self.arm][1] - grasp_pose = (grasp_pose[0], T.quat_multiply(grasp_pose[1], reset_orientation)) + grasp_pos = grasp_pose[0] - object_direction * grasp_offset_in_z + grasp_quat = T.quat_multiply(grasp_pose[1], reset_orientation) + grasp_pose = (grasp_pos, grasp_quat) # Prepare data for the approach later. # TODO: fix this threshold - approach_pos = grasp_pose[0] + object_direction * 0.055 # m.GRASP_APPROACH_DISTANCE + approach_pos = grasp_pose[0] + object_direction * grasp_offset_in_z approach_pose = (approach_pos, grasp_pose[1]) # If the grasp pose is too far, navigate. 
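
Editor's note: the grasp hunk above now backs the end effector off along the approach axis by the finger length plus `m.GRASP_APPROACH_DISTANCE`, then approaches back in by the same offset so the fingertips land on the originally sampled grasp point. A sketch of that geometry (names illustrative):

    import torch as th

    def offset_grasp(grasp_pos, object_direction, finger_length, approach_distance=0.01):
        offset = finger_length + approach_distance
        pregrasp_pos = grasp_pos - object_direction * offset     # fingertips clear of the object
        approach_pos = pregrasp_pos + object_direction * offset  # back on the sampled grasp point
        return pregrasp_pos, approach_pos
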
@@ -525,30 +529,23 @@ def _grasp(self, obj): yield from self._navigate_if_needed(obj, pose_on_obj=grasp_pose) indented_print("Moving hand to grasp pose") - yield from self._move_hand(grasp_pose, motion_constraint=[0, 0, 0, 0, 0, 0]) + yield from self._move_hand(grasp_pose, motion_constraint=[0, 0, 0, 0, 1, 0]) if self.robot.grasping_mode == "sticky": # Pre-grasp in sticky grasping mode. indented_print("Pregrasp squeeze") yield from self._execute_grasp() - - if self._get_obj_in_hand() is None: - # Since the grasp pose is slightly off the object, we want to move towards the object, around 5cm. - # It's okay if we can't go all the way because we run into the object. - indented_print("Performing grasp approach") - # Use direct IK to move the hand to the approach pose. - yield from self._move_hand(approach_pose, avoid_collision=False) + # Since the grasp pose is slightly off the object, we want to move towards the object, around 5cm. + # It's okay if we can't go all the way because we run into the object. + indented_print("Performing grasp approach") + # Use direct IK to move the hand to the approach pose. + yield from self._move_hand(approach_pose, avoid_collision=False) elif self.robot.grasping_mode == "assisted": indented_print("Performing grasp approach") # TODO: implement linear cartesian motion with curobo constrained planning yield from self._move_hand(approach_pose) # motion_constraint=[0, 0, 0, 0, 0, 1] yield from self._execute_grasp() - # Move hand upwards a little to avoid getting stuck - indented_print("Moving hand upwards") - upward_pose = (self.robot.get_eef_position() - object_direction * 0.05, self.robot.get_eef_orientation()) - yield from self._move_hand(upward_pose, low_precision=True) - # Step a few times to update yield from self._settle_robot() @@ -859,14 +856,13 @@ def _move_hand( ).cpu() else: # Move EEF directly without collsion checking - arm_joint_pos = self._convert_cartesian_to_joint_space(target_pose, arm=arm) - single_traj = self.robot.get_joint_positions() - single_traj[self._manipulation_control_idx(arm)] = arm_joint_pos - q_traj = [single_traj] + goal_arm_joint_pos = self._convert_cartesian_to_joint_space(target_pose, arm=arm) + curr_joint_pos = self.robot.get_joint_positions() + goal_joint_pos = curr_joint_pos.clone() + goal_joint_pos[self._manipulation_control_idx(arm)] = goal_arm_joint_pos + q_traj = th.stack(self._add_linearly_interpolated_waypoints(plan=[curr_joint_pos, goal_joint_pos])) indented_print(f"Plan has {len(q_traj)} steps") - yield from self._execute_motion_plan( - q_traj, stop_on_contact=avoid_collision is False, low_precision=low_precision - ) + yield from self._execute_motion_plan(q_traj, stop_on_contact=not avoid_collision, low_precision=low_precision) def _plan_joint_motion( self, @@ -916,56 +912,60 @@ def _plan_joint_motion( return self._motion_generator.path_to_joint_trajectory(traj_path, embodiment_selection) - def _execute_motion_plan(self, q_traj, stop_on_contact=False, ignore_failure=False, low_precision=False): + def _execute_motion_plan( + self, q_traj, stop_on_contact=False, ignore_failure=False, low_precision=False, ignore_physics=False + ): for i, joint_pos in enumerate(q_traj): indented_print(f"Executing motion plan step {i + 1}/{len(q_traj)}") - # Convert target joint positions to command - action = self._q_to_action(joint_pos) - - base_target_reached = False - articulation_target_reached = False - collision_detected = False - articulation_control_idx = th.cat( - (self.robot.arm_control_idx["left"], 
self.robot.arm_control_idx["right"], self.robot.trunk_control_idx) - ) - for _ in range(m.MAX_STEPS_FOR_JOINT_MOTION): - current_joint_pos = self.robot.get_joint_positions() - joint_pos_diff = joint_pos - current_joint_pos - base_joint_diff = joint_pos_diff[self.robot.base_control_idx] - articulation_joint_diff = joint_pos_diff[articulation_control_idx] # Gets all non-base joints - articulation_threshold = ( - m.JOINT_POS_DIFF_THRESHOLD if not low_precision else m.LOW_PRECISION_JOINT_POS_DIFF_THRESHOLD - ) - if th.max(th.abs(articulation_joint_diff)).item() < m.JOINT_POS_DIFF_THRESHOLD: - articulation_target_reached = True - # TODO: genralize this to transaltion&rotation + high/low precision modes - distance_threshold = ( - m.LOW_PRECISION_DIST_THRESHOLD if isinstance(self.robot, Tiago) else m.DEFAULT_DIST_THRESHOLD - ) - if th.max(th.abs(base_joint_diff)).item() < distance_threshold: - base_target_reached = True - if base_target_reached and articulation_target_reached: - break - # TODO: bring this back once we sort out the controller overshoot problem - # collision_detected = detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=False) - # if stop_on_contact and collision_detected: - # break - yield self._postprocess_action(action) - if not stop_on_contact and not ignore_failure: - if not base_target_reached: - raise ActionPrimitiveError( - ActionPrimitiveError.Reason.EXECUTION_ERROR, - "Could not reach the target base joint positions. Try again", + if ignore_physics: + self.robot.set_joint_positions(joint_pos) + og.sim.step() + else: + # Convert target joint positions to command + action = self._q_to_action(joint_pos) + + base_target_reached = False + articulation_target_reached = False + collision_detected = False + articulation_control_idx = th.cat( + ( + self.robot.arm_control_idx["left"], + self.robot.arm_control_idx["right"], + self.robot.trunk_control_idx, ) - if not articulation_target_reached: - raise ActionPrimitiveError( - ActionPrimitiveError.Reason.EXECUTION_ERROR, - "Could not reach the target articulation joint positions. Try again", + ) + for _ in range(m.MAX_STEPS_FOR_JOINT_MOTION): + current_joint_pos = self.robot.get_joint_positions() + joint_pos_diff = joint_pos - current_joint_pos + base_joint_diff = joint_pos_diff[self.robot.base_control_idx] + articulation_joint_diff = joint_pos_diff[articulation_control_idx] # Gets all non-base joints + articulation_threshold = ( + m.JOINT_POS_DIFF_THRESHOLD if not low_precision else m.LOW_PRECISION_JOINT_POS_DIFF_THRESHOLD ) - # elif collision_detected: - # # TODO: figure out the logic between collision and failure - # break + if th.max(th.abs(articulation_joint_diff)).item() < m.JOINT_POS_DIFF_THRESHOLD: + articulation_target_reached = True + # TODO: genralize this to transaltion&rotation + high/low precision modes + if th.max(th.abs(base_joint_diff)).item() < m.DEFAULT_DIST_THRESHOLD: + base_target_reached = True + if base_target_reached and articulation_target_reached: + break + collision_detected = detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=True) + if stop_on_contact and collision_detected: + return + yield self._postprocess_action(action) + + if not ignore_failure: + if not base_target_reached: + raise ActionPrimitiveError( + ActionPrimitiveError.Reason.EXECUTION_ERROR, + "Could not reach the target base joint positions. 
Try again", + ) + if not articulation_target_reached: + raise ActionPrimitiveError( + ActionPrimitiveError.Reason.EXECUTION_ERROR, + "Could not reach the target articulation joint positions. Try again", + ) def _q_to_action(self, q): action = [] @@ -975,7 +975,7 @@ def _q_to_action(self, q): assert action.shape[0] == self.robot.action_dim return action - def _add_linearly_interpolated_waypoints(self, plan, max_inter_dist): + def _add_linearly_interpolated_waypoints(self, plan, max_inter_dist=0.01): """ Adds waypoints to the plan so the distance between values in the plan never exceeds the max_inter_dist. @@ -986,13 +986,13 @@ def _add_linearly_interpolated_waypoints(self, plan, max_inter_dist): Returns: Array of arrays: Planned path with additional waypoints """ - plan = th.tensor(plan) + assert len(plan) > 1, "Plan must have at least 2 waypoints to interpolate" interpolated_plan = [] for i in range(len(plan) - 1): max_diff = max(plan[i + 1] - plan[i]) num_intervals = math.ceil(max_diff / max_inter_dist) - interpolated_plan += th.linspace(plan[i], plan[i + 1], num_intervals, endpoint=False).tolist() - interpolated_plan.append(plan[-1].tolist()) + interpolated_plan += multi_dim_linspace(plan[i], plan[i + 1], num_intervals) + interpolated_plan.append(plan[-1]) return interpolated_plan def _move_hand_direct_joint(self, joint_pos, stop_on_contact=False, ignore_failure=False): @@ -1483,14 +1483,25 @@ def _get_reset_eef_pose(self, frame="robot"): ), } elif isinstance(self.robot, Tiago): + # TODO: default trunk position vs. raised trunk position + # pose = { + # self.robot.arm_names[0]: ( + # th.tensor([0.4997, 0.2497, 0.6357]), + # th.tensor([-0.5609, 0.5617, 0.4299, 0.4302]), + # ), + # self.robot.arm_names[1]: ( + # th.tensor([0.4978, -0.2521, 0.6357]), + # th.tensor([-0.5609, -0.5617, 0.4299, -0.4302]), + # ), + # } pose = { self.robot.arm_names[0]: ( - th.tensor([0.4997, 0.2497, 0.6357]), - th.tensor([-0.5609, 0.5617, 0.4299, 0.4302]), + th.tensor([0.5021, 0.2458, 0.7648]), + th.tensor([-0.5599, 0.5649, 0.4303, 0.4269]), ), self.robot.arm_names[1]: ( - th.tensor([0.4978, -0.2521, 0.6357]), - th.tensor([-0.5609, -0.5617, 0.4299, -0.4302]), + th.tensor([0.4999, -0.2486, 0.7633]), + th.tensor([-0.5592, -0.5646, 0.4311, -0.4274]), ), } else: diff --git a/omnigibson/utils/grasping_planning_utils.py b/omnigibson/utils/grasping_planning_utils.py index ffb9692e2..e9274f6e7 100644 --- a/omnigibson/utils/grasping_planning_utils.py +++ b/omnigibson/utils/grasping_planning_utils.py @@ -32,7 +32,10 @@ def get_grasp_poses_for_object_sticky(target_obj): visual=False ) - grasp_center_pos = bbox_center_in_world + th.tensor([0, 0, th.max(bbox_extent_in_base_frame) / 2 + 0.05]) + # TODO: why base frame aabb? 
+ bbox_extent_in_world_frame = target_obj.aabb_extent + + grasp_center_pos = bbox_center_in_world + th.tensor([0, 0, bbox_extent_in_world_frame[2] / 2]) towards_object_in_world_frame = bbox_center_in_world - grasp_center_pos towards_object_in_world_frame /= th.norm(towards_object_in_world_frame) From 6e5682b40e6e2ea7267b832aa6129c7bf54a5058 Mon Sep 17 00:00:00 2001 From: hang-yin Date: Mon, 18 Nov 2024 16:29:20 -0800 Subject: [PATCH 18/76] Add arm locking mode --- .../starter_semantic_action_primitives.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index 0f3c502c0..0d3ecc8c0 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -1,7 +1,7 @@ """ WARNING! The StarterSemanticActionPrimitive is a work-in-progress and is only provided as an example. -It currently only works with Fetch and Tiago with their JointControllers set to delta mode. +It currently only works with Fetch, Tiago, and R1 with their JointControllers set to 1. absolute mode 2. position control with impedance. See provided tiago_primitives.yaml config file for an example. See examples/action_primitives for runnable examples. """ @@ -95,7 +95,7 @@ m.LOW_PRECISION_DIST_THRESHOLD = 0.1 m.LOW_PRECISION_ANGLE_THRESHOLD = 0.2 -m.TIAGO_TORSO_FIXED = False +m.TORSO_FIXED = False m.JOINT_POS_DIFF_THRESHOLD = 0.01 m.LOW_PRECISION_JOINT_POS_DIFF_THRESHOLD = 0.1 m.JOINT_CONTROL_MIN_ACTION = 0.0 @@ -755,7 +755,7 @@ def _manipulation_control_idx(self, arm=None): if arm == "left": return ( self.robot.arm_control_idx["left"] - if m.TIAGO_TORSO_FIXED + if m.TORSO_FIXED else th.cat([self.robot.trunk_control_idx, self.robot.arm_control_idx["left"]]) ) else: @@ -805,6 +805,7 @@ def _move_hand( attached_obj=None, motion_constraint=None, low_precision=False, + lock_auxiliary_arm=False, ): """ Yields action for the robot to move hand so the eef is in the target pose using the planner @@ -827,7 +828,7 @@ def _move_hand( attached_obj = {self.robot.eef_link_names[arm]: obj_in_hand.root_link} yield from self._settle_robot() # curobo motion generator takes a pose but outputs joint positions - if isinstance(self.robot, Tiago) and not m.TIAGO_TORSO_FIXED: + if not lock_auxiliary_arm: target_pos = { self.robot.eef_link_names[self.arm]: target_pose[0], } From 267fad053cfca1519fbfc78dd29e53d843e8ada9 Mon Sep 17 00:00:00 2001 From: hang-yin Date: Mon, 18 Nov 2024 17:30:51 -0800 Subject: [PATCH 19/76] Subsumed pull request #934 --- .../starter_semantic_action_primitives.py | 25 +++----- omnigibson/configs/tiago_primitives.yaml | 2 +- omnigibson/robots/r1.py | 57 ++++++++++++++++++- 3 files changed, 65 insertions(+), 19 deletions(-) diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index 0d3ecc8c0..89c6624dd 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -1,7 +1,7 @@ """ WARNING! The StarterSemanticActionPrimitive is a work-in-progress and is only provided as an example. -It currently only works with Fetch, Tiago, and R1 with their JointControllers set to 1. absolute mode 2. position control with impedance. 
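
Editor's note: the grasping-utils change above switches from the base-frame extent to the world-frame AABB when placing the grasp point on top of the object. A sketch of the resulting top-down sampling (illustrative, torch-style, not the library's exact API):

    import torch as th

    def top_down_grasp_point(bbox_center_world, aabb_extent_world):
        # Sit on the top face of the world-frame AABB, directly above the center.
        grasp_pos = bbox_center_world + th.tensor([0.0, 0.0, float(aabb_extent_world[2]) / 2])
        towards_object = bbox_center_world - grasp_pos
        towards_object = towards_object / th.norm(towards_object)  # unit -z here
        return grasp_pos, towards_object
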
+It currently only works with Fetch, Tiago, and R1 with their JointControllers set to absolute position mode with impedance. See provided tiago_primitives.yaml config file for an example. See examples/action_primitives for runnable examples. """ @@ -150,7 +150,7 @@ def __init__( """ log.warning( "The StarterSemanticActionPrimitive is a work-in-progress and is only provided as an example. " - "It currently only works with Fetch and Tiago with their JointControllers set to delta mode." + "It currently only works with Fetch, Tiago, and R1 with their JointControllers set to absolute position mode with impedance." ) super().__init__(robot) self._motion_generator = CuRoboMotionGenerator(robot=self.robot, batch_size=planning_batch_size) @@ -750,21 +750,13 @@ def _manipulation_control_idx(self, arm=None): """The appropriate manipulation control idx for the current settings.""" if arm is None: arm = self.arm - # TODO: look into this - if isinstance(self.robot, Tiago): - if arm == "left": - return ( - self.robot.arm_control_idx["left"] - if m.TORSO_FIXED - else th.cat([self.robot.trunk_control_idx, self.robot.arm_control_idx["left"]]) - ) - else: - return self.robot.arm_control_idx["right"] - if isinstance(self.robot, Fetch): - return th.cat([self.robot.trunk_control_idx, self.robot.arm_control_idx[self.arm]]) - # Otherwise just return the default arm control idx - return self.robot.arm_control_idx[arm] + if m.TORSO_FIXED or arm == "right": + return self.robot.arm_control_idx[arm] + else: + # Only append trunk control idx if it is not fixed and the arm is left + if hasattr(self.robot, "trunk_control_idx"): + return th.cat([self.robot.trunk_control_idx, self.robot.arm_control_idx[arm]]) def _ik_solver_cartesian_to_joint_space(self, relative_target_pose, arm=None): """ @@ -780,6 +772,7 @@ def _ik_solver_cartesian_to_joint_space(self, relative_target_pose, arm=None): """ if arm is None: arm = self.arm + # TODO: make sure descriptor file match torso & arm setting ik_solver = IKSolver( robot_description_path=self.robot.robot_arm_descriptor_yamls[arm], robot_urdf_path=self.robot.urdf_path, diff --git a/omnigibson/configs/tiago_primitives.yaml b/omnigibson/configs/tiago_primitives.yaml index 44af0a663..66d614148 100644 --- a/omnigibson/configs/tiago_primitives.yaml +++ b/omnigibson/configs/tiago_primitives.yaml @@ -32,7 +32,7 @@ scene: robots: - type: Tiago - obs_modalities: [rgb, depth, seg_semantic, normal, seg_instance, seg_instance_id] + obs_modalities: [rgb] scale: 1.0 self_collisions: true action_normalize: false diff --git a/omnigibson/robots/r1.py b/omnigibson/robots/r1.py index b7364b7b2..403c8fe8c 100644 --- a/omnigibson/robots/r1.py +++ b/omnigibson/robots/r1.py @@ -202,6 +202,33 @@ def assisted_grasp_end_points(self): def trunk_joint_names(self): return [f"torso_joint{i}" for i in range(1, 5)] + @property + def manipulation_link_names(self): + return [ + "torso_link1", + "torso_link2", + "torso_link3", + "torso_link4", + "left_arm_link1", + "left_arm_link2", + "left_arm_link3", + "left_arm_link4", + "left_arm_link5", + "left_arm_link6", + "left_gripper_link1", + "left_gripper_link2", + "left_hand", + "right_arm_link1", + "right_arm_link2", + "right_arm_link3", + "right_arm_link4", + "right_arm_link5", + "right_arm_link6", + "right_gripper_link1", + "right_gripper_link2", + "right_hand", + ] + @classproperty def n_arms(cls): return 2 @@ -259,7 +286,7 @@ def urdf_path(self): @property def arm_workspace_range(self): - return {arm: th.tensor([th.deg2rad(th.tensor(-45)), th.deg2rad(th.tensor(45))]) 
for arm in self.arm_names} + return {arm: th.deg2rad(th.tensor([-45, 45], dtype=th.float32)) for arm in self.arm_names} @property def eef_usd_path(self): @@ -271,7 +298,33 @@ def disabled_collision_pairs(self): return [ ["left_gripper_link1", "left_gripper_link2"], ["right_gripper_link1", "right_gripper_link2"], + ["base_link", "torso_link1"], + ["base_link", "servo_link1"], + ["base_link", "wheel_link3"], + ["base_link", "servo_link3"], + ["base_link", "servo_link2"], ["base_link", "wheel_link1"], ["base_link", "wheel_link2"], - ["base_link", "wheel_link3"], + ["servo_link1", "wheel_link1"], + ["servo_link2", "wheel_link2"], + ["servo_link3", "wheel_link3"], + ["torso_link1", "torso_link2"], + ["torso_link2", "torso_link3"], + ["torso_link3", "torso_link4"], + ["torso_link4", "left_arm_link1"], + ["left_arm_link1", "left_arm_link2"], + ["left_arm_link2", "left_arm_link3"], + ["left_arm_link3", "left_arm_link4"], + ["left_arm_link4", "left_arm_link5"], + ["left_arm_link5", "left_arm_link6"], + ["left_arm_link6", "left_gripper_link1"], + ["left_arm_link6", "left_gripper_link2"], + ["torso_link4", "right_arm_link1"], + ["right_arm_link1", "right_arm_link2"], + ["right_arm_link2", "right_arm_link3"], + ["right_arm_link3", "right_arm_link4"], + ["right_arm_link4", "right_arm_link5"], + ["right_arm_link5", "right_arm_link6"], + ["right_arm_link6", "right_gripper_link1"], + ["right_arm_link6", "right_gripper_link2"], ] From b4c83c794550ef42cb103979e0084b7db602e7cc Mon Sep 17 00:00:00 2001 From: hang-yin Date: Wed, 20 Nov 2024 16:38:21 -0800 Subject: [PATCH 20/76] Make primitives faster and more robust --- omnigibson/action_primitives/curobo.py | 15 +++++-- .../starter_semantic_action_primitives.py | 43 ++++++++++-------- omnigibson/tasks/grasp_task.py | 25 +++++------ omnigibson/utils/motion_planning_utils.py | 44 +++++++++---------- 4 files changed, 69 insertions(+), 58 deletions(-) diff --git a/omnigibson/action_primitives/curobo.py b/omnigibson/action_primitives/curobo.py index 3b3d186b1..b7176698f 100644 --- a/omnigibson/action_primitives/curobo.py +++ b/omnigibson/action_primitives/curobo.py @@ -228,7 +228,7 @@ def __init__( num_trajopt_seeds=4, num_graph_seeds=4, interpolation_dt=0.03, - collision_activation_distance=0.005, + collision_activation_distance=0.05, self_collision_check=True, maximum_trajectory_dt=None, fixed_iters_trajopt=True, @@ -336,6 +336,7 @@ def check_collisions( self, q, check_self_collision=True, + skip_obstacle_update=False, ): """ Checks collisions between the sphere representation of the robot and the rest of the current scene @@ -353,7 +354,8 @@ def check_collisions( emb_sel = CuroboEmbodimentSelection.DEFAULT # Update obstacles - self.update_obstacles() + if not skip_obstacle_update: + self.update_obstacles() q_pos = self.robot.get_joint_positions().unsqueeze(0) cu_joint_state = lazy.curobo.types.state.JointState( @@ -430,6 +432,7 @@ def compute_trajectories( attached_obj=None, motion_constraint=None, attached_obj_scale=None, + skip_obstacle_update=False, emb_sel=CuroboEmbodimentSelection.DEFAULT, ): """ @@ -518,7 +521,8 @@ def compute_trajectories( ) plan_cfg.pose_cost_metric = pose_cost_metric - self.update_obstacles() + if not skip_obstacle_update: + self.update_obstacles() for link_name in target_pos.keys(): target_pos_link = target_pos[link_name] @@ -589,9 +593,12 @@ def compute_trajectories( object_names=obj_paths, ee_pose=ee_pose, link_name=self.robot.curobo_attached_object_link_names[ee_link_name], - scale=0.99 if attached_obj_scale is None else 
attached_obj_scale[ee_link_name], + scale=1.0 if attached_obj_scale is None else attached_obj_scale[ee_link_name], pitch_scale=1.0, merge_meshes=True, + world_objects_pose_offset=lazy.curobo.types.math.Pose.from_list( + [0, 0, 0.01, 1, 0, 0, 0], self._tensor_args + ), ) all_rollout_fns = [ diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index 89c6624dd..9df4968e0 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -659,6 +659,8 @@ def _place_with_predicate(self, obj, predicate): directly_move_hand_pose = None while len(pose_candidates) < 20: obj_pose = self._sample_pose_with_object_and_predicate(predicate, obj_in_hand, obj) + # TODO: Look into this orientation, why do we need to sample it? + obj_pose = (obj_pose[0], obj_in_hand.get_position_orientation()[1]) hand_pose = self._get_hand_pose_for_object_pose(obj_pose) if self._target_in_reach_of_robot(hand_pose)[self.arm]: directly_move_hand_pose = hand_pose @@ -678,6 +680,13 @@ def _place_with_predicate(self, obj, predicate): yield from self._move_hand(candidate) break + if valid_navigation_pose is None: + raise ActionPrimitiveError( + ActionPrimitiveError.Reason.PLANNING_ERROR, + "Could not find a valid pose to place the object", + {"target object": obj.name}, + ) + yield from self._execute_release() if self._get_obj_in_hand() is not None: @@ -814,11 +823,6 @@ def _move_hand( if arm is None: arm = self.arm if avoid_collision: - # If an object is grasped, we need to pass it to the motion planner - obj_in_hand = self._get_obj_in_hand() - if obj_in_hand is not None: - # TODO: this root link logic is bad, fix it - attached_obj = {self.robot.eef_link_names[arm]: obj_in_hand.root_link} yield from self._settle_robot() # curobo motion generator takes a pose but outputs joint positions if not lock_auxiliary_arm: @@ -845,7 +849,6 @@ def _move_hand( target_pos=target_pos, target_quat=target_quat, embodiment_selection=CuroboEmbodimentSelection.ARM, - attached_obj=attached_obj, motion_constraint=motion_constraint, ).cpu() else: @@ -866,6 +869,12 @@ def _plan_joint_motion( attached_obj=None, motion_constraint=None, ): + if attached_obj is None: + # If an object is grasped, we need to pass it to the motion planner + obj_in_hand = self._get_obj_in_hand() + if obj_in_hand is not None: + # TODO: this root link logic is bad, fix it + attached_obj = {self.robot.eef_link_names[self.arm]: obj_in_hand.root_link} planning_attempts = 0 success = False traj_path = None @@ -874,7 +883,9 @@ def _plan_joint_motion( target_quat = { k: th.stack([v for _ in range(self._motion_generator.batch_size)]) for k, v in target_quat.items() } + # TODO: call curobo with batch_size > 1 instead of iterating + self._motion_generator.update_obstacles() while not success and planning_attempts < m.MAX_PLANNING_ATTEMPTS: successes, traj_paths = self._motion_generator.compute_trajectories( target_pos=target_pos, @@ -889,6 +900,7 @@ def _plan_joint_motion( success_ratio=1.0, attached_obj=attached_obj, motion_constraint=motion_constraint, + skip_obstacle_update=True, emb_sel=embodiment_selection, ) # Grab the first successful trajectory, if not found, then continue planning @@ -1780,6 +1792,8 @@ def _sample_pose_near_object(self, obj, pose_on_obj=None, **kwargs): ) attempt = 0 + # Update obstacle once before sampling + self._motion_generator.update_obstacles() while attempt < 
m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_NEAR_OBJECT: candidate_poses = [] for _ in range(self._collision_check_batch_size): @@ -1800,7 +1814,7 @@ def _sample_pose_near_object(self, obj, pose_on_obj=None, **kwargs): break candidate_poses.append(candidate_2d_pose) - result = self._validate_poses(candidate_poses, pose_on_obj=target_pose, **kwargs) + result = self._validate_poses(candidate_poses, pose_on_obj=target_pose, skip_obstacle_update=True, **kwargs) # If anything in result is true, return the pose for i, res in enumerate(result): @@ -1809,16 +1823,7 @@ def _sample_pose_near_object(self, obj, pose_on_obj=None, **kwargs): return candidate_poses[i] attempt += self._collision_check_batch_size - - raise ActionPrimitiveError( - ActionPrimitiveError.Reason.SAMPLING_ERROR, - "Could not find valid position near object.", - { - "target object": obj.name, - "target pos": obj.get_position_orientation()[0], - "pose on target": pose_on_obj, - }, - ) + return None @staticmethod def _sample_position_on_aabb_side(target_obj): @@ -1919,7 +1924,7 @@ def _sample_pose_with_object_and_predicate( {"target object": target_obj.name, "object in hand": held_obj.name, "relation": pred_map[predicate]}, ) - def _validate_poses(self, candidate_poses, pose_on_obj=None, arm=None): + def _validate_poses(self, candidate_poses, pose_on_obj=None, arm=None, skip_obstacle_update=False): """ Determines whether the robot can reach all poses on the objects and is not in collision at the specified 2d poses @@ -1946,7 +1951,7 @@ def _validate_poses(self, candidate_poses, pose_on_obj=None, arm=None): candidate_joint_positions = th.stack(candidate_joint_positions) invalid_results = self._motion_generator.check_collisions( - candidate_joint_positions, check_self_collision=False + candidate_joint_positions, check_self_collision=False, skip_obstacle_update=skip_obstacle_update ).cpu() # For each candidate that passed collision check, verify reachability diff --git a/omnigibson/tasks/grasp_task.py b/omnigibson/tasks/grasp_task.py index 40198119c..ecd82324d 100644 --- a/omnigibson/tasks/grasp_task.py +++ b/omnigibson/tasks/grasp_task.py @@ -6,8 +6,7 @@ import omnigibson as og import omnigibson.utils.transform_utils as T -from omnigibson.action_primitives.starter_semantic_action_primitives import ( - PlanningContext, +from omnigibson.action_primitives.starter_semantic_action_primitives import ( # PlanningContext, StarterSemanticActionPrimitives, ) from omnigibson.macros import gm @@ -94,7 +93,7 @@ def _reset_agent(self, env): # Otherwise, reset using the primitive controller. 
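The skip_obstacle_update flag threaded through this patch enables an update-once, check-many pattern: synchronize the collision world with the scene a single time, then run every batched query against the cached state. A minimal sketch of that pattern (assuming an already-constructed CuRoboMotionGenerator named mg and an (N, D) tensor of candidate joint configurations candidate_qs; neither name comes from this diff):

    mg.update_obstacles()  # refresh the collision checker once per sampling round
    for batch in candidate_qs.split(mg.batch_size):
        # every query below reuses the cached obstacle state
        in_collision = mg.check_collisions(batch, check_self_collision=False, skip_obstacle_update=True)

This is why _sample_pose_near_object above calls update_obstacles() once before its candidate loop and passes skip_obstacle_update=True down through _validate_poses.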
else: if self._primitive_controller is None: - self._primitive_controller = StarterSemanticActionPrimitives(env, enable_head_tracking=False) + self._primitive_controller = StarterSemanticActionPrimitives(robot, enable_head_tracking=False) # Randomize the robots joint positions joint_control_idx = th.cat([robot.trunk_control_idx, robot.arm_control_idx[robot.default_arm]]) @@ -109,16 +108,16 @@ def _reset_agent(self, env): initial_joint_pos = th.tensor(robot.get_joint_positions()[joint_control_idx]) control_idx_in_joint_pos = th.arange(dim) - with PlanningContext( - env, self._primitive_controller.robot, self._primitive_controller.robot_copy, "original" - ) as context: - for _ in range(MAX_JOINT_RANDOMIZATION_ATTEMPTS): - joint_pos, joint_control_idx = self._get_random_joint_position(robot) - initial_joint_pos[control_idx_in_joint_pos] = joint_pos - if not set_arm_and_detect_collision(context, initial_joint_pos): - robot.set_joint_positions(joint_pos, joint_control_idx) - og.sim.step() - break + # with PlanningContext( + # env, self._primitive_controller.robot, self._primitive_controller.robot_copy, "original" + # ) as context: + # for _ in range(MAX_JOINT_RANDOMIZATION_ATTEMPTS): + # joint_pos, joint_control_idx = self._get_random_joint_position(robot) + # initial_joint_pos[control_idx_in_joint_pos] = joint_pos + # if not set_arm_and_detect_collision(context, initial_joint_pos): + # robot.set_joint_positions(joint_pos, joint_control_idx) + # og.sim.step() + # break # Randomize the robot's 2d pose obj = env.scene.object_registry("name", self.obj_name) diff --git a/omnigibson/utils/motion_planning_utils.py b/omnigibson/utils/motion_planning_utils.py index 3184a71d2..d456b9bd3 100644 --- a/omnigibson/utils/motion_planning_utils.py +++ b/omnigibson/utils/motion_planning_utils.py @@ -429,28 +429,28 @@ def state_valid_fn(q, verbose=False): return None -def set_base_and_detect_collision(context, pose, verbose=False): - """ - Moves the robot and detects robot collisions with the environment and itself - - Args: - context (PlanningContext): Context to plan in that includes the robot copy - pose (Array): Pose in the world frame to check for collisions at - verbose (bool): Whether the collision detector should output information about collisions or not. The verbose mode is too noisy in sampling so it is default to False - - Returns: - bool: Whether the robot is in collision - """ - # make a copy of the robot, set it to the goal pose, and check for possible collision - robot_copy = context.robot_copy - robot_copy_type = context.robot_copy_type - - translation = lazy.pxr.Gf.Vec3d(pose[0].tolist()) - robot_copy.prims[robot_copy_type].GetAttribute("xformOp:translate").Set(translation) - - orientation = pose[1][[3, 0, 1, 2]] - robot_copy.prims[robot_copy_type].GetAttribute("xformOp:orient").Set(lazy.pxr.Gf.Quatd(*orientation.tolist())) - return detect_robot_collision(context, verbose=verbose) +# def set_base_and_detect_collision(context, pose, verbose=False): +# """ +# Moves the robot and detects robot collisions with the environment and itself + +# Args: +# context (PlanningContext): Context to plan in that includes the robot copy +# pose (Array): Pose in the world frame to check for collisions at +# verbose (bool): Whether the collision detector should output information about collisions or not. 
The verbose mode is too noisy in sampling so it is default to False + +# Returns: +# bool: Whether the robot is in collision +# """ +# # make a copy of the robot, set it to the goal pose, and check for possible collision +# robot_copy = context.robot_copy +# robot_copy_type = context.robot_copy_type + +# translation = lazy.pxr.Gf.Vec3d(pose[0].tolist()) +# robot_copy.prims[robot_copy_type].GetAttribute("xformOp:translate").Set(translation) + +# orientation = pose[1][[3, 0, 1, 2]] +# robot_copy.prims[robot_copy_type].GetAttribute("xformOp:orient").Set(lazy.pxr.Gf.Quatd(*orientation.tolist())) +# return detect_robot_collision(context, verbose=verbose) def set_arm_and_detect_collision(context, joint_pos, verbose=False): From c7629d381a3720459cc42b61c146193e54b7fdfc Mon Sep 17 00:00:00 2001 From: hang-yin Date: Fri, 22 Nov 2024 15:15:10 -0800 Subject: [PATCH 21/76] Quick fix for ag state --- omnigibson/robots/manipulation_robot.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/omnigibson/robots/manipulation_robot.py b/omnigibson/robots/manipulation_robot.py index f3f6d55ff..53dba1097 100644 --- a/omnigibson/robots/manipulation_robot.py +++ b/omnigibson/robots/manipulation_robot.py @@ -1504,6 +1504,8 @@ def _load_state(self, state): # Include AG_state # TODO: currently does not take care of cloth objects # TODO: add unit tests + if "ag_obj_constraint_params" not in state: + return for arm in state["ag_obj_constraint_params"].keys(): if len(state["ag_obj_constraint_params"][arm]) > 0: data = state["ag_obj_constraint_params"][arm] From f24e4b999641c47613d515587a7f7aea1b61ce82 Mon Sep 17 00:00:00 2001 From: hang-yin Date: Tue, 3 Dec 2024 13:56:12 -0800 Subject: [PATCH 22/76] Primitives clean up --- .../action_primitive_set_base.py | 11 +-- omnigibson/action_primitives/curobo.py | 27 ------ .../starter_semantic_action_primitives.py | 83 +++++++++---------- omnigibson/tasks/grasp_task.py | 25 +++--- omnigibson/utils/grasping_planning_utils.py | 16 ++-- omnigibson/utils/motion_planning_utils.py | 44 +++++----- 6 files changed, 84 insertions(+), 122 deletions(-) diff --git a/omnigibson/action_primitives/action_primitive_set_base.py b/omnigibson/action_primitives/action_primitive_set_base.py index a5cf53261..424a911a2 100644 --- a/omnigibson/action_primitives/action_primitive_set_base.py +++ b/omnigibson/action_primitives/action_primitive_set_base.py @@ -58,13 +58,14 @@ def __init_subclass__(cls, **kwargs): if not inspect.isabstract(cls): REGISTERED_PRIMITIVE_SETS[cls.__name__] = cls - def __init__(self, robot): + def __init__(self, env, robot): + self.env = env self.robot = robot - # @abstractmethod - # def get_action_space(self): - # """Get the higher-level action space as an OpenAI Gym Space object.""" - # pass + @abstractmethod + def get_action_space(self): + """Get the higher-level action space as an OpenAI Gym Space object.""" + pass @abstractmethod def apply(self, action): diff --git a/omnigibson/action_primitives/curobo.py b/omnigibson/action_primitives/curobo.py index b7176698f..abe5e6861 100644 --- a/omnigibson/action_primitives/curobo.py +++ b/omnigibson/action_primitives/curobo.py @@ -767,33 +767,6 @@ def convert_q_to_eef_traj(self, traj, return_axisangle=False, emb_sel=CuroboEmbo return th.concatenate([positions, orientations], dim=-1) - def solve_ik(self, target_pos, target_quat, emb_sel=CuroboEmbodimentSelection.ARM): - # # If target_pos and target_quat are torch tensors, it's assumed that they correspond to the default ee_link - # if isinstance(target_pos, th.Tensor): - # 
target_pos = {self.ee_link[emb_sel]: target_pos} - # if isinstance(target_quat, th.Tensor): - # target_quat = {self.ee_link[emb_sel]: target_quat} - - # # Refresh the collision state - # self.update_obstacles(ignore_paths=None, emb_sel=emb_sel) - - # assert target_pos.keys() == target_quat.keys(), "Expected target_pos and target_quat to have the same keys!" - - # # TODO: fill these in - # solve_state = self.mg[emb_sel]._get_solve_state(lazy.curobo.ReacherSolveType.GOALSET, plan_config, goal_pose, start_state) - - # ik_result = self.mg[emb_sel]._solve_ik_from_solve_state( - # goal_pose=, - # solve_state=solve_state, - # start_state=, - # use_nn_seed=False, - # partial_ik_opt=False, - # link_poses=, - # ) - - # breakpoint() - return - @property def tensor_args(self): """ diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index 9df4968e0..b9ea34035 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -1,7 +1,7 @@ """ WARNING! The StarterSemanticActionPrimitive is a work-in-progress and is only provided as an example. -It currently only works with Fetch, Tiago, and R1 with their JointControllers set to absolute position mode with impedance. +It currently only works with Tiago and R1 with their JointControllers set to absolute position mode with impedance. See provided tiago_primitives.yaml config file for an example. See examples/action_primitives for runnable examples. """ @@ -42,7 +42,6 @@ m = create_module_macros(module_path=__file__) -# TODO: figure out why this was 0.01 m.DEFAULT_BODY_OFFSET_FROM_FLOOR = 0.0 m.KP_LIN_VEL = { @@ -84,6 +83,7 @@ m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_WITH_OBJECT_AND_PREDICATE = 20 m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_NEAR_OBJECT = 100 m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_IN_ROOM = 60 +m.MAX_ATTEMPTS_FOR_SAMPLING_PLACE_POSE = 20 m.PREDICATE_SAMPLING_Z_OFFSET = 0.02 m.GRASP_APPROACH_DISTANCE = 0.01 @@ -128,9 +128,9 @@ class StarterSemanticActionPrimitiveSet(IntEnum): class StarterSemanticActionPrimitives(BaseActionPrimitiveSet): def __init__( self, + env, robot, enable_head_tracking=True, - # TODO: fix this later always_track_eef=False, task_relevant_objects_only=False, planning_batch_size=3, @@ -141,18 +141,22 @@ def __init__( Initializes a StarterSemanticActionPrimitives generator. Args: + env (Environment): The environment that the primitives will run on. robot (BaseRobot): The robot that the primitives will run on. enable_head_tracking (bool): Whether to enable head tracking. Defaults to True. always_track_eef (bool, optional): Whether to always track the end effector, as opposed to switching between target object and end effector based on context. Defaults to False. task_relevant_objects_only (bool): Whether to only consider objects relevant to the task when computing the action space. Defaults to False. + planning_batch_size (int): The batch size for curobo motion planning. Defaults to 3. + collision_check_batch_size (int): The batch size for curobo collision checking. Defaults to 5. + debug_visual_marker (PrimitiveObject): The object to use for debug visual markers. Defaults to None. """ log.warning( "The StarterSemanticActionPrimitive is a work-in-progress and is only provided as an example. " - "It currently only works with Fetch, Tiago, and R1 with their JointControllers set to absolute position mode with impedance." 
+ "It currently only works with Tiago and R1 with their JointControllers set to absolute position mode with impedance." ) - super().__init__(robot) + super().__init__(env, robot) self._motion_generator = CuRoboMotionGenerator(robot=self.robot, batch_size=planning_batch_size) self.controller_functions = { StarterSemanticActionPrimitiveSet.GRASP: self._grasp, @@ -220,23 +224,23 @@ def _postprocess_action(self, action): action = self._overwrite_head_action(action) return action - # def get_action_space(self): - # # TODO: Figure out how to implement what happens when the set of objects in scene changes. - # if self._task_relevant_objects_only: - # assert isinstance( - # self.env.task, BehaviorTask - # ), "Activity relevant objects can only be used for BEHAVIOR tasks" - # self.addressable_objects = sorted(set(self.env.task.object_scope.values()), key=lambda obj: obj.name) - # else: - # self.addressable_objects = sorted(set(self.env.scene.objects_by_name.values()), key=lambda obj: obj.name) - - # # Filter out the robots. - # self.addressable_objects = [obj for obj in self.addressable_objects if not isinstance(obj, BaseRobot)] - - # self.num_objects = len(self.addressable_objects) - # return gym.spaces.Tuple( - # [gym.spaces.Discrete(self.num_objects), gym.spaces.Discrete(len(StarterSemanticActionPrimitiveSet))] - # ) + def get_action_space(self): + # TODO: Figure out how to implement what happens when the set of objects in scene changes. + if self._task_relevant_objects_only: + assert isinstance( + self.env.task, BehaviorTask + ), "Activity relevant objects can only be used for BEHAVIOR tasks" + self.addressable_objects = sorted(set(self.env.task.object_scope.values()), key=lambda obj: obj.name) + else: + self.addressable_objects = sorted(set(self.env.scene.objects_by_name.values()), key=lambda obj: obj.name) + + # Filter out the robots. + self.addressable_objects = [obj for obj in self.addressable_objects if not isinstance(obj, BaseRobot)] + + self.num_objects = len(self.addressable_objects) + return gym.spaces.Tuple( + [gym.spaces.Discrete(self.num_objects), gym.spaces.Discrete(len(StarterSemanticActionPrimitiveSet))] + ) def get_action_from_primitive_and_object(self, primitive: StarterSemanticActionPrimitiveSet, obj: BaseObject): assert obj in self.addressable_objects @@ -291,12 +295,12 @@ def apply_ref(self, primitive, *args, attempts=5): except ActionPrimitiveError as e: errors.append(e) - # # try: - # # # If we're not holding anything, release the hand so it doesn't stick to anything else. - # # if not self._get_obj_in_hand(): - # # yield from self._execute_release() - # # except ActionPrimitiveError: - # # pass + try: + # If we're not holding anything, release the hand so it doesn't stick to anything else. + if not self._get_obj_in_hand(): + yield from self._execute_release() + except ActionPrimitiveError: + pass try: # Make sure we retract the arms after every step @@ -520,7 +524,6 @@ def _grasp(self, obj): grasp_pose = (grasp_pos, grasp_quat) # Prepare data for the approach later. 
- # TODO: fix this threshold approach_pos = grasp_pose[0] + object_direction * grasp_offset_in_z approach_pose = (approach_pos, grasp_pose[1]) @@ -542,8 +545,7 @@ def _grasp(self, obj): yield from self._move_hand(approach_pose, avoid_collision=False) elif self.robot.grasping_mode == "assisted": indented_print("Performing grasp approach") - # TODO: implement linear cartesian motion with curobo constrained planning - yield from self._move_hand(approach_pose) # motion_constraint=[0, 0, 0, 0, 0, 1] + yield from self._move_hand(approach_pose, motion_constraint=[0, 0, 0, 0, 0, 1]) yield from self._execute_grasp() # Step a few times to update @@ -657,9 +659,9 @@ def _place_with_predicate(self, obj, predicate): # Sample location to place object pose_candidates = [] directly_move_hand_pose = None - while len(pose_candidates) < 20: + while len(pose_candidates) < m.MAX_ATTEMPTS_FOR_SAMPLING_PLACE_POSE: obj_pose = self._sample_pose_with_object_and_predicate(predicate, obj_in_hand, obj) - # TODO: Look into this orientation, why do we need to sample it? + # TODO: why do we sample this orientation? obj_pose = (obj_pose[0], obj_in_hand.get_position_orientation()[1]) hand_pose = self._get_hand_pose_for_object_pose(obj_pose) if self._target_in_reach_of_robot(hand_pose)[self.arm]: @@ -884,7 +886,6 @@ def _plan_joint_motion( k: th.stack([v for _ in range(self._motion_generator.batch_size)]) for k, v in target_quat.items() } - # TODO: call curobo with batch_size > 1 instead of iterating self._motion_generator.update_obstacles() while not success and planning_attempts < m.MAX_PLANNING_ATTEMPTS: successes, traj_paths = self._motion_generator.compute_trajectories( @@ -1254,7 +1255,7 @@ def _move_fingers_to_limit(self, limit_type): """ q = self.robot.get_joint_positions() joint_names = list(self.robot.joints.keys()) - for finger_joints in self.robot.finger_joints.values(): + for finger_joints in self.robot.finger_joints[self.arm]: for finger_joint in finger_joints: idx = joint_names.index(finger_joint.joint_name) q[idx] = getattr(finger_joint, f"{limit_type}_limit") @@ -1587,10 +1588,8 @@ def _navigate_to_pose(self, pose_2d): Returns: th.tensor or None: Action array for one step for the robot to navigate or None if it is done navigating """ - - # TODO: Change curobo implementation so that base motion plannning take a 2D pose instead of 3D? pose_3d = self._get_robot_pose_from_2d_pose(pose_2d) - # TODO: change defualt to 0.0? Why is it 0.1? + # TODO: why was this 0.1? pose_3d[0][2] = 0.0 if self.debug_visual_marker is not None: self.debug_visual_marker.set_position_orientation(*pose_3d) @@ -1600,7 +1599,6 @@ def _navigate_to_pose(self, pose_2d): q_traj = self._plan_joint_motion(target_pos, target_quat, CuroboEmbodimentSelection.BASE).cpu() yield from self._execute_motion_plan(q_traj, stop_on_contact=True) - # TODO: implement this for curobo? 
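The base-navigation hunk above plans in SE(2) but hands cuRobo a full 3D pose with z pinned to zero. The conversion done by _get_robot_pose_from_2d_pose amounts to the following (a sketch, assuming pose_2d = (x, y, yaw) and the module's usual import of omnigibson.utils.transform_utils as T):

    import torch as th

    x, y, yaw = pose_2d
    pos = th.tensor([x, y, 0.0], dtype=th.float32)  # z = m.DEFAULT_BODY_OFFSET_FROM_FLOOR = 0.0
    quat = T.euler2quat(th.tensor([0.0, 0.0, yaw], dtype=th.float32))
    pose_3d = (pos, quat)

The explicit pose_3d[0][2] = 0.0 in the hunk then guards against any nonzero body offset left over from earlier revisions.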
def _draw_plan(self, plan): SEARCHED = [] trav_map = self.robot.scene._trav_map @@ -1735,11 +1733,6 @@ def _rotate_in_place(self, end_pose, angle_threshold=m.DEFAULT_ANGLE_THRESHOLD): action = self._empty_action() - # TODO: test to see if we still need this - # if isinstance(self.robot, Locobot) or isinstance(self.robot, Freight): - # # Locobot and Freight wheel joints are reversed - # ang_vel = -ang_vel - base_action = action[self.robot.controller_action_idx["base"]] assert ( @@ -1779,7 +1772,7 @@ def _sample_pose_near_object(self, obj, pose_on_obj=None, **kwargs): - 4-array: (x,y,z,w) Quaternion orientation in the world frame """ # TODO: make this a macro - distance_lo, distance_hi = 0.0, 1.5 + distance_lo, distance_hi = 0.0, 2.0 yaw_lo, yaw_hi = -math.pi, math.pi avg_arm_workspace_range = th.mean(self.robot.arm_workspace_range[self.arm]) diff --git a/omnigibson/tasks/grasp_task.py b/omnigibson/tasks/grasp_task.py index ecd82324d..12d787f4f 100644 --- a/omnigibson/tasks/grasp_task.py +++ b/omnigibson/tasks/grasp_task.py @@ -6,9 +6,7 @@ import omnigibson as og import omnigibson.utils.transform_utils as T -from omnigibson.action_primitives.starter_semantic_action_primitives import ( # PlanningContext, - StarterSemanticActionPrimitives, -) +from omnigibson.action_primitives.starter_semantic_action_primitives import StarterSemanticActionPrimitives from omnigibson.macros import gm from omnigibson.objects.object_base import REGISTERED_OBJECTS from omnigibson.reward_functions.grasp_reward import GraspReward @@ -18,7 +16,6 @@ from omnigibson.termination_conditions.grasp_goal import GraspGoal from omnigibson.termination_conditions.timeout import Timeout from omnigibson.utils.grasping_planning_utils import get_grasp_poses_for_object_sticky -from omnigibson.utils.motion_planning_utils import set_arm_and_detect_collision from omnigibson.utils.python_utils import classproperty, create_class_from_registry_and_config from omnigibson.utils.sim_utils import land_object @@ -108,16 +105,16 @@ def _reset_agent(self, env): initial_joint_pos = th.tensor(robot.get_joint_positions()[joint_control_idx]) control_idx_in_joint_pos = th.arange(dim) - # with PlanningContext( - # env, self._primitive_controller.robot, self._primitive_controller.robot_copy, "original" - # ) as context: - # for _ in range(MAX_JOINT_RANDOMIZATION_ATTEMPTS): - # joint_pos, joint_control_idx = self._get_random_joint_position(robot) - # initial_joint_pos[control_idx_in_joint_pos] = joint_pos - # if not set_arm_and_detect_collision(context, initial_joint_pos): - # robot.set_joint_positions(joint_pos, joint_control_idx) - # og.sim.step() - # break + for _ in range(MAX_JOINT_RANDOMIZATION_ATTEMPTS): + joint_pos, joint_control_idx = self._get_random_joint_position(robot) + initial_joint_pos[control_idx_in_joint_pos] = joint_pos + collision_detected = self._primitive_controller._motion_generator.check_collisions( + [initial_joint_pos], check_self_collision=False + ).cpu()[0] + if not collision_detected: + robot.set_joint_positions(joint_pos, joint_control_idx) + og.sim.step() + break # Randomize the robot's 2d pose obj = env.scene.object_registry("name", self.obj_name) diff --git a/omnigibson/utils/grasping_planning_utils.py b/omnigibson/utils/grasping_planning_utils.py index e9274f6e7..6dfa46f7e 100644 --- a/omnigibson/utils/grasping_planning_utils.py +++ b/omnigibson/utils/grasping_planning_utils.py @@ -28,19 +28,17 @@ def get_grasp_poses_for_object_sticky(target_obj): Returns: List of grasp candidates, where each grasp candidate is a tuple 
containing the grasp pose and the approach direction. """ - bbox_center_in_world, bbox_quat_in_world, bbox_extent_in_base_frame, _ = target_obj.get_base_aligned_bbox( - visual=False - ) - # TODO: why base frame aabb? - bbox_extent_in_world_frame = target_obj.aabb_extent + aabb_min_world, aabb_max_world = target_obj.aabb - grasp_center_pos = bbox_center_in_world + th.tensor([0, 0, bbox_extent_in_world_frame[2] / 2]) - towards_object_in_world_frame = bbox_center_in_world - grasp_center_pos + bbox_center_world = (aabb_min_world + aabb_max_world) / 2 + bbox_extent_world = aabb_max_world - aabb_min_world + + grasp_center_pos = bbox_center_world + th.tensor([0, 0, bbox_extent_world[2] / 2]) + towards_object_in_world_frame = bbox_center_world - grasp_center_pos towards_object_in_world_frame /= th.norm(towards_object_in_world_frame) - # TODO: figure out why this was pi/2 in the y-axis - # grasp_quat = T.euler2quat(th.tensor([0, math.pi / 2, 0], dtype=th.float32)) + # TODO: why was this pi/2 in the y-axis? grasp_quat = T.euler2quat(th.tensor([0, 0, 0], dtype=th.float32)) grasp_pose = (grasp_center_pos, grasp_quat) diff --git a/omnigibson/utils/motion_planning_utils.py b/omnigibson/utils/motion_planning_utils.py index d456b9bd3..3184a71d2 100644 --- a/omnigibson/utils/motion_planning_utils.py +++ b/omnigibson/utils/motion_planning_utils.py @@ -429,28 +429,28 @@ def state_valid_fn(q, verbose=False): return None -# def set_base_and_detect_collision(context, pose, verbose=False): -# """ -# Moves the robot and detects robot collisions with the environment and itself - -# Args: -# context (PlanningContext): Context to plan in that includes the robot copy -# pose (Array): Pose in the world frame to check for collisions at -# verbose (bool): Whether the collision detector should output information about collisions or not. The verbose mode is too noisy in sampling so it is default to False - -# Returns: -# bool: Whether the robot is in collision -# """ -# # make a copy of the robot, set it to the goal pose, and check for possible collision -# robot_copy = context.robot_copy -# robot_copy_type = context.robot_copy_type - -# translation = lazy.pxr.Gf.Vec3d(pose[0].tolist()) -# robot_copy.prims[robot_copy_type].GetAttribute("xformOp:translate").Set(translation) - -# orientation = pose[1][[3, 0, 1, 2]] -# robot_copy.prims[robot_copy_type].GetAttribute("xformOp:orient").Set(lazy.pxr.Gf.Quatd(*orientation.tolist())) -# return detect_robot_collision(context, verbose=verbose) +def set_base_and_detect_collision(context, pose, verbose=False): + """ + Moves the robot and detects robot collisions with the environment and itself + + Args: + context (PlanningContext): Context to plan in that includes the robot copy + pose (Array): Pose in the world frame to check for collisions at + verbose (bool): Whether the collision detector should output information about collisions or not. 
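The grasping_planning_utils.py change above is worth restating outside the diff: with the base-aligned bounding box gone, sticky grasp sampling reduces to grasping at the top center of the world-frame AABB and approaching straight down. A standalone sketch (obj is assumed to be any OmniGibson object exposing a torch-backed aabb property):

    aabb_min, aabb_max = obj.aabb
    center = (aabb_min + aabb_max) / 2
    extent = aabb_max - aabb_min
    grasp_pos = center + th.tensor([0.0, 0.0, extent[2] / 2])  # center of the top face

The approach direction is center - grasp_pos normalized, i.e. the -z axis.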
The verbose mode is too noisy in sampling so it is default to False + + Returns: + bool: Whether the robot is in collision + """ + # make a copy of the robot, set it to the goal pose, and check for possible collision + robot_copy = context.robot_copy + robot_copy_type = context.robot_copy_type + + translation = lazy.pxr.Gf.Vec3d(pose[0].tolist()) + robot_copy.prims[robot_copy_type].GetAttribute("xformOp:translate").Set(translation) + + orientation = pose[1][[3, 0, 1, 2]] + robot_copy.prims[robot_copy_type].GetAttribute("xformOp:orient").Set(lazy.pxr.Gf.Quatd(*orientation.tolist())) + return detect_robot_collision(context, verbose=verbose) def set_arm_and_detect_collision(context, joint_pos, verbose=False): From a997b28b8e55f0b7e9248a68871cd84eec6723fa Mon Sep 17 00:00:00 2001 From: hang-yin Date: Tue, 3 Dec 2024 17:00:34 -0800 Subject: [PATCH 23/76] More primitives refactoring --- .../starter_semantic_action_primitives.py | 22 +++-- omnigibson/configs/r1_primitives.yaml | 90 +++++++++++++++++++ omnigibson/configs/tiago_primitives.yaml | 64 +++++++++++-- .../action_primitives/rs_int_example.py | 34 +++++-- .../action_primitives/solve_simple_task.py | 19 +++- .../wip_solve_behavior_task.py | 2 +- omnigibson/tasks/grasp_task.py | 2 +- tests/test_primitives.py | 2 +- 8 files changed, 209 insertions(+), 26 deletions(-) create mode 100644 omnigibson/configs/r1_primitives.yaml diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index b9ea34035..1b59a0638 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -67,7 +67,7 @@ R1: 0.2, } -m.MAX_PLANNING_ATTEMPTS = 50 +m.MAX_PLANNING_ATTEMPTS = 20 m.MAX_STEPS_FOR_SETTLING = 500 @@ -559,7 +559,8 @@ def _grasp(self, obj): "Grasp completed, but no object detected in hand after executing grasp", {"target object": obj.name}, ) - # TODO: reset density back when releasing + + # TODO: ag force seems to not be enough to keep the object in hand. 
need to investigate
         obj_in_hand.root_link.density = 1.0
 
         indented_print("Moving hand back")
@@ -670,6 +671,7 @@ def _place_with_predicate(self, obj, predicate):
             else:
                 pose_candidates.append(hand_pose)
 
+        valid_navigation_pose = None
         if directly_move_hand_pose is not None:
             yield from self._move_hand(directly_move_hand_pose)
         else:
@@ -937,8 +939,7 @@ def _execute_motion_plan(
                     collision_detected = False
                     articulation_control_idx = th.cat(
                         (
-                            self.robot.arm_control_idx["left"],
-                            self.robot.arm_control_idx["right"],
+                            self.robot.arm_control_idx[self.arm],
                             self.robot.trunk_control_idx,
                         )
                     )
@@ -1255,10 +1256,9 @@ def _move_fingers_to_limit(self, limit_type):
         """
         q = self.robot.get_joint_positions()
         joint_names = list(self.robot.joints.keys())
-        for finger_joints in self.robot.finger_joints[self.arm]:
-            for finger_joint in finger_joints:
-                idx = joint_names.index(finger_joint.joint_name)
-                q[idx] = getattr(finger_joint, f"{limit_type}_limit")
+        for finger_joint in self.robot.finger_joints[self.arm]:
+            idx = joint_names.index(finger_joint.joint_name)
+            q[idx] = getattr(finger_joint, f"{limit_type}_limit")
         action = self._q_to_action(q)
         finger_joint_limits = getattr(self.robot, f"joint_{limit_type}_limits")[
             self.robot.gripper_control_idx[self.arm]
@@ -1655,6 +1655,12 @@ def _navigate_to_obj(self, obj, pose_on_obj=None, **kwargs):
             th.tensor or None: Action array for one step for the robot to navigate in range or None if it is done navigating
         """
         pose = self._sample_pose_near_object(obj, pose_on_obj=pose_on_obj, **kwargs)
+        if pose is None:
+            raise ActionPrimitiveError(
+                ActionPrimitiveError.Reason.PLANNING_ERROR,
+                "Could not find a valid pose near the object",
+                {"object": obj.name},
+            )
         yield from self._navigate_to_pose(pose)
 
     def _navigate_to_pose_direct(self, pose_2d, low_precision=False):
diff --git a/omnigibson/configs/r1_primitives.yaml b/omnigibson/configs/r1_primitives.yaml
new file mode 100644
index 000000000..f7b42c9d4
--- /dev/null
+++ b/omnigibson/configs/r1_primitives.yaml
@@ -0,0 +1,90 @@
+env:
+  action_frequency: 30 # (int): environment executes action at the action_frequency rate
+  physics_frequency: 120 # (int): physics frequency (1 / physics_timestep for physx)
+  device: null # (None or str): specifies the device to be used if running on the gpu with torch backend
+  automatic_reset: false # (bool): whether to automatically reset after an episode finishes
+  flatten_action_space: false # (bool): whether to flatten the action space as a single 1D array
+  flatten_obs_space: false # (bool): whether the observation space should be flattened when generated
+  use_external_obs: false # (bool): Whether to use external observations or not
+  external_sensors: null # (None or list): If specified, list of sensor configurations for external sensors to add. Should specify sensor "type" and any additional kwargs to instantiate the sensor.
Each entry should be the kwargs passed to @create_sensor, in addition to position, orientation + +render: + viewer_width: 1280 + viewer_height: 720 + +scene: + type: InteractiveTraversableScene + scene_model: Rs_int + trav_map_resolution: 0.1 + default_erosion_radius: 0.0 + trav_map_with_objects: true + num_waypoints: 1 + waypoint_resolution: 0.2 + load_object_categories: null + not_load_object_categories: null + load_room_types: null + load_room_instances: null + load_task_relevant_only: false + seg_map_resolution: 0.1 + scene_source: OG + include_robots: false + +robots: + - type: R1 + obs_modalities: [rgb] + scale: 1.0 + self_collisions: true + action_normalize: false + action_type: continuous + grasping_mode: sticky + sensor_config: + VisionSensor: + sensor_kwargs: + image_height: 128 + image_width: 128 + controller_config: + base: + name: JointController + motor_type: position + command_input_limits: null + use_delta_commands: false + use_impedances: true + pos_kp: 50 + trunk: + name: JointController + motor_type: position + command_input_limits: null + use_delta_commands: false + use_impedances: true + arm_left: + name: JointController + # subsume_controllers: [trunk] + motor_type: position + command_input_limits: null + use_delta_commands: false + use_impedances: true + arm_right: + name: JointController + motor_type: position + command_input_limits: null + use_delta_commands: false + use_impedances: true + gripper_left: + name: JointController + motor_type: position + command_input_limits: null + use_delta_commands: false + use_impedances: true + gripper_right: + name: JointController + motor_type: position + command_input_limits: null + use_delta_commands: false + use_impedances: true + camera: + name: JointController + +objects: [] + +task: + type: DummyTask \ No newline at end of file diff --git a/omnigibson/configs/tiago_primitives.yaml b/omnigibson/configs/tiago_primitives.yaml index abd36fd53..d3625c8bd 100644 --- a/omnigibson/configs/tiago_primitives.yaml +++ b/omnigibson/configs/tiago_primitives.yaml @@ -38,7 +38,35 @@ robots: action_normalize: false action_type: continuous grasping_mode: sticky - default_arm_pose: vertical + reset_joint_pos: [ + 0.0000, + 0.0000, + -0.0000, + -0.0000, + -0.0000, + -0.0000, + 0.3500, + 0.9052, + 0.9052, + 0.0000, + -0.4281, + -0.4281, + -0.4500, + 2.2350, + 2.2350, + 1.6463, + 1.6463, + 0.7687, + 0.7687, + -0.7946, + -0.7946, + -1.0891, + -1.0891, + 0.0450, + 0.0450, + 0.0450, + 0.0450, + ] sensor_config: VisionSensor: sensor_kwargs: @@ -47,33 +75,53 @@ robots: controller_config: base: name: JointController + motor_type: position + command_input_limits: null + use_delta_commands: false + use_impedances: true + pos_kp: 200 + pos_ki: 0.5 + max_integral_error: 10.0 trunk: name: JointController + motor_type: position + command_input_limits: null + use_delta_commands: false + use_impedances: true + pos_kp: 500 arm_left: name: JointController - subsume_controllers: [trunk] + # subsume_controllers: [trunk] motor_type: position command_input_limits: null - command_output_limits: null use_delta_commands: false + use_impedances: true + pos_kp: 500 + pos_ki: 0.5 + max_integral_error: 10.0 arm_right: name: JointController motor_type: position command_input_limits: null - command_output_limits: null use_delta_commands: false + use_impedances: true + pos_kp: 500 + pos_ki: 0.5 + max_integral_error: 10.0 gripper_left: name: JointController motor_type: position - command_input_limits: [-1, 1] - command_output_limits: null + command_input_limits: null 
         use_delta_commands: false
+        use_impedances: true
+        pos_kp: 200
       gripper_right:
         name: JointController
         motor_type: position
-        command_input_limits: [-1, 1]
-        command_output_limits: null
+        command_input_limits: null
         use_delta_commands: false
+        use_impedances: true
+        pos_kp: 200
       camera:
         name: JointController
diff --git a/omnigibson/examples/action_primitives/rs_int_example.py b/omnigibson/examples/action_primitives/rs_int_example.py
index c9f856523..9fb7371a3 100644
--- a/omnigibson/examples/action_primitives/rs_int_example.py
+++ b/omnigibson/examples/action_primitives/rs_int_example.py
@@ -1,5 +1,6 @@
 import os
 
+import torch as th
 import yaml
 
 import omnigibson as og
@@ -8,6 +9,7 @@
     StarterSemanticActionPrimitiveSet,
 )
 from omnigibson.macros import gm
+from omnigibson.robots.tiago import Tiago
 
 # Don't use GPU dynamics and use flatcache for performance boost
 # gm.USE_GPU_DYNAMICS = True
@@ -26,19 +28,19 @@ def main():
     It loads Rs_int with a robot, and the robot picks and places an apple.
     """
     # Load the config
-    config_filename = os.path.join(og.example_config_path, "fetch_primitives.yaml")
+    config_filename = os.path.join(og.example_config_path, "tiago_primitives.yaml")
     config = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader)
 
-    # Update it to run a grocery shopping task
     config["scene"]["scene_model"] = "Rs_int"
-    config["scene"]["not_load_object_categories"] = ["ceilings"]
+    config["scene"]["not_load_object_categories"] = ["ceilings", "loudspeaker", "standing_tv", "pot_plant"]
+    # config["scene"]["load_object_categories"] = ["floors", "breakfast_table", "bottom_cabinet", "walls"]
     config["objects"] = [
         {
             "type": "DatasetObject",
             "name": "apple",
             "category": "apple",
             "model": "agveuv",
-            "position": [-0.3, -1.1, 0.5],
+            "position": [1.2, 0.0, 0.75],
             "orientation": [0, 0, 0, 1],
         },
     ]
@@ -48,11 +50,31 @@ def main():
     scene = env.scene
     robot = env.robots[0]
 
+    # Open the gripper(s) to match cuRobo's default state
+    for arm_name in robot.gripper_control_idx.keys():
+        gripper_control_idx = robot.gripper_control_idx[arm_name]
+        robot.set_joint_positions(th.ones_like(gripper_control_idx), indices=gripper_control_idx, normalized=True)
+    robot.keep_still()
+
+    for _ in range(5):
+        og.sim.step()
+
+    env.scene.update_initial_state()
+    env.scene.reset()
+
+    og.sim.viewer_camera.set_position_orientation(
+        th.tensor([-0.6084, -3.3571, 1.2033]), th.tensor([0.6349, -0.1778, -0.2027, 0.7240])
+    )
+
+    # Let the object settle
+    for _ in range(30):
+        og.sim.step()
+
     # Allow user to move camera more easily
     og.sim.enable_viewer_camera_teleoperation()
 
-    controller = StarterSemanticActionPrimitives(robot, enable_head_tracking=False)
-    cabinet = scene.object_registry("name", "bottom_cabinet_slgzfc_0")
+    controller = StarterSemanticActionPrimitives(env, robot, enable_head_tracking=isinstance(robot, Tiago))
+    cabinet = scene.object_registry("name", "bottom_cabinet_jhymlr_0")
     apple = scene.object_registry("name", "apple")
 
     # Grasp apple
diff --git a/omnigibson/examples/action_primitives/solve_simple_task.py b/omnigibson/examples/action_primitives/solve_simple_task.py
index d20a16242..7f2c8fe3b 100644
--- a/omnigibson/examples/action_primitives/solve_simple_task.py
+++ b/omnigibson/examples/action_primitives/solve_simple_task.py
@@ -1,5 +1,6 @@
 import os
 
+import torch as th
 import yaml
 
 import omnigibson as og
@@ -57,10 +58,26 @@ def main():
     scene = env.scene
     robot = env.robots[0]
 
+    # Open the gripper(s) to match cuRobo's default state
+    for arm_name in robot.gripper_control_idx.keys():
+        gripper_control_idx = robot.gripper_control_idx[arm_name]
+        robot.set_joint_positions(th.ones_like(gripper_control_idx), indices=gripper_control_idx, normalized=True)
+    robot.keep_still()
+
+    for _ in range(5):
+        og.sim.step()
+
+    env.scene.update_initial_state()
+    env.scene.reset()
+
+    # Let the object settle
+    for _ in range(30):
+        og.sim.step()
+
     # Allow user to move camera more easily
     og.sim.enable_viewer_camera_teleoperation()
 
-    controller = StarterSemanticActionPrimitives(robot, enable_head_tracking=False)
+    controller = StarterSemanticActionPrimitives(env, robot, enable_head_tracking=True)
 
     # Grasp of cologne
     grasp_obj = scene.object_registry("name", "cologne")
diff --git a/omnigibson/examples/action_primitives/wip_solve_behavior_task.py b/omnigibson/examples/action_primitives/wip_solve_behavior_task.py
index 75202dfe7..a489559ec 100644
--- a/omnigibson/examples/action_primitives/wip_solve_behavior_task.py
+++ b/omnigibson/examples/action_primitives/wip_solve_behavior_task.py
@@ -52,7 +52,7 @@ def main():
     # Allow user to move camera more easily
     og.sim.enable_viewer_camera_teleoperation()
 
-    controller = StarterSemanticActionPrimitives(robot, enable_head_tracking=False)
+    controller = StarterSemanticActionPrimitives(env, robot, enable_head_tracking=False)
 
     # Grasp can of soda
     grasp_obj = env.task.object_scope["can__of__soda.n.01_2"]
diff --git a/omnigibson/tasks/grasp_task.py b/omnigibson/tasks/grasp_task.py
index 12d787f4f..7ccd6b123 100644
--- a/omnigibson/tasks/grasp_task.py
+++ b/omnigibson/tasks/grasp_task.py
@@ -90,7 +90,7 @@ def _reset_agent(self, env):
         # Otherwise, reset using the primitive controller.
         else:
             if self._primitive_controller is None:
-                self._primitive_controller = StarterSemanticActionPrimitives(robot, enable_head_tracking=False)
+                self._primitive_controller = StarterSemanticActionPrimitives(env, robot, enable_head_tracking=False)
 
             # Randomize the robots joint positions
             joint_control_idx = th.cat([robot.trunk_control_idx, robot.arm_control_idx[robot.default_arm]])
diff --git a/tests/test_primitives.py b/tests/test_primitives.py
index 8b20fc084..460ac61df 100644
--- a/tests/test_primitives.py
+++ b/tests/test_primitives.py
@@ -76,7 +76,7 @@ def primitive_tester(env, objects, primitives, primitives_args):
         obj["object"].set_position_orientation(position=obj["position"], orientation=obj["orientation"])
         og.sim.step()
 
-    controller = StarterSemanticActionPrimitives(env.robots[0], enable_head_tracking=False)
+    controller = StarterSemanticActionPrimitives(env, env.robots[0], enable_head_tracking=False)
     try:
         for primitive, args in zip(primitives, primitives_args):
             execute_controller(controller.apply_ref(primitive, *args, attempts=1), env)

From b93185e561853f349e5e3a66bd23e4e7cdc87b8d Mon Sep 17 00:00:00 2001
From: Chengshu Li
Date: Tue, 10 Dec 2024 16:04:20 -0800
Subject: [PATCH 24/76] add IK-only mode for curobo

---
 omnigibson/action_primitives/curobo.py | 139 ++++++++++++++++++-------
 1 file changed, 103 insertions(+), 36 deletions(-)

diff --git a/omnigibson/action_primitives/curobo.py b/omnigibson/action_primitives/curobo.py
index abe5e6861..b1aa7e55f 100644
--- a/omnigibson/action_primitives/curobo.py
+++ b/omnigibson/action_primitives/curobo.py
@@ -1,5 +1,4 @@
 import math
-import os
 from collections.abc import Iterable
 from enum import Enum
@@ -11,7 +10,6 @@
 from omnigibson.macros import create_module_macros
 from omnigibson.object_states.factory import METALINK_PREFIXES
 from omnigibson.prims.rigid_prim import RigidPrim
-from omnigibson.robots.articulated_trunk_robot import ArticulatedTrunkRobot
 from omnigibson.robots.holonomic_base_robot import HolonomicBaseRobot
 from omnigibson.utils.constants import GROUND_CATEGORIES, JointType
 from omnigibson.utils.control_utils import FKSolver
@@ -417,6 +415,75 @@ def update_locked_joints(self, cu_joint_state, emb_sel):
             link_idx = kc.link_name_to_idx_map[child_link_name]
             kc.fixed_transforms[link_idx] = relative_pose
 
+    def solve_ik_batch(
+        self, start_state, goal_pose, plan_config, link_poses=None, emb_sel=CuroboEmbodimentSelection.DEFAULT
+    ):
+        """Find IK solutions to reach a batch of goal poses from a batch of start joint states.
+
+        Args:
+            start_state: Start joint states of the robot. When planning from a non-static state,
+                i.e., when velocity or acceleration is non-zero, set :attr:`MotionGen.optimize_dt`
+                to False.
+            goal_pose: Goal poses for the end-effector.
+            plan_config: Planning parameters for motion generation.
+            link_poses: Goal poses for each link in the robot when planning for multiple links.
+
+        Returns:
+            IKResult: Result of IK solution. Check :attr:`IKResult.success`
+                attribute to check which indices of the batch were successful.
+            bool: Whether the IK solution was successful for the batch.
+            JointState: Joint state of the robot at the goal pose.
+        """
+        solve_state = self.mg[emb_sel]._get_solve_state(
+            lazy.curobo.wrap.reacher.types.ReacherSolveType.BATCH, plan_config, goal_pose, start_state
+        )
+        result = self.mg[emb_sel]._solve_ik_from_solve_state(
+            goal_pose,
+            solve_state,
+            start_state,
+            plan_config.use_nn_ik_seed,
+            plan_config.partial_ik_opt,
+            link_poses,
+        )
+        # If any of the IK seeds is successful
+        success = result.success.any(dim=1)
+        # Set non-successful error to infinity (assign through the mask; fill_ on a masked copy would be a no-op)
+        result.error[~result.success] = float("inf")
+        # Get the index of the minimum error
+        min_error_idx = result.error.argmin(dim=1)
+        # Get the joint state with the minimum error
+        joint_state = result.js_solution[range(result.js_solution.shape[0]), min_error_idx]
+        joint_state = [joint_state[i] for i in range(joint_state.shape[0])]
+        return result, success, joint_state
+
+    def plan_batch(
+        self, start_state, goal_pose, plan_config, link_poses=None, emb_sel=CuroboEmbodimentSelection.DEFAULT
+    ):
+        """Plan a batch of trajectories from a batch of start joint states to a batch of goal poses.
+
+        Args:
+            start_state: Start joint states of the robot. When planning from a non-static state,
+                i.e., when velocity or acceleration is non-zero, set :attr:`MotionGen.optimize_dt`
+                to False.
+            goal_pose: Goal poses for the end-effector.
+            plan_config: Planning parameters for motion generation.
+            link_poses: Goal poses for each link in the robot when planning for multiple links.
+
+        Returns:
+            MotionGenResult: Result of motion generation. Check :attr:`MotionGenResult.success`
+                attribute to check which indices of the batch were successful.
+            bool: Whether planning was successful for the batch.
+            JointState: Planned joint-state path for each batch entry (None where planning failed).
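The seed-selection idiom in solve_ik_batch above is the part worth internalizing: the IK solver returns one candidate per seed, a batch entry counts as solved if any seed converged, and the lowest-error seed is kept per entry. Its core, in isolation (a sketch; result.success is a (B, S) bool tensor, result.error a (B, S) float tensor, and result.js_solution indexable as [batch, seed]):

    success = result.success.any(dim=1)           # per-entry success across seeds
    result.error[~result.success] = float("inf")  # failed seeds can never win the argmin
    best_seed = result.error.argmin(dim=1)
    best_js = result.js_solution[th.arange(result.error.shape[0]), best_seed]

Note the masked write must be a plain assignment: calling fill_() on result.error[~result.success] would mutate a temporary copy and leave result.error untouched.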
+ """ + result = self.mg[emb_sel].plan_batch(start_state, goal_pose, plan_config, link_poses=link_poses) + success = result.success + if result.interpolated_plan is None: + joint_state = [None] * goal_pose.batch + else: + joint_state = result.get_paths() + + return result, success, joint_state + def compute_trajectories( self, target_pos, @@ -433,6 +500,7 @@ def compute_trajectories( motion_constraint=None, attached_obj_scale=None, skip_obstacle_update=False, + ik_only=False, emb_sel=CuroboEmbodimentSelection.DEFAULT, ): """ @@ -466,6 +534,8 @@ def compute_trajectories( link names and the values are the corresponding BaseObject instances to attach to that link attached_obj_scale (None or Dict[str, float]): If specified, a dictionary where the keys are the end-effector link names and the values are the corresponding scale to apply to the attached object + skip_obstacle_update (bool): Whether to skip updating the obstacles in the world collision checker + ik_only (bool): Whether to only run the IK solver and not the trajectory optimization emb_sel (CuroboEmbodimentSelection): Which embodiment selection to use for computing trajectories Returns: 2-tuple or list of MotionGenResult: If @return_full_result is True, will return a list of raw MotionGenResult @@ -489,6 +559,9 @@ def compute_trajectories( # print("Robot is near joint limits! No trajectory will be computed") # return None, None if not return_full_result else None + if not skip_obstacle_update: + self.update_obstacles() + # If target_pos and target_quat are torch tensors, it's assumed that they correspond to the default ee_link if isinstance(target_pos, th.Tensor): target_pos = {self.ee_link[emb_sel]: target_pos} @@ -501,29 +574,6 @@ def compute_trajectories( target_pos = {k: v if len(v.shape) == 2 else v.unsqueeze(0) for k, v in target_pos.items()} target_quat = {k: v if len(v.shape) == 2 else v.unsqueeze(0) for k, v in target_quat.items()} - # Define the plan config - plan_cfg = lazy.curobo.wrap.reacher.motion_gen.MotionGenPlanConfig( - enable_graph=False, - max_attempts=max_attempts, - timeout=timeout, - enable_graph_attempt=None, - ik_fail_return=ik_fail_return, - enable_finetune_trajopt=enable_finetune_trajopt, - finetune_attempts=finetune_attempts, - success_ratio=1.0 / self.batch_size if success_ratio is None else success_ratio, - ) - - # Add the pose cost metric - if motion_constraint is None: - motion_constraint = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0] - pose_cost_metric = lazy.curobo.wrap.reacher.motion_gen.PoseCostMetric( - hold_partial_pose=False, hold_vec_weight=self._tensor_args.to_device(motion_constraint) - ) - plan_cfg.pose_cost_metric = pose_cost_metric - - if not skip_obstacle_update: - self.update_obstacles() - for link_name in target_pos.keys(): target_pos_link = target_pos[link_name] target_quat_link = target_quat[link_name] @@ -552,6 +602,26 @@ def compute_trajectories( target_pos[link_name] = target_pos_link target_quat[link_name] = target_quat_link + # Define the plan config + plan_cfg = lazy.curobo.wrap.reacher.motion_gen.MotionGenPlanConfig( + enable_graph=False, + max_attempts=max_attempts, + timeout=timeout, + enable_graph_attempt=None, + ik_fail_return=ik_fail_return, + enable_finetune_trajopt=enable_finetune_trajopt, + finetune_attempts=finetune_attempts, + success_ratio=1.0 / self.batch_size if success_ratio is None else success_ratio, + ) + + # Add the pose cost metric + if motion_constraint is None: + motion_constraint = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0] + pose_cost_metric = 
lazy.curobo.wrap.reacher.motion_gen.PoseCostMetric( + hold_partial_pose=False, hold_vec_weight=self._tensor_args.to_device(motion_constraint) + ) + plan_cfg.pose_cost_metric = pose_cost_metric + # Construct initial state q_pos = th.stack([self.robot.get_joint_positions()] * self.batch_size, axis=0) q_vel = th.stack([self.robot.get_joint_velocities()] * self.batch_size, axis=0) @@ -688,21 +758,17 @@ def compute_trajectories( if len(ik_goal_batch_by_link) == 0: ik_goal_batch_by_link = None - result = self.mg[emb_sel].plan_batch( - cu_js_batch, main_ik_goal_batch, plan_cfg, link_poses=ik_goal_batch_by_link + plan_fn = self.plan_batch if not ik_only else self.solve_ik_batch + result, success, joint_state = plan_fn( + cu_js_batch, main_ik_goal_batch, plan_cfg, link_poses=ik_goal_batch_by_link, emb_sel=emb_sel ) if self.debug: breakpoint() # Append results results.append(result) - successes = th.concatenate([successes, result.success[:end_idx]]) - - # If result.interpolated_plan is be None (e.g. IK failure), return Nones - if result.interpolated_plan is None: - paths += [None] * end_idx - else: - paths += result.get_paths()[:end_idx] + successes = th.concatenate([successes, success[:end_idx]]) + paths += joint_state[:end_idx] # Detach attached object if it was attached if attached_obj is not None: @@ -717,12 +783,13 @@ def compute_trajectories( else: return successes, paths - def path_to_joint_trajectory(self, path, emb_sel=CuroboEmbodimentSelection.DEFAULT): + def path_to_joint_trajectory(self, path, get_full_js=True, emb_sel=CuroboEmbodimentSelection.DEFAULT): """ Converts raw path from motion generator into joint trajectory sequence Args: path (JointState): Joint state path to convert into joint trajectory + get_full_js (bool): Whether to get the full joint state emb_sel (CuroboEmbodimentSelection): Which embodiment to use for the robot Returns: @@ -730,7 +797,7 @@ def path_to_joint_trajectory(self, path, emb_sel=CuroboEmbodimentSelection.DEFAU to reach the desired @target_pos, @target_quat configuration, where T is the number of interpolated steps and D is the number of robot joints. 
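Downstream, a successful plan is consumed by expanding the raw path into an ordered joint trajectory and replaying it through the controller. Roughly (a sketch; mg is the CuRoboMotionGenerator, path one entry of the paths returned by compute_trajectories, and _q_to_action the primitive helper seen in earlier hunks):

    q_traj = mg.path_to_joint_trajectory(path)  # (T, D), ordered like mg.robot_joint_names
    for q in q_traj:
        env.step(controller._q_to_action(q))

The new get_full_js flag appears intended for paths that already carry the full joint state (such as IK-only results), which can skip the get_full_js expansion.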
""" - cmd_plan = self.mg[emb_sel].get_full_js(path) + cmd_plan = self.mg[emb_sel].get_full_js(path) if get_full_js else path return cmd_plan.get_ordered_joint_state(self.robot_joint_names).position def convert_q_to_eef_traj(self, traj, return_axisangle=False, emb_sel=CuroboEmbodimentSelection.DEFAULT): From fe3c00349b91965e844a812cee28270d05eefe27 Mon Sep 17 00:00:00 2001 From: Chengshu Li Date: Tue, 10 Dec 2024 16:04:51 -0800 Subject: [PATCH 25/76] remove test_curobo.py from all robots --- omnigibson/robots/fetch.py | 18 ----------------- omnigibson/robots/r1.py | 27 ------------------------- omnigibson/robots/tiago.py | 40 -------------------------------------- 3 files changed, 85 deletions(-) diff --git a/omnigibson/robots/fetch.py b/omnigibson/robots/fetch.py index 0fd6496b2..e558a9806 100644 --- a/omnigibson/robots/fetch.py +++ b/omnigibson/robots/fetch.py @@ -283,24 +283,6 @@ def camera_joint_names(self): def trunk_joint_names(self): return ["torso_lift_joint"] - @property - def manipulation_link_names(self): - return [ - "torso_lift_link", - "head_pan_link", - "head_tilt_link", - "shoulder_pan_link", - "shoulder_lift_link", - "upperarm_roll_link", - "elbow_flex_link", - "forearm_roll_link", - "wrist_flex_link", - "wrist_roll_link", - "gripper_link", - "l_gripper_finger_link", - "r_gripper_finger_link", - ] - @property def arm_link_names(self): return { diff --git a/omnigibson/robots/r1.py b/omnigibson/robots/r1.py index 2270ba71e..753d771a1 100644 --- a/omnigibson/robots/r1.py +++ b/omnigibson/robots/r1.py @@ -206,33 +206,6 @@ def trunk_link_names(self): def trunk_joint_names(self): return [f"torso_joint{i}" for i in range(1, 5)] - @property - def manipulation_link_names(self): - return [ - "torso_link1", - "torso_link2", - "torso_link3", - "torso_link4", - "left_arm_link1", - "left_arm_link2", - "left_arm_link3", - "left_arm_link4", - "left_arm_link5", - "left_arm_link6", - "left_gripper_link1", - "left_gripper_link2", - "left_hand", - "right_arm_link1", - "right_arm_link2", - "right_arm_link3", - "right_arm_link4", - "right_arm_link5", - "right_arm_link6", - "right_gripper_link1", - "right_gripper_link2", - "right_hand", - ] - @classproperty def n_arms(cls): return 2 diff --git a/omnigibson/robots/tiago.py b/omnigibson/robots/tiago.py index 43614b8b4..2c9cc4ab4 100644 --- a/omnigibson/robots/tiago.py +++ b/omnigibson/robots/tiago.py @@ -371,46 +371,6 @@ def camera_joint_names(self): def trunk_joint_names(self): return ["torso_lift_joint"] - @property - def manipulation_link_names(self): - return [ - "torso_fixed_link", - "torso_lift_link", - "arm_left_1_link", - "arm_left_2_link", - "arm_left_3_link", - "arm_left_4_link", - "arm_left_5_link", - "arm_left_6_link", - "arm_left_7_link", - "arm_left_tool_link", - "wrist_left_ft_link", - "wrist_left_ft_tool_link", - "gripper_left_link", - # "gripper_left_grasping_frame", - "gripper_left_left_finger_link", - "gripper_left_right_finger_link", - "gripper_left_tool_link", - "arm_right_1_link", - "arm_right_2_link", - "arm_right_3_link", - "arm_right_4_link", - "arm_right_5_link", - "arm_right_6_link", - "arm_right_7_link", - "arm_right_tool_link", - "wrist_right_ft_link", - "wrist_right_ft_tool_link", - "gripper_right_link", - # "gripper_right_grasping_frame", - "gripper_right_left_finger_link", - "gripper_right_right_finger_link", - "gripper_right_tool_link", - "head_1_link", - "head_2_link", - "xtion_link", - ] - @property def arm_link_names(self): return {arm: [f"arm_{arm}_{i}_link" for i in range(1, 8)] for arm in self.arm_names} From 
82fffbec73313085e50112793b0dcda677a9c0c9 Mon Sep 17 00:00:00 2001 From: Chengshu Li Date: Wed, 11 Dec 2024 11:10:23 -0800 Subject: [PATCH 26/76] syntax fix --- omnigibson/action_primitives/curobo.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/omnigibson/action_primitives/curobo.py b/omnigibson/action_primitives/curobo.py index a335d3285..f49176665 100644 --- a/omnigibson/action_primitives/curobo.py +++ b/omnigibson/action_primitives/curobo.py @@ -415,7 +415,7 @@ def update_locked_joints(self, cu_joint_state, emb_sel): kc.fixed_transforms[link_idx] = relative_pose def solve_ik_batch( - self, start_state, goal_pose, plan_config, link_poses=None, emb_sel=CuroboEmbodimentSelection.DEFAULT + self, start_state, goal_pose, plan_config, link_poses=None, emb_sel=CuRoboEmbodimentSelection.DEFAULT ): """Find IK solutions to reach a batch of goal poses from a batch of start joint states. @@ -456,7 +456,7 @@ def solve_ik_batch( return result, success, joint_state def plan_batch( - self, start_state, goal_pose, plan_config, link_poses=None, emb_sel=CuroboEmbodimentSelection.DEFAULT + self, start_state, goal_pose, plan_config, link_poses=None, emb_sel=CuRoboEmbodimentSelection.DEFAULT ): """Plan a batch of trajectories from a batch of start joint states to a batch of goal poses. From 21fa12e119b0d21d47d5402f2233361413fd7ca0 Mon Sep 17 00:00:00 2001 From: Chengshu Li Date: Wed, 11 Dec 2024 14:30:42 -0800 Subject: [PATCH 27/76] use curobo for IK in primitive --- omnigibson/action_primitives/curobo.py | 27 ++++-- .../starter_semantic_action_primitives.py | 93 +++++++++++-------- omnigibson/examples/robots/curobo_example.py | 2 +- omnigibson/tasks/grasp_task.py | 2 +- 4 files changed, 78 insertions(+), 46 deletions(-) diff --git a/omnigibson/action_primitives/curobo.py b/omnigibson/action_primitives/curobo.py index f49176665..8634852ae 100644 --- a/omnigibson/action_primitives/curobo.py +++ b/omnigibson/action_primitives/curobo.py @@ -332,7 +332,7 @@ def update_obstacles_fast(self): def check_collisions( self, q, - check_self_collision=True, + self_collision_check=True, skip_obstacle_update=False, ): """ @@ -341,7 +341,7 @@ def check_collisions( Args: q (th.tensor): (N, D)-shaped tensor, representing N-total different joint configurations to check collisions against the world - check_self_collision (bool): Whether to check self-collisions or not + self_collision_check (bool): Whether to check self-collisions or not emb_sel (CuRoboEmbodimentSelection): Which embodiment selection to use for checking collisions Returns: @@ -378,7 +378,7 @@ def check_collisions( self.mg[emb_sel].rollout_fn.primitive_collision_constraint.forward(robot_spheres).squeeze(1) ) collision_results = collision_dist > 0.0 - if check_self_collision: + if self_collision_check: self_collision_dist = ( self.mg[emb_sel].rollout_fn.robot_self_collision_constraint.forward(robot_spheres).squeeze(1) ) @@ -500,6 +500,7 @@ def compute_trajectories( attached_obj_scale=None, skip_obstacle_update=False, ik_only=False, + ik_world_collision_check=True, emb_sel=CuRoboEmbodimentSelection.DEFAULT, ): """ @@ -535,6 +536,7 @@ def compute_trajectories( link names and the values are the corresponding scale to apply to the attached object skip_obstacle_update (bool): Whether to skip updating the obstacles in the world collision checker ik_only (bool): Whether to only run the IK solver and not the trajectory optimization + ik_world_collision_check (bool): Whether to check for collisions in the world when running the IK solver for ik_only 
mode emb_sel (CuRoboEmbodimentSelection): Which embodiment selection to use for computing trajectories Returns: 2-tuple or list of MotionGenResult: If @return_full_result is True, will return a list of raw MotionGenResult @@ -689,15 +691,28 @@ def compute_trajectories( else rollout_fn.pose_convergence.disable_cost() ) for additional_link in self.additional_links[emb_sel]: + ( + rollout_fn._link_pose_costs[additional_link].enable_cost() + if additional_link in target_pos + else rollout_fn._link_pose_costs[additional_link].disable_cost() + ) ( rollout_fn._link_pose_convergence[additional_link].enable_cost() if additional_link in target_pos else rollout_fn._link_pose_convergence[additional_link].disable_cost() ) + + if ik_only: + for rollout_fn in self.mg[emb_sel].ik_solver.get_all_rollout_instances(): ( - rollout_fn._link_pose_costs[additional_link].enable_cost() - if additional_link in target_pos - else rollout_fn._link_pose_costs[additional_link].disable_cost() + rollout_fn.primitive_collision_cost.enable_cost() + if ik_world_collision_check + else rollout_fn.primitive_collision_cost.disable_cost() + ) + ( + rollout_fn.primitive_collision_constraint.enable_cost() + if ik_world_collision_check + else rollout_fn.primitive_collision_constraint.disable_cost() ) # Determine how many internal batches we need to run based on submitted size diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index 1b59a0638..8611df549 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -25,7 +25,7 @@ ActionPrimitiveErrorGroup, BaseActionPrimitiveSet, ) -from omnigibson.action_primitives.curobo import CuroboEmbodimentSelection, CuRoboMotionGenerator +from omnigibson.action_primitives.curobo import CuRoboEmbodimentSelection, CuRoboMotionGenerator from omnigibson.controllers import DifferentialDriveController, InverseKinematicsController, JointController from omnigibson.macros import create_module_macros from omnigibson.objects.object_base import BaseObject @@ -660,12 +660,13 @@ def _place_with_predicate(self, obj, predicate): # Sample location to place object pose_candidates = [] directly_move_hand_pose = None + self._motion_generator.update_obstacles() while len(pose_candidates) < m.MAX_ATTEMPTS_FOR_SAMPLING_PLACE_POSE: obj_pose = self._sample_pose_with_object_and_predicate(predicate, obj_in_hand, obj) # TODO: why do we sample this orientation? 
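With this hunk, the question "is the target in reach" is answered by a collision-aware batched IK query against the obstacle state cached by the update_obstacles() call added just above. The shape of the check (a sketch; controller as in the examples, hand_pose a world-frame (pos, quat) tuple):

    reach = controller._target_in_reach_of_robot(hand_pose, skip_obstacle_update=True)
    if reach[controller.arm]:
        ...  # the hand can be moved directly, no base repositioning needed

_target_in_reach_of_robot returns a per-arm dict, which is why the call sites index it with the active arm.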
obj_pose = (obj_pose[0], obj_in_hand.get_position_orientation()[1]) hand_pose = self._get_hand_pose_for_object_pose(obj_pose) - if self._target_in_reach_of_robot(hand_pose)[self.arm]: + if self._target_in_reach_of_robot(hand_pose, skip_obstacle_update=True)[self.arm]: directly_move_hand_pose = hand_pose break else: @@ -730,25 +731,27 @@ def _convert_cartesian_to_joint_space(self, target_pose, arm=None): ) return joint_pos - def _target_in_reach_of_robot(self, target_pose): + def _target_in_reach_of_robot(self, target_pose, skip_obstacle_update=False): """ Determines whether the eef for the robot can reach the target pose in the world frame Args: target_pose (Iterable of array): Position and orientation arrays in an iterable for the pose for the eef + skip_obstacle_update (bool): Whether to skip updating obstacles Returns: dict: Whether each eef can reach the target pose """ relative_target_pose = self._world_pose_to_robot_pose(target_pose) - return self._target_in_reach_of_robot_relative(relative_target_pose) + return self._target_in_reach_of_robot_relative(relative_target_pose, skip_obstacle_update=skip_obstacle_update) - def _target_in_reach_of_robot_relative(self, relative_target_pose): + def _target_in_reach_of_robot_relative(self, relative_target_pose, skip_obstacle_update=False): """ Determines whether eef for the robot can reach the target pose where the target pose is in the robot frame Args: - target_pose (Iterable of array): Position and orientation arrays in an iterable for pose for the eef + relative_target_pose (Iterable of array): Position and orientation arrays in an iterable for pose for the eef in the robot frame + skip_obstacle_update (bool): Whether to skip updating obstacles Returns: dict: Whether each eef can reach the target pose @@ -756,7 +759,12 @@ def _target_in_reach_of_robot_relative(self, relative_target_pose): # TODO: output may need to be a dictionary of arms and whether they can reach the target pose target_in_reach = dict() for arm in self.robot.arm_names: - target_in_reach[arm] = self._ik_solver_cartesian_to_joint_space(relative_target_pose, arm=arm) is not None + target_in_reach[arm] = ( + self._ik_solver_cartesian_to_joint_space( + relative_target_pose, arm=arm, skip_obstacle_update=skip_obstacle_update + ) + is not None + ) return target_in_reach def _manipulation_control_idx(self, arm=None): @@ -764,19 +772,16 @@ def _manipulation_control_idx(self, arm=None): if arm is None: arm = self.arm - if m.TORSO_FIXED or arm == "right": - return self.robot.arm_control_idx[arm] - else: - # Only append trunk control idx if it is not fixed and the arm is left - if hasattr(self.robot, "trunk_control_idx"): - return th.cat([self.robot.trunk_control_idx, self.robot.arm_control_idx[arm]]) + return th.cat([self.robot.trunk_control_idx, self.robot.arm_control_idx[arm]]) - def _ik_solver_cartesian_to_joint_space(self, relative_target_pose, arm=None): + def _ik_solver_cartesian_to_joint_space(self, relative_target_pose, arm=None, skip_obstacle_update=False): """ Get joint positions for the arm so eef is at the target pose where the target pose is in the robot frame Args: relative_target_pose (Iterable of array): Position and orientation arrays in an iterable for pose in the robot frame + arm (str): Arm to use for the target pose + skip_obstacle_update (bool): Whether to skip updating obstacles Returns: 2-tuple @@ -785,23 +790,27 @@ def _ik_solver_cartesian_to_joint_space(self, relative_target_pose, arm=None): """ if arm is None: arm = self.arm - # TODO: make sure descriptor 
file match torso & arm setting
-        ik_solver = IKSolver(
-            robot_description_path=self.robot.robot_arm_descriptor_yamls[arm],
-            robot_urdf_path=self.robot.urdf_path,
-            reset_joint_pos=self.robot.reset_joint_pos[self._manipulation_control_idx(arm)],
-            eef_name=self.robot.eef_link_names[self.arm],
-        )
-        # Grab the joint positions in order to reach the desired pose target
-        joint_pos = ik_solver.solve(
-            target_pos=relative_target_pose[0],
-            target_quat=relative_target_pose[1],
-            max_iterations=200,
-            initial_joint_pos=self.robot.get_joint_positions()[self._manipulation_control_idx(arm)],
-            tolerance_pos=0.005,
+        world_pose = self._robot_pose_to_world_pose(relative_target_pose)
+        target_pos = {self.robot.eef_link_names[arm]: world_pose[0]}
+        target_quat = {self.robot.eef_link_names[arm]: world_pose[1]}
+        successes, joint_states = self._motion_generator.compute_trajectories(
+            target_pos=target_pos,
+            target_quat=target_quat,
+            is_local=False,
+            return_full_result=False,
+            ik_only=True,
+            ik_world_collision_check=False,
+            skip_obstacle_update=skip_obstacle_update,
+            emb_sel=CuRoboEmbodimentSelection.ARM,
         )
+        if not successes[0]:
+            return None

-        return joint_pos
+        joint_state = joint_states[0]
+        joint_pos = self._motion_generator.path_to_joint_trajectory(
+            joint_state, get_full_js=False, emb_sel=CuRoboEmbodimentSelection.ARM
+        )
+        return joint_pos[self._manipulation_control_idx(arm)].cpu()

     def _move_hand(
         self,
         target_pose,
         avoid_collision=True,
         arm=None,
@@ -852,9 +861,9 @@
             q_traj = self._plan_joint_motion(
                 target_pos=target_pos,
                 target_quat=target_quat,
-                embodiment_selection=CuroboEmbodimentSelection.ARM,
+                embodiment_selection=CuRoboEmbodimentSelection.ARM,
                 motion_constraint=motion_constraint,
-            ).cpu()
+            )
         else:
             # Move EEF directly without collision checking
             goal_arm_joint_pos = self._convert_cartesian_to_joint_space(target_pose, arm=arm)
             curr_joint_pos = self.robot.get_joint_positions()
             goal_joint_pos = curr_joint_pos.clone()
             goal_joint_pos[self._manipulation_control_idx(arm)] = goal_arm_joint_pos
             q_traj = th.stack(self._add_linearly_interpolated_waypoints(plan=[curr_joint_pos, goal_joint_pos]))
+        indented_print(f"Plan has {len(q_traj)} steps")

         yield from self._execute_motion_plan(q_traj, stop_on_contact=not avoid_collision, low_precision=low_precision)

@@ -869,7 +879,7 @@
     def _plan_joint_motion(
         self,
         target_pos,
         target_quat,
-        embodiment_selection=CuroboEmbodimentSelection.DEFAULT,
+        embodiment_selection=CuRoboEmbodimentSelection.DEFAULT,
         attached_obj=None,
         motion_constraint=None,
     ):
@@ -919,7 +929,10 @@ def _plan_joint_motion(
                 "There is no accessible path from where you are to the desired pose.
Try again", ) - return self._motion_generator.path_to_joint_trajectory(traj_path, embodiment_selection) + q_traj = self._motion_generator.path_to_joint_trajectory( + traj_path, get_full_js=True, emb_sel=embodiment_selection + ) + return q_traj.cpu() def _execute_motion_plan( self, q_traj, stop_on_contact=False, ignore_failure=False, low_precision=False, ignore_physics=False @@ -1439,9 +1452,9 @@ def _reset_robot(self, attached_obj=None): q_traj = self._plan_joint_motion( target_pos=target_pos, target_quat=target_quat, - embodiment_selection=CuroboEmbodimentSelection.ARM, + embodiment_selection=CuRoboEmbodimentSelection.ARM, attached_obj=attached_obj, - ).cpu() + ) indented_print(f"Plan has {len(q_traj)} steps") yield from self._execute_motion_plan(q_traj, low_precision=True) @@ -1596,7 +1609,7 @@ def _navigate_to_pose(self, pose_2d): target_pos = {self.robot.base_footprint_link_name: pose_3d[0]} target_quat = {self.robot.base_footprint_link_name: pose_3d[1]} - q_traj = self._plan_joint_motion(target_pos, target_quat, CuroboEmbodimentSelection.BASE).cpu() + q_traj = self._plan_joint_motion(target_pos, target_quat, CuRoboEmbodimentSelection.BASE) yield from self._execute_motion_plan(q_traj, stop_on_contact=True) def _draw_plan(self, plan): @@ -1931,6 +1944,8 @@ def _validate_poses(self, candidate_poses, pose_on_obj=None, arm=None, skip_obst candidate_poses (list of arrays): Candidate 2d poses (x, y, yaw) pose_on_obj (Iterable of arrays or list of Iterables): Pose(s) on the object(s) in the world frame. Can be a single pose or list of poses. + arm (str): Name of the arm to check reachability for + skip_obstacle_update (bool): Whether to skip updating the obstacles in the motion generator Returns: list of bool: Whether any robot arm can reach all poses on the objects and is not in collision @@ -1950,7 +1965,7 @@ def _validate_poses(self, candidate_poses, pose_on_obj=None, arm=None, skip_obst candidate_joint_positions = th.stack(candidate_joint_positions) invalid_results = self._motion_generator.check_collisions( - candidate_joint_positions, check_self_collision=False, skip_obstacle_update=skip_obstacle_update + candidate_joint_positions, self_collision_check=False, skip_obstacle_update=skip_obstacle_update ).cpu() # For each candidate that passed collision check, verify reachability @@ -1961,7 +1976,9 @@ def _validate_poses(self, candidate_poses, pose_on_obj=None, arm=None, skip_obst if pose_on_obj is not None: pose = self._get_robot_pose_from_2d_pose(candidate_poses[i]) relative_pose = T.relative_pose_transform(*pose_on_obj, *pose) - if not self._target_in_reach_of_robot_relative(relative_pose)[arm]: + if not self._target_in_reach_of_robot_relative( + relative_pose, skip_obstacle_update=skip_obstacle_update + )[arm]: invalid_results[i] = True return ~invalid_results diff --git a/omnigibson/examples/robots/curobo_example.py b/omnigibson/examples/robots/curobo_example.py index ae54ce866..3b0577a3a 100644 --- a/omnigibson/examples/robots/curobo_example.py +++ b/omnigibson/examples/robots/curobo_example.py @@ -58,7 +58,7 @@ def plan_and_execute_trajectory( if success: print("Successfully planned trajectory") if not dry_run: - q_traj = cmg.path_to_joint_trajectory(traj_path, emb_sel) + q_traj = cmg.path_to_joint_trajectory(traj_path, get_full_js=True, emb_sel=emb_sel) execute_trajectory(q_traj, env, robot, attached_obj) else: print("Failed to plan trajectory") diff --git a/omnigibson/tasks/grasp_task.py b/omnigibson/tasks/grasp_task.py index 7ccd6b123..b7c7a7eae 100644 --- 
a/omnigibson/tasks/grasp_task.py +++ b/omnigibson/tasks/grasp_task.py @@ -109,7 +109,7 @@ def _reset_agent(self, env): joint_pos, joint_control_idx = self._get_random_joint_position(robot) initial_joint_pos[control_idx_in_joint_pos] = joint_pos collision_detected = self._primitive_controller._motion_generator.check_collisions( - [initial_joint_pos], check_self_collision=False + [initial_joint_pos], self_collision_check=False ).cpu()[0] if not collision_detected: robot.set_joint_positions(joint_pos, joint_control_idx) From b284d28c676e1971e4d7118ae08fa015e57988ad Mon Sep 17 00:00:00 2001 From: Chengshu Li Date: Wed, 11 Dec 2024 15:44:14 -0800 Subject: [PATCH 28/76] cleanup robot and world frame --- .../starter_semantic_action_primitives.py | 39 ++++++++++++++----- 1 file changed, 29 insertions(+), 10 deletions(-) diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index 8611df549..6902aa680 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -722,8 +722,7 @@ def _convert_cartesian_to_joint_space(self, target_pose, arm=None): - th.tensor or None: Joint positions to reach target pose or None if impossible to reach target pose - th.tensor: Indices for joints in the robot """ - relative_target_pose = self._world_pose_to_robot_pose(target_pose) - joint_pos = self._ik_solver_cartesian_to_joint_space(relative_target_pose, arm=arm) + joint_pos = self._ik_solver_cartesian_to_joint_space(target_pose, arm=arm, frame="world") if joint_pos is None: raise ActionPrimitiveError( ActionPrimitiveError.Reason.PLANNING_ERROR, @@ -742,8 +741,18 @@ def _target_in_reach_of_robot(self, target_pose, skip_obstacle_update=False): Returns: dict: Whether each eef can reach the target pose """ - relative_target_pose = self._world_pose_to_robot_pose(target_pose) - return self._target_in_reach_of_robot_relative(relative_target_pose, skip_obstacle_update=skip_obstacle_update) + target_in_reach = dict() + for arm in self.robot.arm_names: + target_in_reach[arm] = ( + self._ik_solver_cartesian_to_joint_space( + target_pose, + arm=arm, + skip_obstacle_update=skip_obstacle_update, + frame="world", + ) + is not None + ) + return target_in_reach def _target_in_reach_of_robot_relative(self, relative_target_pose, skip_obstacle_update=False): """ @@ -756,12 +765,14 @@ def _target_in_reach_of_robot_relative(self, relative_target_pose, skip_obstacle Returns: dict: Whether each eef can reach the target pose """ - # TODO: output may need to be a dictionary of arms and whether they can reach the target pose target_in_reach = dict() for arm in self.robot.arm_names: target_in_reach[arm] = ( self._ik_solver_cartesian_to_joint_space( - relative_target_pose, arm=arm, skip_obstacle_update=skip_obstacle_update + relative_target_pose, + arm=arm, + skip_obstacle_update=skip_obstacle_update, + frame="robot", ) is not None ) @@ -774,7 +785,7 @@ def _manipulation_control_idx(self, arm=None): return th.cat([self.robot.trunk_control_idx, self.robot.arm_control_idx[arm]]) - def _ik_solver_cartesian_to_joint_space(self, relative_target_pose, arm=None, skip_obstacle_update=False): + def _ik_solver_cartesian_to_joint_space(self, target_pose, arm=None, skip_obstacle_update=False, frame="robot"): """ Get joint positions for the arm so eef is at the target pose where the target pose is in the robot frame @@ -782,6 +793,7 @@ def 
_ik_solver_cartesian_to_joint_space(self, relative_target_pose, arm=None, sk relative_target_pose (Iterable of array): Position and orientation arrays in an iterable for pose in the robot frame arm (str): Arm to use for the target pose skip_obstacle_update (bool): Whether to skip updating obstacles + frame (str): Frame to use for the target pose Returns: 2-tuple @@ -790,9 +802,16 @@ def _ik_solver_cartesian_to_joint_space(self, relative_target_pose, arm=None, sk """ if arm is None: arm = self.arm - world_pose = self._robot_pose_to_world_pose(relative_target_pose) - target_pos = {self.robot.eef_link_names[arm]: world_pose[0]} - target_quat = {self.robot.eef_link_names[arm]: world_pose[1]} + + if frame == "robot": + target_pose = self._robot_pose_to_world_pose(target_pose) + elif frame == "world": + pass + else: + raise ValueError(f"Unsupported frame: {frame}") + + target_pos = {self.robot.eef_link_names[arm]: target_pose[0]} + target_quat = {self.robot.eef_link_names[arm]: target_pose[1]} successes, joint_states = self._motion_generator.compute_trajectories( target_pos=target_pos, target_quat=target_quat, From 1eb7244f6bbbe21d46a6ed7f5a3537b8104c5851 Mon Sep 17 00:00:00 2001 From: hang-yin Date: Thu, 12 Dec 2024 16:27:16 -0800 Subject: [PATCH 29/76] Fix unit test failures --- .../starter_semantic_action_primitives.py | 62 ++++++++++++------- .../symbolic_semantic_action_primitives.py | 4 +- omnigibson/robots/r1.py | 3 +- tests/test_robot_states_flatcache.py | 4 +- tests/test_symbolic_primitives.py | 10 +-- 5 files changed, 49 insertions(+), 34 deletions(-) diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index 6902aa680..c6a874027 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -136,6 +136,7 @@ def __init__( planning_batch_size=3, collision_check_batch_size=5, debug_visual_marker=None, + skip_curobo_initilization=False, ): """ Initializes a StarterSemanticActionPrimitives generator. @@ -151,13 +152,13 @@ def __init__( planning_batch_size (int): The batch size for curobo motion planning. Defaults to 3. collision_check_batch_size (int): The batch size for curobo collision checking. Defaults to 5. debug_visual_marker (PrimitiveObject): The object to use for debug visual markers. Defaults to None. + skip_curobo_initilization (bool): Whether to skip curobo initialization. Defaults to False. """ log.warning( "The StarterSemanticActionPrimitive is a work-in-progress and is only provided as an example. " "It currently only works with Tiago and R1 with their JointControllers set to absolute position mode with impedance." ) super().__init__(env, robot) - self._motion_generator = CuRoboMotionGenerator(robot=self.robot, batch_size=planning_batch_size) self.controller_functions = { StarterSemanticActionPrimitiveSet.GRASP: self._grasp, StarterSemanticActionPrimitiveSet.PLACE_ON_TOP: self._place_on_top, @@ -169,19 +170,18 @@ def __init__( StarterSemanticActionPrimitiveSet.TOGGLE_ON: self._toggle_on, StarterSemanticActionPrimitiveSet.TOGGLE_OFF: self._toggle_off, } - # Validate the robot - if isinstance(self.robot, LocomotionRobot): - base_controller = self.robot.controllers["base"] - assert ( - isinstance(base_controller, (JointController)) and not base_controller.use_delta_commands - ), "StarterSemanticActionPrimitives only works with a JointController with absolute mode at the robot base." 
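        # Illustrative note (not part of the diff): with skip_curobo_initilization=True the
        # motion generator below stays None, which lets lightweight callers (e.g. tests that
        # only exercise direct, non-planned motions) avoid cuRobo's startup cost. Any
        # planning call would then need a guard along these lines (hypothetical):
        #     if self._motion_generator is None:
        #         raise RuntimeError("cuRobo was skipped; planning primitives are unavailable")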
+ self._motion_generator = ( + None + if skip_curobo_initilization + else CuRoboMotionGenerator(robot=self.robot, batch_size=planning_batch_size) + ) self._task_relevant_objects_only = task_relevant_objects_only self._enable_head_tracking = enable_head_tracking self._always_track_eef = always_track_eef self._tracking_object = None - self._arm = self.robot.default_arm + self._arm = self.robot.default_arm if isinstance(self.robot, ManipulationRobot) else None # Store the current position of the arm as the arm target control_dict = self.robot.get_control_dict() @@ -1728,15 +1728,20 @@ def _navigate_to_pose_direct(self, pose_2d, low_precision=False): else: action = self._empty_action() - base_action_size = self.robot.controller_action_idx["base"].numel() - assert ( - base_action_size == 3 - ), "Currently, the action primitives only support [x, y, theta] joint controller" - - base_action = th.tensor( - [body_target_pose[0][0], body_target_pose[0][1], body_target_pose[1]], dtype=th.float32 - ) - action[self.robot.controller_action_idx["base"]] = base_action + if isinstance(self.robot.controllers["base"], JointController): + base_action_size = self.robot.controller_action_idx["base"].numel() + assert ( + base_action_size == 3 + ), "Currently, the action primitives only support [x, y, theta] joint controller" + direction_vec = ( + body_target_pose[0][:2] / th.norm(body_target_pose[0][:2]) * m.KP_LIN_VEL[type(self.robot)] + ) + base_action = th.tensor([direction_vec[0], direction_vec[1], 0.0], dtype=th.float32) + action[self.robot.controller_action_idx["base"]] = base_action + else: + # Diff drive controller + base_action = th.tensor([m.KP_LIN_VEL[type(self.robot)], 0.0], dtype=th.float32) + action[self.robot.controller_action_idx["base"]] = base_action yield self._postprocess_action(action) @@ -1771,14 +1776,25 @@ def _rotate_in_place(self, end_pose, angle_threshold=m.DEFAULT_ANGLE_THRESHOLD): action = self._empty_action() + direction = -1.0 if diff_yaw < 0.0 else 1.0 + ang_vel = m.KP_ANGLE_VEL[type(self.robot)] * direction + + if isinstance(self.robot, Locobot) or isinstance(self.robot, Freight): + # Locobot and Freight wheel joints are reversed + ang_vel = -ang_vel + base_action = action[self.robot.controller_action_idx["base"]] - assert ( - base_action.numel() == 3 - ), "Currently, the action primitives only support [x, y, theta] joint controller" - base_action[0] = 0.0 - base_action[1] = 0.0 - base_action[2] = T.quat2euler(body_target_pose[1])[2].item() + if not isinstance(self.robot.controllers["base"], JointController): + base_action[0] = 0.0 + base_action[1] = ang_vel + else: + assert ( + base_action.numel() == 3 + ), "Currently, the action primitives only support [x, y, theta] joint controller" + base_action[0] = 0.0 + base_action[1] = 0.0 + base_action[2] = ang_vel action[self.robot.controller_action_idx["base"]] = base_action yield self._postprocess_action(action) diff --git a/omnigibson/action_primitives/symbolic_semantic_action_primitives.py b/omnigibson/action_primitives/symbolic_semantic_action_primitives.py index 74d588ed3..abb822245 100644 --- a/omnigibson/action_primitives/symbolic_semantic_action_primitives.py +++ b/omnigibson/action_primitives/symbolic_semantic_action_primitives.py @@ -40,8 +40,8 @@ class SymbolicSemanticActionPrimitiveSet(IntEnum): class SymbolicSemanticActionPrimitives(StarterSemanticActionPrimitives): - def __init__(self, env): - super().__init__(env) + def __init__(self, env, robot): + super().__init__(env, robot) self.controller_functions = { 
SymbolicSemanticActionPrimitiveSet.GRASP: self._grasp, SymbolicSemanticActionPrimitiveSet.PLACE_ON_TOP: self._place_on_top, diff --git a/omnigibson/robots/r1.py b/omnigibson/robots/r1.py index f24a37905..2de0deae3 100644 --- a/omnigibson/robots/r1.py +++ b/omnigibson/robots/r1.py @@ -168,10 +168,9 @@ def untucked_default_joint_pos(self): pos = th.zeros(self.n_dof) # Keep the current joint positions for the base joints pos[self.base_idx] = self.get_joint_positions()[self.base_idx] - pos[self.arm_control_idx["left"]] = th.tensor([-0.0464, 2.6172, -1.4584, -0.0433, 1.5899, -1.1587]) - pos[self.arm_control_idx["right"]] = th.tensor([0.0464, 2.6168, -1.4570, 0.0418, -1.5896, 1.1593]) for arm in self.arm_names: pos[self.gripper_control_idx[arm]] = th.tensor([0.03, 0.03]) # open gripper + pos[self.arm_control_idx[arm]] = th.tensor([0.0, 1.906, -0.991, 1.571, 0.915, -1.571]) return pos @property diff --git a/tests/test_robot_states_flatcache.py b/tests/test_robot_states_flatcache.py index 050416677..c578e5e83 100644 --- a/tests/test_robot_states_flatcache.py +++ b/tests/test_robot_states_flatcache.py @@ -185,7 +185,7 @@ def test_robot_load_drive(): robot.reload_controllers(controller_config=controller_config) env.scene.update_initial_state() - action_primitives = StarterSemanticActionPrimitives(robot) + action_primitives = StarterSemanticActionPrimitives(env, robot, skip_curobo_initilization=True) eef_pos = env.robots[0].get_eef_position() eef_orn = env.robots[0].get_eef_orientation() @@ -200,7 +200,7 @@ def test_robot_load_drive(): # If this is a locomotion robot, we want to test driving if isinstance(robot, LocomotionRobot): - action_primitives = StarterSemanticActionPrimitives(robot) + action_primitives = StarterSemanticActionPrimitives(env, robot, skip_curobo_initilization=True) goal_location = th.tensor([0, 1, 0], dtype=th.float32) for action in action_primitives._navigate_to_pose_direct(goal_location): env.step(action) diff --git a/tests/test_symbolic_primitives.py b/tests/test_symbolic_primitives.py index 4793686b7..ecfa300ca 100644 --- a/tests/test_symbolic_primitives.py +++ b/tests/test_symbolic_primitives.py @@ -18,7 +18,7 @@ gm.USE_GPU_DYNAMICS = True gm.ENABLE_TRANSITION_RULES = True -current_robot_type = "Fetch" +current_robot_type = "R1" def load_robot_config(robot_name): @@ -38,7 +38,7 @@ def start_env(robot_type): current_robot_type = robot_type og.clear() - if robot_type not in ["Fetch", "Tiago"]: + if robot_type not in ["R1", "Tiago"]: raise ValueError("Invalid robot configuration") robots = load_robot_config(robot_type) config = { @@ -72,7 +72,7 @@ def start_env(robot_type): "category": "apple", "model": "agveuv", "position": [4.75, 10.75, 1.0], - "bounding_box": [0.098, 0.098, 0.115], + "bounding_box": [0.05, 0.05, 0.05], }, { "type": "DatasetObject", @@ -103,7 +103,7 @@ def retrieve_obj_cfg(obj): def pytest_generate_tests(metafunc): if "robot_type" in metafunc.fixturenames: - metafunc.parametrize("robot_type", ["Fetch", "Tiago"], scope="session") + metafunc.parametrize("robot_type", ["R1", "Tiago"], scope="session") @pytest.fixture(scope="module") @@ -127,7 +127,7 @@ def robot(env): @pytest.fixture def prim_gen(env): - return SymbolicSemanticActionPrimitives(env) + return SymbolicSemanticActionPrimitives(env, env.robots[0]) @pytest.fixture From 1c677948ad3126a1ca540f61af71be2fd767f3d0 Mon Sep 17 00:00:00 2001 From: Chengshu Li Date: Fri, 20 Dec 2024 13:02:57 -0800 Subject: [PATCH 30/76] add reverse_preprocess_command to all curobo joint pos execution --- 
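Note: JointController instances may scale or normalize incoming commands (e.g. to
(-1, 1) input limits), so raw cuRobo joint targets cannot be fed to env.step()
directly; each controller's slice first has to be mapped back through its command
preprocessing. A minimal sketch of the conversion this patch repeats in three places,
mirroring the diff below (the helper name q_to_action comes from the example script
and is illustrative):

    import torch as th

    def q_to_action(q, robot):
        # Undo each controller's command preprocessing on its slice of the joint target
        action = [c._reverse_preprocess_command(q[c.dof_idx]) for c in robot.controllers.values()]
        action = th.cat(action, dim=0)
        assert action.shape[0] == robot.action_dim
        return action

    # Usage: execute a planned cuRobo trajectory waypoint by waypoint
    for q in q_traj:
        env.step(q_to_action(q.cpu(), robot))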
.../starter_semantic_action_primitives.py | 3 ++- omnigibson/examples/robots/curobo_example.py | 17 +++++++++-------- tests/test_curobo.py | 11 ++++++----- 3 files changed, 17 insertions(+), 14 deletions(-) diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index c4f990dd4..155d3d00b 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -1010,7 +1010,8 @@ def _execute_motion_plan( def _q_to_action(self, q): action = [] for controller in self.robot.controllers.values(): - action.append(q[controller.dof_idx]) + command = q[controller.dof_idx] + action.append(controller._reverse_preprocess_command(command)) action = th.cat(action, dim=0) assert action.shape[0] == self.robot.action_dim return action diff --git a/omnigibson/examples/robots/curobo_example.py b/omnigibson/examples/robots/curobo_example.py index 3b0577a3a..d3202b63d 100644 --- a/omnigibson/examples/robots/curobo_example.py +++ b/omnigibson/examples/robots/curobo_example.py @@ -37,7 +37,7 @@ def execute_trajectory(q_traj, env, robot, attached_obj): for i, q in enumerate(q_traj): q = q.cpu() q = set_gripper_joint_positions(robot, q, attached_obj) - command = q_to_command(q, robot) + command = q_to_action(q, robot) num_repeat = 5 print(f"Executing waypoint {i}/{len(q_traj)}") @@ -79,21 +79,22 @@ def control_gripper(env, robot, attached_obj): # Control the gripper to open or close, while keeping the rest of the robot still q = robot.get_joint_positions() q = set_gripper_joint_positions(robot, q, attached_obj) - command = q_to_command(q, robot) + command = q_to_action(q, robot) num_repeat = 30 print(f"Gripper (attached_obj={attached_obj})") for _ in range(num_repeat): env.step(command) -def q_to_command(q, robot): +def q_to_action(q, robot): # Convert target joint positions to command - command = [] + action = [] for controller in robot.controllers.values(): - command.append(q[controller.dof_idx]) - command = th.cat(command, dim=0) - assert command.shape[0] == robot.action_dim - return command + command = q[controller.dof_idx] + action.append(controller._reverse_preprocess_command(command)) + action = th.cat(action, dim=0) + assert action.shape[0] == robot.action_dim + return action def test_curobo(): diff --git a/tests/test_curobo.py b/tests/test_curobo.py index 4161a1e8a..450a0a156 100644 --- a/tests/test_curobo.py +++ b/tests/test_curobo.py @@ -425,16 +425,17 @@ def test_curobo(): else: # Convert target joint positions to command q = q.cpu() - command = [] + action = [] for controller in robot.controllers.values(): - command.append(q[controller.dof_idx]) - command = th.cat(command, dim=0) - assert command.shape[0] == robot.action_dim + command = q[controller.dof_idx] + action.append(controller._reverse_preprocess_command(command)) + action = th.cat(action, dim=0) + assert action.shape[0] == robot.action_dim num_repeat = 3 for j in range(num_repeat): print(f"Executing waypoint {i}/{len(q_traj)}, step {j}") - env.step(command) + env.step(action) for contact in robot.contact_list(): assert contact.body0 in robot.link_prim_paths From 69651f89fbe25bfe43e4baaf51ae2d9ed80a176d Mon Sep 17 00:00:00 2001 From: Chengshu Li Date: Fri, 20 Dec 2024 15:09:55 -0800 Subject: [PATCH 31/76] fix minor bug for is_grasping in manipulation_robot --- omnigibson/robots/manipulation_robot.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/omnigibson/robots/manipulation_robot.py b/omnigibson/robots/manipulation_robot.py index 2cff48465..f0bbee817 100644 --- a/omnigibson/robots/manipulation_robot.py +++ b/omnigibson/robots/manipulation_robot.py @@ -252,8 +252,8 @@ def is_grasping(self, arm="default", candidate_obj=None): # If candidate obj is not None, we also check to see if our fingers are in contact with the object if is_grasping == IsGraspingState.TRUE and candidate_obj is not None: finger_links = {link for link in self.finger_links[arm]} - is_grasping = len(candidate_obj.states[ContactBodies].get_value().intersection(finger_links)) > 0 - + if len(candidate_obj.states[ContactBodies].get_value().intersection(finger_links)) == 0: + is_grasping = IsGraspingState.FALSE return is_grasping def _find_gripper_contacts(self, arm="default", return_contact_positions=False): From c87d7d1bb3b1569ca7cac5714a2d34d783a89c02 Mon Sep 17 00:00:00 2001 From: hang-yin Date: Fri, 3 Jan 2025 13:27:02 -0800 Subject: [PATCH 32/76] Add attached object to collision checks --- omnigibson/action_primitives/curobo.py | 129 +++++++++++++----- .../starter_semantic_action_primitives.py | 29 ++-- 2 files changed, 112 insertions(+), 46 deletions(-) diff --git a/omnigibson/action_primitives/curobo.py b/omnigibson/action_primitives/curobo.py index 8634852ae..bef38f0c2 100644 --- a/omnigibson/action_primitives/curobo.py +++ b/omnigibson/action_primitives/curobo.py @@ -330,10 +330,7 @@ def update_obstacles_fast(self): world_coll_checker._mesh_tensor_list[1][0, i, :7] = inv_pose def check_collisions( - self, - q, - self_collision_check=True, - skip_obstacle_update=False, + self, q, self_collision_check=True, skip_obstacle_update=False, attached_obj=None, attached_obj_scale=None ): """ Checks collisions between the sphere representation of the robot and the rest of the current scene @@ -342,7 +339,11 @@ def check_collisions( q (th.tensor): (N, D)-shaped tensor, representing N-total different joint configurations to check collisions against the world self_collision_check (bool): Whether to check self-collisions or not - emb_sel (CuRoboEmbodimentSelection): Which embodiment selection to use for checking collisions + skip_obstacle_update (bool): Whether to skip updating the obstacles in the world collision checker + attached_obj (None or Dict[str, BaseObject]): If specified, a dictionary where the keys are the end-effector + link names and the values are the corresponding BaseObject instances to attach to that link + attached_obj_scale (None or Dict[str, float]): If specified, a dictionary where the keys are the end-effector + link names and the values are the corresponding scale to apply to the attached object Returns: th.tensor: (N,)-shaped tensor, where each value is True if in collision, else False @@ -369,6 +370,14 @@ def check_collisions( joint_names=self.robot_joint_names, ).get_ordered_joint_state(self.mg[emb_sel].kinematics.joint_names) + # Attach objects if specified + attached_info = self._attach_objects_to_robot( + attached_obj=attached_obj, + attached_obj_scale=attached_obj_scale, + cu_js_batch=cu_js, + emb_sel=emb_sel, + ) + robot_spheres = self.mg[emb_sel].compute_kinematics(cu_js).robot_spheres # (N_samples, n_spheres, 4) --> (N_samples, 1, n_spheres, 4) robot_spheres = robot_spheres.unsqueeze(dim=1) @@ -385,6 +394,9 @@ def check_collisions( self_collision_results = self_collision_dist > 0.0 collision_results = collision_results | self_collision_results + # Detach objects before returning + self._detach_objects_from_robot(attached_info, emb_sel) + 
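        # Illustrative note (not from the diff): attaching the in-hand object merges its
        # collision meshes into the robot's sphere model for the duration of the query, so a
        # grasped object that would clip an obstacle makes that configuration report a
        # collision; detaching afterwards restores the bare robot model. Example call, with
        # the link-name key depending on the robot (hypothetical):
        #     cmg.check_collisions(q, attached_obj={"right_eef_link": obj_in_hand.root_link})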
# Return results return collision_results # shape (B,) @@ -649,28 +661,12 @@ def compute_trajectories( cu_js_batch = cu_joint_state.get_ordered_joint_state(self.mg[emb_sel].kinematics.joint_names) # Attach object to robot if requested - if attached_obj is not None: - for ee_link_name, obj in attached_obj.items(): - assert isinstance(obj, RigidPrim), "attached_object should be a RigidPrim object" - obj_paths = [geom.prim_path for geom in obj.collision_meshes.values()] - assert len(obj_paths) <= 32, f"Expected obj_paths to be at most 32, got: {len(obj_paths)}" - - position, quaternion = self.robot.links[ee_link_name].get_position_orientation() - # xyzw to wxyz - quaternion = quaternion[[3, 0, 1, 2]] - ee_pose = lazy.curobo.types.math.Pose(position=position, quaternion=quaternion).to(self._tensor_args) - self.mg[emb_sel].attach_objects_to_robot( - joint_state=cu_js_batch, - object_names=obj_paths, - ee_pose=ee_pose, - link_name=self.robot.curobo_attached_object_link_names[ee_link_name], - scale=1.0 if attached_obj_scale is None else attached_obj_scale[ee_link_name], - pitch_scale=1.0, - merge_meshes=True, - world_objects_pose_offset=lazy.curobo.types.math.Pose.from_list( - [0, 0, 0.01, 1, 0, 0, 0], self._tensor_args - ), - ) + attached_info = self._attach_objects_to_robot( + attached_obj=attached_obj, + attached_obj_scale=attached_obj_scale, + cu_js_batch=cu_js_batch, + emb_sel=emb_sel, + ) all_rollout_fns = [ fn @@ -786,12 +782,7 @@ def compute_trajectories( paths += joint_state[:end_idx] # Detach attached object if it was attached - if attached_obj is not None: - for ee_link_name, obj in attached_obj.items(): - self.mg[emb_sel].detach_object_from_robot( - object_names=[geom.prim_path for geom in obj.collision_meshes.values()], - link_name=self.robot.curobo_attached_object_link_names[ee_link_name], - ) + self._detach_objects_from_robot(attached_info, emb_sel) if return_full_result: return results @@ -876,3 +867,75 @@ def tensor_args(self): TensorDeviceType: tensor arguments used by this CuRobo instance """ return self._tensor_args + + def _attach_objects_to_robot( + self, + attached_obj, + attached_obj_scale, + cu_js_batch, + emb_sel, + ): + """ + Helper function to attach objects to the robot. 
+ + Args: + attached_obj (None or Dict[str, BaseObject]): Dictionary mapping end-effector + link names to corresponding BaseObject instances + attached_obj_scale (None or Dict[str, float]): Dictionary mapping end-effector + link names to corresponding scale values + cu_js_batch (JointState): CuRobo joint state object ordered according to kinematics + emb_sel (CuRoboEmbodimentSelection): Which embodiment selection to use + + Returns: + list: List of attached object information for detachment + """ + if attached_obj is None: + return [] + + attached_info = [] + for ee_link_name, obj in attached_obj.items(): + assert isinstance(obj, RigidPrim), "attached_object should be a RigidPrim object" + obj_paths = [geom.prim_path for geom in obj.collision_meshes.values()] + assert len(obj_paths) <= 32, f"Expected obj_paths to be at most 32, got: {len(obj_paths)}" + + position, quaternion = self.robot.links[ee_link_name].get_position_orientation() + # xyzw to wxyz + quaternion = quaternion[[3, 0, 1, 2]] + ee_pose = lazy.curobo.types.math.Pose(position=position, quaternion=quaternion).to(self._tensor_args) + + scale = 0.99 if attached_obj_scale is None else attached_obj_scale[ee_link_name] + + self.mg[emb_sel].attach_objects_to_robot( + joint_state=cu_js_batch, + object_names=obj_paths, + ee_pose=ee_pose, + link_name=self.robot.curobo_attached_object_link_names[ee_link_name], + scale=scale, + pitch_scale=1.0, + merge_meshes=True, + ) + + attached_info.append( + {"obj_paths": obj_paths, "link_name": self.robot.curobo_attached_object_link_names[ee_link_name]} + ) + + return attached_info + + def _detach_objects_from_robot( + self, + attached_info, + emb_sel, + ): + """ + Helper function to detach previously attached objects from the robot. + + Args: + attached_info (list): List of dictionaries containing object paths and link names + returned by _attach_objects_to_robot + emb_sel (CuRoboEmbodimentSelection): Which embodiment selection to use + """ + for info in attached_info: + self.mg[emb_sel].detach_object_from_robot( + object_names=info["obj_paths"], + link_name=info["link_name"], + ) diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index 155d3d00b..4340ffb5f 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -836,7 +836,6 @@ def _move_hand( target_pose, avoid_collision=True, arm=None, - attached_obj=None, motion_constraint=None, low_precision=False, lock_auxiliary_arm=False, @@ -899,15 +898,12 @@ def _plan_joint_motion( target_pos, target_quat, embodiment_selection=CuRoboEmbodimentSelection.DEFAULT, - attached_obj=None, motion_constraint=None, ): - if attached_obj is None: - # If an object is grasped, we need to pass it to the motion planner - obj_in_hand = self._get_obj_in_hand() - if obj_in_hand is not None: - # TODO: this root link logic is bad, fix it - attached_obj = {self.robot.eef_link_names[self.arm]: obj_in_hand.root_link} + # If an object is grasped, we need to pass it to the motion planner + obj_in_hand = self._get_obj_in_hand() + attached_obj = {self.robot.eef_link_names[self.arm]: obj_in_hand.root_link} if obj_in_hand is not None else None + planning_attempts = 0 success = False traj_path = None @@ -1456,7 +1452,7 @@ def _empty_action(self, follow_arm_targets=True): action[action_idx] = partial_action return action - def _reset_robot(self, attached_obj=None): + def _reset_robot(self): """ 
Yields action to move both hands to the position optimal for executing subsequent action primitives @@ -1473,12 +1469,11 @@ def _reset_robot(self, attached_obj=None): target_pos=target_pos, target_quat=target_quat, embodiment_selection=CuRoboEmbodimentSelection.ARM, - attached_obj=attached_obj, ) indented_print(f"Plan has {len(q_traj)} steps") yield from self._execute_motion_plan(q_traj, low_precision=True) - def _reset_hand(self, arm=None, attached_obj=None): + def _reset_hand(self, arm=None): """ Yields action to move the hand to the position optimal for executing subsequent action primitives @@ -1492,7 +1487,7 @@ def _reset_hand(self, arm=None, attached_obj=None): reset_eef_pose = self._get_reset_eef_pose("world")[arm] if self.debug_visual_marker is not None: self.debug_visual_marker.set_position_orientation(*reset_eef_pose) - yield from self._move_hand(reset_eef_pose, arm=arm, attached_obj=attached_obj, low_precision=True) + yield from self._move_hand(reset_eef_pose, arm=arm, low_precision=True) def _get_reset_eef_pose(self, frame="robot"): """ @@ -1996,8 +1991,16 @@ def _validate_poses(self, candidate_poses, pose_on_obj=None, arm=None, skip_obst candidate_joint_positions.append(joint_pos) candidate_joint_positions = th.stack(candidate_joint_positions) + + # If an object is grasped, we need to pass it to the collision checker + obj_in_hand = self._get_obj_in_hand() + attached_obj = {self.robot.eef_link_names[self.arm]: obj_in_hand.root_link} if obj_in_hand is not None else None + invalid_results = self._motion_generator.check_collisions( - candidate_joint_positions, self_collision_check=False, skip_obstacle_update=skip_obstacle_update + candidate_joint_positions, + self_collision_check=False, + skip_obstacle_update=skip_obstacle_update, + attached_obj=attached_obj, ).cpu() # For each candidate that passed collision check, verify reachability From 86d5f9b0762359e952388d4e26bf2acc01415292 Mon Sep 17 00:00:00 2001 From: hang-yin Date: Fri, 3 Jan 2025 14:59:31 -0800 Subject: [PATCH 33/76] allows user to pick robot for primitives rs int example --- omnigibson/examples/action_primitives/rs_int_example.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/omnigibson/examples/action_primitives/rs_int_example.py b/omnigibson/examples/action_primitives/rs_int_example.py index 9fb7371a3..c6a212da1 100644 --- a/omnigibson/examples/action_primitives/rs_int_example.py +++ b/omnigibson/examples/action_primitives/rs_int_example.py @@ -10,6 +10,7 @@ ) from omnigibson.macros import gm from omnigibson.robots.tiago import Tiago +from omnigibson.utils.ui_utils import choose_from_options # Don't use GPU dynamics and use flatcache for performance boost # gm.USE_GPU_DYNAMICS = True @@ -27,8 +28,11 @@ def main(): It loads Rs_int with a robot, and the robot picks and places an apple. 
""" + robot_options = ["R1", "Tiago"] + robot_type = choose_from_options(options=robot_options, name="robot options", random_selection=False) + # Load the config - config_filename = os.path.join(og.example_config_path, "tiago_primitives.yaml") + config_filename = os.path.join(og.example_config_path, f"{robot_type.lower()}_primitives.yaml") config = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader) config["scene"]["scene_model"] = "Rs_int" From b26aa0be7d76175c10b3b15d0df22593774a490a Mon Sep 17 00:00:00 2001 From: hang-yin Date: Fri, 3 Jan 2025 15:14:56 -0800 Subject: [PATCH 34/76] Make collision activation distance configurable; add comments to motion constraint vector --- omnigibson/action_primitives/curobo.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/omnigibson/action_primitives/curobo.py b/omnigibson/action_primitives/curobo.py index bef38f0c2..ffa83041a 100644 --- a/omnigibson/action_primitives/curobo.py +++ b/omnigibson/action_primitives/curobo.py @@ -150,6 +150,7 @@ def __init__( use_cuda_graph=True, debug=False, use_default_embodiment_only=False, + collision_activation_distance=0.01, ): """ Args: @@ -165,6 +166,9 @@ def __init__( use_cuda_graph (bool): Whether to use CUDA graph for motion generation or not debug (bool): Whether to debug generation or not, setting this True will set use_cuda_graph to False implicitly use_default_embodiment_only (bool): Whether to use only the default embodiment for the robot or not + collision_activation_distance (float): Activation distance for collision checking; this affects + 1) how close the computed trajectories can get to obstacles and + 2) how close the robot can get to obstacles during collision checks """ # Only support one scene for now -- verify that this is the case assert len(og.sim.scenes) == 1 @@ -225,7 +229,7 @@ def __init__( num_trajopt_seeds=4, num_graph_seeds=4, interpolation_dt=0.03, - collision_activation_distance=0.05, + collision_activation_distance=collision_activation_distance, self_collision_check=True, maximum_trajectory_dt=None, fixed_iters_trajopt=True, @@ -628,6 +632,12 @@ def compute_trajectories( ) # Add the pose cost metric + # Details can be found here: https://curobo.org/advanced_examples/3_constrained_planning.html + # The motion constraint vector is a 6D vector controlling end-effector movement: + # Angular constraints: [qx, qy, qz] - rotation around each axis + # Linear constraints: [x, y, z] - translation along each axis + # Setting any component to 1.0 locks that axis, forcing the planner to reach + # the target using only the remaining unlocked axes if motion_constraint is None: motion_constraint = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0] pose_cost_metric = lazy.curobo.wrap.reacher.motion_gen.PoseCostMetric( From 02d6266fdddabfa04cf1faf9022dbd8605fa523f Mon Sep 17 00:00:00 2001 From: hang-yin Date: Mon, 6 Jan 2025 13:22:16 -0800 Subject: [PATCH 35/76] More fixes for primitives --- .../starter_semantic_action_primitives.py | 166 +++++------------- omnigibson/configs/r1_primitives.yaml | 5 +- omnigibson/configs/tiago_primitives.yaml | 13 +- omnigibson/controllers/joint_controller.py | 2 +- .../action_primitives/rs_int_example.py | 4 +- .../action_primitives/solve_simple_task.py | 16 -- omnigibson/robots/r1.py | 32 +--- omnigibson/utils/grasping_planning_utils.py | 1 - 8 files changed, 57 insertions(+), 182 deletions(-) diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index 
4340ffb5f..497bf26a2 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -81,10 +81,12 @@ m.MAX_ATTEMPTS_FOR_OPEN_CLOSE = 20 m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_WITH_OBJECT_AND_PREDICATE = 20 -m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_NEAR_OBJECT = 100 +m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_NEAR_OBJECT = 200 m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_IN_ROOM = 60 m.MAX_ATTEMPTS_FOR_SAMPLING_PLACE_POSE = 20 m.PREDICATE_SAMPLING_Z_OFFSET = 0.02 +m.BASE_POSE_SAMPLING_LOWER_BOUND = 0.0 +m.BASE_POSE_SAMPLING_UPPER_BOUND = 1.5 m.GRASP_APPROACH_DISTANCE = 0.01 m.OPEN_GRASP_APPROACH_DISTANCE = 0.4 @@ -95,9 +97,8 @@ m.LOW_PRECISION_DIST_THRESHOLD = 0.1 m.LOW_PRECISION_ANGLE_THRESHOLD = 0.2 -m.TORSO_FIXED = False m.JOINT_POS_DIFF_THRESHOLD = 0.01 -m.LOW_PRECISION_JOINT_POS_DIFF_THRESHOLD = 0.1 +m.LOW_PRECISION_JOINT_POS_DIFF_THRESHOLD = 0.05 m.JOINT_CONTROL_MIN_ACTION = 0.0 m.MAX_ALLOWED_JOINT_ERROR_FOR_LINEAR_MOTION = math.radians(45) m.TIME_BEFORE_JOINT_STUCK_CHECK = 1.0 @@ -532,7 +533,7 @@ def _grasp(self, obj): yield from self._navigate_if_needed(obj, pose_on_obj=grasp_pose) indented_print("Moving hand to grasp pose") - yield from self._move_hand(grasp_pose, motion_constraint=[0, 0, 0, 0, 1, 0]) + yield from self._move_hand(grasp_pose) if self.robot.grasping_mode == "sticky": # Pre-grasp in sticky grasping mode. @@ -545,7 +546,7 @@ def _grasp(self, obj): yield from self._move_hand(approach_pose, avoid_collision=False) elif self.robot.grasping_mode == "assisted": indented_print("Performing grasp approach") - yield from self._move_hand(approach_pose, motion_constraint=[0, 0, 0, 0, 0, 1]) + yield from self._move_hand(approach_pose) yield from self._execute_grasp() # Step a few times to update @@ -677,7 +678,7 @@ def _place_with_predicate(self, obj, predicate): yield from self._move_hand(directly_move_hand_pose) else: for candidate in pose_candidates: - valid_navigation_pose = self._sample_pose_near_object(obj, pose_on_obj=candidate) + valid_navigation_pose = self._sample_pose_near_object(obj, pose_on_obj=candidate, sampling_attempts=20) if valid_navigation_pose is None: continue else: @@ -904,7 +905,6 @@ def _plan_joint_motion( obj_in_hand = self._get_obj_in_hand() attached_obj = {self.robot.eef_link_names[self.arm]: obj_in_hand.root_link} if obj_in_hand is not None else None - planning_attempts = 0 success = False traj_path = None # aggregate target_pos and target_quat to match batch_size @@ -914,30 +914,27 @@ def _plan_joint_motion( } self._motion_generator.update_obstacles() - while not success and planning_attempts < m.MAX_PLANNING_ATTEMPTS: - successes, traj_paths = self._motion_generator.compute_trajectories( - target_pos=target_pos, - target_quat=target_quat, - is_local=False, - max_attempts=5, - timeout=60.0, - ik_fail_return=5, - enable_finetune_trajopt=True, - finetune_attempts=1, - return_full_result=False, - success_ratio=1.0, - attached_obj=attached_obj, - motion_constraint=motion_constraint, - skip_obstacle_update=True, - emb_sel=embodiment_selection, - ) - # Grab the first successful trajectory, if not found, then continue planning - success_idx = th.where(successes)[0].cpu() - if len(success_idx) > 0: - success = True - traj_path = traj_paths[success_idx[0]] - else: - planning_attempts += self._motion_generator.batch_size + successes, traj_paths = self._motion_generator.compute_trajectories( + target_pos=target_pos, + target_quat=target_quat, + is_local=False, + 
max_attempts=math.ceil(m.MAX_PLANNING_ATTEMPTS / self._motion_generator.batch_size),
+            timeout=60.0,
+            ik_fail_return=5,
+            enable_finetune_trajopt=True,
+            finetune_attempts=1,
+            return_full_result=False,
+            success_ratio=1.0,
+            attached_obj=attached_obj,
+            motion_constraint=motion_constraint,
+            skip_obstacle_update=True,
+            emb_sel=embodiment_selection,
+        )
+        # Grab the first successful trajectory, if not found, then continue planning
+        success_idx = th.where(successes)[0].cpu()
+        if len(success_idx) > 0:
+            success = True
+            traj_path = traj_paths[success_idx[0]]
         if not success:
             raise ActionPrimitiveError(
                 ActionPrimitiveError.Reason.PLANNING_ERROR,
@@ -979,7 +976,7 @@ def _execute_motion_plan(
             articulation_threshold = (
                 m.JOINT_POS_DIFF_THRESHOLD if not low_precision else m.LOW_PRECISION_JOINT_POS_DIFF_THRESHOLD
             )
-            if th.max(th.abs(articulation_joint_diff)).item() < m.JOINT_POS_DIFF_THRESHOLD:
+            if th.max(th.abs(articulation_joint_diff)).item() < articulation_threshold:
                 articulation_target_reached = True
                 # TODO: generalize this to translation & rotation + high/low precision modes
                 if th.max(th.abs(base_joint_diff)).item() < m.DEFAULT_DIST_THRESHOLD:
@@ -1401,7 +1398,7 @@ def _get_head_goal_q(self, target_obj_pose):

         # if not possible to look at object, return current head joint positions
         else:
-            default_head_pos = self._get_reset_joint_pos()[self.robot.controller_action_idx["camera"]]
+            default_head_pos = self.robot.reset_joint_pos[self.robot.controller_action_idx["camera"]]
             head1_joint_goal = default_head_pos[0]
             head2_joint_goal = default_head_pos[1]

@@ -1499,44 +1496,26 @@ def _get_reset_eef_pose(self, frame="robot"):
         Returns:
             dict of th.tensor: The reset eef pose for each robot arm
         """
-        if isinstance(self.robot, Fetch):
-            pose = {
-                self.arm: (
-                    th.tensor([0.48688125, -0.12507881, 0.97888719]),
-                    th.tensor([0.61324748, 0.61305553, -0.35266518, 0.35173529]),
-                )
-            }
-        elif isinstance(self.robot, R1):
+        if isinstance(self.robot, R1):
             pose = {
                 self.robot.arm_names[0]: (
-                    th.tensor([0.43, 0.2, 1.2]),
+                    th.tensor([0.45, 0.183, 1.23]),
                     th.tensor([1.0, 0.0, 0.0, 0.0]),
                 ),
                 self.robot.arm_names[1]: (
-                    th.tensor([0.43, -0.2, 1.2]),
+                    th.tensor([0.45, -0.183, 1.23]),
                     th.tensor([-1.0, 0.0, 0.0, 0.0]),
                 ),
             }
         elif isinstance(self.robot, Tiago):
-            # TODO: default trunk position vs.
raised trunk position - # pose = { - # self.robot.arm_names[0]: ( - # th.tensor([0.4997, 0.2497, 0.6357]), - # th.tensor([-0.5609, 0.5617, 0.4299, 0.4302]), - # ), - # self.robot.arm_names[1]: ( - # th.tensor([0.4978, -0.2521, 0.6357]), - # th.tensor([-0.5609, -0.5617, 0.4299, -0.4302]), - # ), - # } pose = { self.robot.arm_names[0]: ( - th.tensor([0.5021, 0.2458, 0.7648]), - th.tensor([-0.5599, 0.5649, 0.4303, 0.4269]), + th.tensor([0.5, 0.23, 0.85]), + th.tensor([0.7071, -0.7071, 0.0000, -0.0000]), ), self.robot.arm_names[1]: ( - th.tensor([0.4999, -0.2486, 0.7633]), - th.tensor([-0.5592, -0.5646, 0.4311, -0.4274]), + th.tensor([0.5, -0.23, 0.85]), + th.tensor([-0.7071, -0.7071, -0.0000, -0.0000]), ), } else: @@ -1548,64 +1527,6 @@ def _get_reset_eef_pose(self, frame="robot"): else: raise ValueError(f"Unsupported frame: {frame}") - def _get_reset_joint_pos(self): - reset_pose_fetch = th.tensor( - [ - 0.0, - 0.0, # wheels - 0.0, # trunk - 0.0, - -1.0, - 0.0, # head - -1.0, - 1.53448, - 2.2, - 0.0, - 1.36904, - 1.90996, # arm - 0.05, - 0.05, # gripper - ] - ) - - reset_pose_tiago = th.tensor( - [ - -1.78029833e-04, - 3.20231302e-05, - -1.85759447e-07, - 0.0, - -0.2, - 0.0, - 0.1, - -6.10000000e-01, - -1.10000000e00, - 0.00000000e00, - -1.10000000e00, - 1.47000000e00, - 0.00000000e00, - 8.70000000e-01, - 2.71000000e00, - 1.50000000e00, - 1.71000000e00, - -1.50000000e00, - -1.57000000e00, - 4.50000000e-01, - 1.39000000e00, - 0.00000000e00, - 0.00000000e00, - 4.50000000e-02, - 4.50000000e-02, - 4.50000000e-02, - 4.50000000e-02, - ] - ) - if isinstance(self.robot, Fetch): - return reset_pose_fetch - elif isinstance(self.robot, Tiago): - return reset_pose_tiago - else: - raise ValueError(f"Unsupported robot model: {type(self.robot)}") - def _navigate_to_pose(self, pose_2d): """ Yields the action to navigate robot to the specified 2d pose @@ -1617,15 +1538,13 @@ def _navigate_to_pose(self, pose_2d): th.tensor or None: Action array for one step for the robot to navigate or None if it is done navigating """ pose_3d = self._get_robot_pose_from_2d_pose(pose_2d) - # TODO: why was this 0.1? 
- pose_3d[0][2] = 0.0 if self.debug_visual_marker is not None: self.debug_visual_marker.set_position_orientation(*pose_3d) target_pos = {self.robot.base_footprint_link_name: pose_3d[0]} target_quat = {self.robot.base_footprint_link_name: pose_3d[1]} q_traj = self._plan_joint_motion(target_pos, target_quat, CuRoboEmbodimentSelection.BASE) - yield from self._execute_motion_plan(q_traj, stop_on_contact=True) + yield from self._execute_motion_plan(q_traj, stop_on_contact=False) def _draw_plan(self, plan): SEARCHED = [] @@ -1803,7 +1722,9 @@ def _rotate_in_place(self, end_pose, angle_threshold=m.DEFAULT_ANGLE_THRESHOLD): empty_action = self._empty_action() yield self._postprocess_action(empty_action) - def _sample_pose_near_object(self, obj, pose_on_obj=None, **kwargs): + def _sample_pose_near_object( + self, obj, pose_on_obj=None, sampling_attempts=m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_NEAR_OBJECT, **kwargs + ): """ Returns a 2d pose for the robot within in the range of the object and where the robot is not in collision with anything @@ -1817,8 +1738,7 @@ def _sample_pose_near_object(self, obj, pose_on_obj=None, **kwargs): - 3-array: (x,y,z) Position in the world frame - 4-array: (x,y,z,w) Quaternion orientation in the world frame """ - # TODO: make this a macro - distance_lo, distance_hi = 0.0, 2.0 + distance_lo, distance_hi = m.BASE_POSE_SAMPLING_LOWER_BOUND, m.BASE_POSE_SAMPLING_UPPER_BOUND yaw_lo, yaw_hi = -math.pi, math.pi avg_arm_workspace_range = th.mean(self.robot.arm_workspace_range[self.arm]) @@ -1833,7 +1753,7 @@ def _sample_pose_near_object(self, obj, pose_on_obj=None, **kwargs): attempt = 0 # Update obstacle once before sampling self._motion_generator.update_obstacles() - while attempt < m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_NEAR_OBJECT: + while attempt < sampling_attempts: candidate_poses = [] for _ in range(self._collision_check_batch_size): while True: diff --git a/omnigibson/configs/r1_primitives.yaml b/omnigibson/configs/r1_primitives.yaml index f7b42c9d4..457ab1592 100644 --- a/omnigibson/configs/r1_primitives.yaml +++ b/omnigibson/configs/r1_primitives.yaml @@ -58,7 +58,6 @@ robots: use_impedances: true arm_left: name: JointController - # subsume_controllers: [trunk] motor_type: position command_input_limits: null use_delta_commands: false @@ -74,13 +73,13 @@ robots: motor_type: position command_input_limits: null use_delta_commands: false - use_impedances: true + use_impedances: false gripper_right: name: JointController motor_type: position command_input_limits: null use_delta_commands: false - use_impedances: true + use_impedances: false camera: name: JointController diff --git a/omnigibson/configs/tiago_primitives.yaml b/omnigibson/configs/tiago_primitives.yaml index d3625c8bd..73eb4f499 100644 --- a/omnigibson/configs/tiago_primitives.yaml +++ b/omnigibson/configs/tiago_primitives.yaml @@ -80,7 +80,7 @@ robots: use_delta_commands: false use_impedances: true pos_kp: 200 - pos_ki: 0.5 + pos_ki: 5.0 max_integral_error: 10.0 trunk: name: JointController @@ -91,13 +91,12 @@ robots: pos_kp: 500 arm_left: name: JointController - # subsume_controllers: [trunk] motor_type: position command_input_limits: null use_delta_commands: false use_impedances: true pos_kp: 500 - pos_ki: 0.5 + pos_ki: 5.0 max_integral_error: 10.0 arm_right: name: JointController @@ -106,22 +105,20 @@ robots: use_delta_commands: false use_impedances: true pos_kp: 500 - pos_ki: 0.5 + pos_ki: 5.0 max_integral_error: 10.0 gripper_left: name: JointController motor_type: position command_input_limits: null 
diff --git a/omnigibson/configs/tiago_primitives.yaml b/omnigibson/configs/tiago_primitives.yaml
index d3625c8bd..73eb4f499 100644
--- a/omnigibson/configs/tiago_primitives.yaml
+++ b/omnigibson/configs/tiago_primitives.yaml
@@ -80,7 +80,7 @@ robots:
         use_delta_commands: false
         use_impedances: true
         pos_kp: 200
-        pos_ki: 0.5
+        pos_ki: 5.0
         max_integral_error: 10.0
       trunk:
         name: JointController
@@ -91,13 +91,12 @@ robots:
         pos_kp: 500
       arm_left:
         name: JointController
-        # subsume_controllers: [trunk]
         motor_type: position
         command_input_limits: null
         use_delta_commands: false
         use_impedances: true
         pos_kp: 500
-        pos_ki: 0.5
+        pos_ki: 5.0
         max_integral_error: 10.0
       arm_right:
         name: JointController
@@ -106,22 +105,20 @@
         motor_type: position
         command_input_limits: null
         use_delta_commands: false
         use_impedances: true
         pos_kp: 500
-        pos_ki: 0.5
+        pos_ki: 5.0
         max_integral_error: 10.0
       gripper_left:
         name: JointController
         motor_type: position
         command_input_limits: null
         use_delta_commands: false
-        use_impedances: true
-        pos_kp: 200
+        use_impedances: false
       gripper_right:
         name: JointController
         motor_type: position
         command_input_limits: null
         use_delta_commands: false
-        use_impedances: true
-        pos_kp: 200
+        use_impedances: false
       camera:
         name: JointController
diff --git a/omnigibson/controllers/joint_controller.py b/omnigibson/controllers/joint_controller.py
index 26710b24f..979777449 100644
--- a/omnigibson/controllers/joint_controller.py
+++ b/omnigibson/controllers/joint_controller.py
@@ -20,7 +20,7 @@
 # Create settings for this module
 m = create_module_macros(module_path=__file__)
 m.DEFAULT_JOINT_POS_KP = 50.0
-m.DEFAULT_JOINT_POS_KI = 0.1
+m.DEFAULT_JOINT_POS_KI = 0.0  # do not use integral term by default
 m.DEFAULT_JOINT_MAX_INTEGRAL_ERROR = 1.0
 m.DEFAULT_JOINT_POS_DAMPING_RATIO = 1.0  # critically damped
 m.DEFAULT_JOINT_VEL_KP = 2.0
diff --git a/omnigibson/examples/action_primitives/rs_int_example.py b/omnigibson/examples/action_primitives/rs_int_example.py
index c6a212da1..534450510 100644
--- a/omnigibson/examples/action_primitives/rs_int_example.py
+++ b/omnigibson/examples/action_primitives/rs_int_example.py
@@ -13,8 +13,8 @@
 from omnigibson.utils.ui_utils import choose_from_options
 
 # Don't use GPU dynamics and use flatcache for performance boost
-# gm.USE_GPU_DYNAMICS = True
-# gm.ENABLE_FLATCACHE = True
+gm.USE_GPU_DYNAMICS = False
+gm.ENABLE_FLATCACHE = True
 
 
 def execute_controller(ctrl_gen, env):
diff --git a/omnigibson/examples/action_primitives/solve_simple_task.py b/omnigibson/examples/action_primitives/solve_simple_task.py
index 7f2c8fe3b..946151b65 100644
--- a/omnigibson/examples/action_primitives/solve_simple_task.py
+++ b/omnigibson/examples/action_primitives/solve_simple_task.py
@@ -58,22 +58,6 @@ def main():
     scene = env.scene
     robot = env.robots[0]
 
-    # Open the gripper(s) to match cuRobo's default state
-    for arm_name in robot.gripper_control_idx.keys():
-        grpiper_control_idx = robot.gripper_control_idx[arm_name]
-        robot.set_joint_positions(th.ones_like(grpiper_control_idx), indices=grpiper_control_idx, normalized=True)
-        robot.keep_still()
-
-    for _ in range(5):
-        og.sim.step()
-
-    env.scene.update_initial_state()
-    env.scene.reset()
-
-    # Let the object settle
-    for _ in range(30):
-        og.sim.step()
-
     # Allow user to move camera more easily
     og.sim.enable_viewer_camera_teleoperation()
 
diff --git a/omnigibson/robots/r1.py b/omnigibson/robots/r1.py
index 49d6cf723..ce53112b4 100644
--- a/omnigibson/robots/r1.py
+++ b/omnigibson/robots/r1.py
@@ -168,9 +168,10 @@ def untucked_default_joint_pos(self):
         pos = th.zeros(self.n_dof)
         # Keep the current joint positions for the base joints
         pos[self.base_idx] = self.get_joint_positions()[self.base_idx]
+        pos[self.arm_control_idx["left"]] = th.tensor([-0.0464, 2.6172, -1.4584, -0.0433, 1.5899, -1.1587])
+        pos[self.arm_control_idx["right"]] = th.tensor([0.0464, 2.6168, -1.4570, 0.0418, -1.5896, 1.1593])
         for arm in self.arm_names:
             pos[self.gripper_control_idx[arm]] = th.tensor([0.03, 0.03])  # open gripper
-            pos[self.arm_control_idx[arm]] = th.tensor([0.0, 1.906, -0.991, 1.571, 0.915, -1.571])
         return pos
 
     @property
@@ -247,33 +248,8 @@ def disabled_collision_pairs(self):
         return [
             ["left_gripper_link1", "left_gripper_link2"],
             ["right_gripper_link1", "right_gripper_link2"],
-            ["base_link", "torso_link1"],
-            ["base_link", "servo_link1"],
-            ["base_link", "wheel_link3"],
-            ["base_link", "servo_link3"],
-            ["base_link", "servo_link2"],
             ["base_link", "wheel_link1"],
             ["base_link", "wheel_link2"],
-            ["servo_link1", "wheel_link1"],
-            ["servo_link2", "wheel_link2"],
-            ["servo_link3", "wheel_link3"],
-            ["torso_link1", "torso_link2"],
-            ["torso_link2", "torso_link3"],
-            ["torso_link3", "torso_link4"],
-            ["torso_link4", "left_arm_link1"],
-            ["left_arm_link1", "left_arm_link2"],
-            ["left_arm_link2", "left_arm_link3"],
-            ["left_arm_link3", "left_arm_link4"],
-            ["left_arm_link4", "left_arm_link5"],
-            ["left_arm_link5", "left_arm_link6"],
-            ["left_arm_link6", "left_gripper_link1"],
-            ["left_arm_link6", "left_gripper_link2"],
-            ["torso_link4", "right_arm_link1"],
-            ["right_arm_link1", "right_arm_link2"],
-            ["right_arm_link2", "right_arm_link3"],
-            ["right_arm_link3", "right_arm_link4"],
-            ["right_arm_link4", "right_arm_link5"],
-            ["right_arm_link5", "right_arm_link6"],
-            ["right_arm_link6", "right_gripper_link1"],
-            ["right_arm_link6", "right_gripper_link2"],
+            ["base_link", "wheel_link3"],
+            ["torso_link2", "torso_link4"],
         ]
diff --git a/omnigibson/utils/grasping_planning_utils.py b/omnigibson/utils/grasping_planning_utils.py
index 6dfa46f7e..86bede5c6 100644
--- a/omnigibson/utils/grasping_planning_utils.py
+++ b/omnigibson/utils/grasping_planning_utils.py
@@ -38,7 +38,6 @@ def get_grasp_poses_for_object_sticky(target_obj):
     towards_object_in_world_frame = bbox_center_world - grasp_center_pos
     towards_object_in_world_frame /= th.norm(towards_object_in_world_frame)
 
-    # TODO: why was this pi/2 in the y-axis?
     grasp_quat = T.euler2quat(th.tensor([0, 0, 0], dtype=th.float32))
 
     grasp_pose = (grasp_center_pos, grasp_quat)

From c7ccf5a095b5e543cec8429452a88d7bcf5d43a8 Mon Sep 17 00:00:00 2001
From: hang-yin
Date: Thu, 9 Jan 2025 13:50:12 -0800
Subject: [PATCH 36/76] Small cuRobo changes

---
 omnigibson/action_primitives/curobo.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/omnigibson/action_primitives/curobo.py b/omnigibson/action_primitives/curobo.py
index ffa83041a..1febbb8e8 100644
--- a/omnigibson/action_primitives/curobo.py
+++ b/omnigibson/action_primitives/curobo.py
@@ -7,7 +7,7 @@
 import omnigibson as og
 import omnigibson.lazy as lazy
 import omnigibson.utils.transform_utils as T
-from omnigibson.macros import create_module_macros
+from omnigibson.macros import create_module_macros, gm
 from omnigibson.object_states.factory import METALINK_PREFIXES
 from omnigibson.prims.rigid_prim import RigidPrim
 from omnigibson.robots.holonomic_base_robot import HolonomicBaseRobot
@@ -150,7 +150,7 @@ def __init__(
         use_cuda_graph=True,
         debug=False,
         use_default_embodiment_only=False,
-        collision_activation_distance=0.01,
+        collision_activation_distance=0.005,
     ):
         """
         Args:
@@ -173,6 +173,8 @@
         # Only support one scene for now -- verify that this is the case
         assert len(og.sim.scenes) == 1
 
+        assert not gm.ENABLE_FLATCACHE, "CuRobo does not support flatcache for now"
+
         # Store internal variables
         self._tensor_args = lazy.curobo.types.base.TensorDeviceType(device=th.device(device))
         self.debug = debug
From a8ce45fd3ddfb86ce35aa39e5bd980dfda93ee51 Mon Sep 17 00:00:00 2001
From: hang-yin
Date: Thu, 9 Jan 2025 13:59:58 -0800
Subject: [PATCH 37/76] Bug fix - implement holonomic base joint controller

---
 omnigibson/controllers/__init__.py                 |   1 +
 .../controllers/holonomic_base_joint_controller.py | 136 ++++++++++++++++++
 omnigibson/robots/holonomic_base_robot.py          |  72 ++++++----
 omnigibson/robots/r1.py                            |   2 +-
 omnigibson/robots/tiago.py                         |   2 +-
 omnigibson/utils/ui_utils.py                       |   2 +-
 omnigibson/utils/usd_utils.py                      |   4 +
 7 files changed, 190 insertions(+), 29 deletions(-)
 create mode 100644 omnigibson/controllers/holonomic_base_joint_controller.py

diff --git a/omnigibson/controllers/__init__.py b/omnigibson/controllers/__init__.py
index a56c7c54b..2af5c752f 100644
--- a/omnigibson/controllers/__init__.py
+++ b/omnigibson/controllers/__init__.py
@@ -9,6 +9,7 @@
     ManipulationController,
 )
 from omnigibson.controllers.dd_controller import DifferentialDriveController
+from omnigibson.controllers.holonomic_base_joint_controller import HolonomicBaseJointController
 from omnigibson.controllers.ik_controller import InverseKinematicsController
 from omnigibson.controllers.joint_controller import JointController
 from omnigibson.controllers.multi_finger_gripper_controller import MultiFingerGripperController
diff --git a/omnigibson/controllers/holonomic_base_joint_controller.py b/omnigibson/controllers/holonomic_base_joint_controller.py
new file mode 100644
index 000000000..6790513a0
--- /dev/null
+++ b/omnigibson/controllers/holonomic_base_joint_controller.py
@@ -0,0 +1,136 @@
+import torch as th
+
+import omnigibson.utils.transform_utils as T
+from omnigibson.controllers import JointController
+from omnigibson.macros import create_module_macros
+
+
+class HolonomicBaseJointController(JointController):
+    """
+    Controller class for holonomic base joint control. This is a very specific type of controller used to model control of a 3DOF
+    holonomic robot base -- i.e.: free motion along (x, y, rz).
+
+    NOTE: Inputted commands are ALWAYS assumed to be in the form of absolute values (defined in the robot's local frame), not deltas!
+    NOTE: Also assumes commands are ALWAYS inputted in the form of [x, y, rz]!
+
+    Each controller step consists of the following:
+        1. Clip + Scale inputted command according to @command_input_limits and @command_output_limits
+        2a. If using delta commands, then adds the command to the current joint state
+        2b. Clips the resulting command by the motor limits
+    """
+
+    def __init__(
+        self,
+        control_freq,
+        motor_type,
+        control_limits,
+        dof_idx,
+        command_input_limits=None,
+        command_output_limits=None,
+        pos_kp=None,
+        pos_damping_ratio=None,
+        pos_ki=None,
+        max_integral_error=None,
+        vel_kp=None,
+        use_impedances=False,
+        use_gravity_compensation=False,
+        use_cc_compensation=True,
+    ):
+        """
+        Args:
+            control_freq (int): controller loop frequency
+            motor_type (str): type of motor being controlled, one of {position, velocity, effort}
+            control_limits (Dict[str, Tuple[Array[float], Array[float]]]): The min/max limits to the outputted
+                control signal. Should specify per-dof type limits, i.e.:
+
+                "position": [[min], [max]]
+                "velocity": [[min], [max]]
+                "effort": [[min], [max]]
+                "has_limit": [...bool...]
+
+                Values outside of this range will be clipped, if the corresponding joint index in has_limit is True.
+            dof_idx (Array[int]): specific dof indices controlled by this robot. Used for inferring
+                controller-relevant values during control computations
+            command_input_limits (None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]]):
+                if set, is the min/max acceptable inputted command. Values outside this range will be clipped.
+                If None, no clipping will be used. If "default", range will be set to (-1, 1)
+            command_output_limits (None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]]):
+                if set, is the min/max scaled command. If both this value and @command_input_limits is not None,
+                then all inputted command values will be scaled from the input range to the output range.
+                If either is None, no scaling will be used. If "default", then this range will automatically be set
+                to the @control_limits entry corresponding to self.control_type
+            pos_kp (None or float): If @motor_type is "position" and @use_impedances=True, this is the
+                proportional gain applied to the joint controller. If None, a default value will be used.
+            pos_damping_ratio (None or float): If @motor_type is "position" and @use_impedances=True, this is the
+                damping ratio applied to the joint controller. If None, a default value will be used.
+            vel_kp (None or float): If @motor_type is "velocity" and @use_impedances=True, this is the
+                proportional gain applied to the joint controller. If None, a default value will be used.
+            use_impedances (bool): If True, will use impedances via the mass matrix to modify the desired efforts
+                applied
+            use_gravity_compensation (bool): If True, will add gravity compensation to the computed efforts. This is
+                an experimental feature that only works on fixed base robots. We do not recommend enabling this.
+            use_cc_compensation (bool): If True, will add Coriolis / centrifugal compensation to the computed efforts.
+        """
+        # Make sure we're controlling exactly 3 DOFs
+        assert len(dof_idx) == 3, f"Expected 3 DOFs for holonomic base control, got {len(dof_idx)}"
+
+        # Run super init
+        super().__init__(
+            control_freq=control_freq,
+            motor_type=motor_type,
+            control_limits=control_limits,
+            dof_idx=dof_idx,
+            command_input_limits=command_input_limits,
+            command_output_limits=command_output_limits,
+            pos_kp=pos_kp,
+            pos_damping_ratio=pos_damping_ratio,
+            pos_ki=pos_ki,
+            max_integral_error=max_integral_error,
+            vel_kp=vel_kp,
+            use_impedances=use_impedances,
+            use_gravity_compensation=use_gravity_compensation,
+            use_cc_compensation=use_cc_compensation,
+        )
+
+    def _update_goal(self, command, control_dict):
+        """
+        Updates the goal command by transforming it from the robot's local frame to its canonical frame.
+        """
+        base_pose = T.pose2mat((control_dict["root_pos"], control_dict["root_quat"]))
+        canonical_pose = T.pose2mat((control_dict["canonical_pos"], control_dict["canonical_quat"]))
+        canonical_to_base_pose = T.pose_inv(canonical_pose) @ base_pose
+
+        if self.motor_type == "position":
+            # Handle position control mode
+            command_in_base_frame = th.eye(4)
+            command_in_base_frame[:2, 3] = command[:2]  # Set x,y translation
+            command_in_base_frame[:3, :3] = T.euler2mat(th.tensor([0.0, 0.0, command[2]]))  # Set rotation
+
+            # Transform command to canonical frame
+            command_in_canonical_frame = canonical_to_base_pose @ command_in_base_frame
+
+            # Extract position and yaw from transformed command
+            position = command_in_canonical_frame[:2, 3]
+            yaw = T.mat2euler(command_in_canonical_frame[:3, :3])[2:3]
+            command = th.cat([position, yaw])
+        else:
+            # Handle velocity/effort control modes
+            # Note: Only rotate the commands, don't translate
+            rotation_matrix = canonical_to_base_pose[:3, :3]
+
+            # Prepare poses for transformation
+            rotation_poses = th.zeros((2, 3, 3))
+            rotation_poses[:, :3, :3] = rotation_matrix
+
+            local_vectors = th.zeros((2, 3, 1))
+            local_vectors[0, :, 0] = th.tensor([command[0], command[1], 0.0])  # Linear
+            local_vectors[1, :, 0] = th.tensor([0.0, 0.0, command[2]])  # Angular
+
+            # Transform to global frame
+            global_vectors = rotation_poses @ local_vectors
+            linear_global = global_vectors[0]
+            angular_global = global_vectors[1]
+
+            command = th.tensor([linear_global[0], linear_global[1], angular_global[2]])
+
+        return super()._update_goal(command=command, control_dict=control_dict)
""" - cfg = super()._default_base_joint_controller_config - # The default value is too small for the base joints - cfg["pos_kp"] = m.BASE_JOINT_CONTROLLER_POSITION_KP + return { + "name": "HolonomicBaseJointController", + "control_freq": self._control_freq, + "motor_type": "velocity", + "control_limits": self.control_limits, + "dof_idx": self.base_control_idx, + "command_output_limits": "default", + } + + @property + def _default_controller_config(self): + # Always run super method first + cfg = super()._default_controller_config + + # Add supported base controllers + cfg["base"] = { + self._default_holonomic_base_joint_controller_config[ + "name" + ]: self._default_holonomic_base_joint_controller_config, + self._default_base_null_joint_controller_config["name"]: self._default_base_null_joint_controller_config, + } + return cfg def _post_load(self): @@ -297,30 +330,17 @@ def get_angular_velocity(self) -> th.Tensor: # Note that the link we are interested in is self.base_footprint_link, not self.root_link return self.base_footprint_link.get_angular_velocity() - def _postprocess_control(self, control, control_type): - # Run super method first - u_vec, u_type_vec = super()._postprocess_control(control=control, control_type=control_type) + def get_control_dict(self): + fcns = super().get_control_dict() - # Change the control from base_footprint_link ("base_footprint") frame to root_link ("base_footprint_x") frame - base_orn = self.base_footprint_link.get_position_orientation()[1] - root_link_orn = self.root_link.get_position_orientation()[1] - - cur_orn_mat = T.quat2mat(root_link_orn).T @ T.quat2mat(base_orn) - cur_pose = th.zeros((2, 4, 4)) - cur_pose[:, :3, :3] = cur_orn_mat - cur_pose[:, 3, 3] = 1.0 - - local_pose = th.zeros((2, 4, 4)) - local_pose[:] = th.eye(4) - local_pose[:, :3, 3] = u_vec[self.base_idx].view(2, 3) - - # Rotate the linear and angular velocity to the desired frame - global_pose = cur_pose @ local_pose - lin_vel_global, ang_vel_global = global_pose[0, :3, 3], global_pose[1, :3, 3] - - u_vec[self.base_control_idx] = th.tensor([lin_vel_global[0], lin_vel_global[1], ang_vel_global[2]]) + # Add canonical position and orientation + fcns["_canonical_pos_quat"] = lambda: ControllableObjectViewAPI.get_root_position_orientation( + self.articulation_root_path + ) + fcns["canonical_pos"] = lambda: fcns["_canonical_pos_quat"][0] + fcns["canonical_quat"] = lambda: fcns["_canonical_pos_quat"][1] - return u_vec, u_type_vec + return fcns def teleop_data_to_action(self, teleop_action) -> th.Tensor: action = ManipulationRobot.teleop_data_to_action(self, teleop_action) diff --git a/omnigibson/robots/r1.py b/omnigibson/robots/r1.py index ce53112b4..643c09984 100644 --- a/omnigibson/robots/r1.py +++ b/omnigibson/robots/r1.py @@ -146,7 +146,7 @@ def _raw_controller_order(self): def _default_controllers(self): controllers = super()._default_controllers # We use joint controllers for base as default - controllers["base"] = "JointController" + controllers["base"] = "HolonomicBaseJointController" controllers["trunk"] = "JointController" # We use IK and multi finger gripper controllers as default for arm in self.arm_names: diff --git a/omnigibson/robots/tiago.py b/omnigibson/robots/tiago.py index 5ee13e1dc..e3676f20e 100644 --- a/omnigibson/robots/tiago.py +++ b/omnigibson/robots/tiago.py @@ -230,7 +230,7 @@ def _default_controllers(self): # Always call super first controllers = super()._default_controllers # We use joint controllers for base and camera as default - controllers["base"] = 
"JointController" + controllers["base"] = "HolonomicBaseJointController" controllers["trunk"] = "JointController" controllers["camera"] = "JointController" # We use multi finger gripper, and IK controllers for eefs as default diff --git a/omnigibson/utils/ui_utils.py b/omnigibson/utils/ui_utils.py index 4be556d63..11e815d34 100644 --- a/omnigibson/utils/ui_utils.py +++ b/omnigibson/utils/ui_utils.py @@ -724,7 +724,7 @@ def populate_keypress_mapping(self): # Iterate over all controller info and populate mapping for component, info in self.controller_info.items(): - if info["name"] == "JointController": + if info["name"] in ["JointController", "HolonomicBaseJointController"]: for i in range(info["command_dim"]): cmd_idx = info["start_idx"] + i self.joint_command_idx.append(cmd_idx) diff --git a/omnigibson/utils/usd_utils.py b/omnigibson/utils/usd_utils.py index 10939d81e..29680f61f 100644 --- a/omnigibson/utils/usd_utils.py +++ b/omnigibson/utils/usd_utils.py @@ -1233,6 +1233,10 @@ def set_joint_efforts(cls, prim_path, efforts, indices): def get_position_orientation(cls, prim_path): return cls._VIEWS_BY_PATTERN[cls._get_pattern_from_prim_path(prim_path)].get_position_orientation(prim_path) + @classmethod + def get_root_position_orientation(cls, prim_path): + return cls._VIEWS_BY_PATTERN[cls._get_pattern_from_prim_path(prim_path)].get_root_transform(prim_path) + @classmethod def get_linear_velocity(cls, prim_path): return cls._VIEWS_BY_PATTERN[cls._get_pattern_from_prim_path(prim_path)].get_linear_velocity(prim_path) From 3e0b36fb3670e675600808e426a5113f7df1c986 Mon Sep 17 00:00:00 2001 From: hang-yin Date: Thu, 9 Jan 2025 14:03:30 -0800 Subject: [PATCH 38/76] Updated primitives cfg for holonomic base and no-impedance --- omnigibson/configs/r1_primitives.yaml | 15 +++++++------ omnigibson/configs/tiago_primitives.yaml | 27 ++++++++++-------------- 2 files changed, 19 insertions(+), 23 deletions(-) diff --git a/omnigibson/configs/r1_primitives.yaml b/omnigibson/configs/r1_primitives.yaml index 457ab1592..cf6e36cbe 100644 --- a/omnigibson/configs/r1_primitives.yaml +++ b/omnigibson/configs/r1_primitives.yaml @@ -44,30 +44,31 @@ robots: image_width: 128 controller_config: base: - name: JointController + name: HolonomicBaseJointController motor_type: position command_input_limits: null - use_delta_commands: false - use_impedances: true - pos_kp: 50 + use_impedances: false trunk: name: JointController motor_type: position command_input_limits: null + command_output_limits: null use_delta_commands: false - use_impedances: true + use_impedances: false arm_left: name: JointController motor_type: position command_input_limits: null + command_output_limits: null use_delta_commands: false - use_impedances: true + use_impedances: false arm_right: name: JointController motor_type: position command_input_limits: null + command_output_limits: null use_delta_commands: false - use_impedances: true + use_impedances: false gripper_left: name: JointController motor_type: position diff --git a/omnigibson/configs/tiago_primitives.yaml b/omnigibson/configs/tiago_primitives.yaml index 73eb4f499..a9356d39b 100644 --- a/omnigibson/configs/tiago_primitives.yaml +++ b/omnigibson/configs/tiago_primitives.yaml @@ -74,49 +74,44 @@ robots: image_width: 128 controller_config: base: - name: JointController + name: HolonomicBaseJointController motor_type: position command_input_limits: null - use_delta_commands: false - use_impedances: true - pos_kp: 200 - pos_ki: 5.0 - max_integral_error: 10.0 + command_output_limits: null + 
From 3e0b36fb3670e675600808e426a5113f7df1c986 Mon Sep 17 00:00:00 2001
From: hang-yin
Date: Thu, 9 Jan 2025 14:03:30 -0800
Subject: [PATCH 38/76] Updated primitives cfg for holonomic base and no-impedance

---
 omnigibson/configs/r1_primitives.yaml    | 15 +++++++------
 omnigibson/configs/tiago_primitives.yaml | 27 ++++++++++--------------
 2 files changed, 19 insertions(+), 23 deletions(-)

diff --git a/omnigibson/configs/r1_primitives.yaml b/omnigibson/configs/r1_primitives.yaml
index 457ab1592..cf6e36cbe 100644
--- a/omnigibson/configs/r1_primitives.yaml
+++ b/omnigibson/configs/r1_primitives.yaml
@@ -44,30 +44,31 @@ robots:
         image_width: 128
     controller_config:
       base:
-        name: JointController
+        name: HolonomicBaseJointController
         motor_type: position
         command_input_limits: null
-        use_delta_commands: false
-        use_impedances: true
-        pos_kp: 50
+        use_impedances: false
       trunk:
         name: JointController
         motor_type: position
         command_input_limits: null
+        command_output_limits: null
         use_delta_commands: false
-        use_impedances: true
+        use_impedances: false
      arm_left:
         name: JointController
         motor_type: position
         command_input_limits: null
+        command_output_limits: null
         use_delta_commands: false
-        use_impedances: true
+        use_impedances: false
       arm_right:
         name: JointController
         motor_type: position
         command_input_limits: null
+        command_output_limits: null
         use_delta_commands: false
-        use_impedances: true
+        use_impedances: false
       gripper_left:
         name: JointController
         motor_type: position
diff --git a/omnigibson/configs/tiago_primitives.yaml b/omnigibson/configs/tiago_primitives.yaml
index 73eb4f499..a9356d39b 100644
--- a/omnigibson/configs/tiago_primitives.yaml
+++ b/omnigibson/configs/tiago_primitives.yaml
@@ -74,49 +74,44 @@ robots:
         image_width: 128
     controller_config:
       base:
-        name: JointController
+        name: HolonomicBaseJointController
         motor_type: position
         command_input_limits: null
-        use_delta_commands: false
-        use_impedances: true
-        pos_kp: 200
-        pos_ki: 5.0
-        max_integral_error: 10.0
+        command_output_limits: null
+        use_impedances: false
       trunk:
         name: JointController
         motor_type: position
         command_input_limits: null
+        command_output_limits: null
         use_delta_commands: false
-        use_impedances: true
-        pos_kp: 500
+        use_impedances: false
       arm_left:
         name: JointController
         motor_type: position
         command_input_limits: null
+        command_output_limits: null
         use_delta_commands: false
-        use_impedances: true
-        pos_kp: 500
-        pos_ki: 5.0
-        max_integral_error: 10.0
+        use_impedances: false
       arm_right:
         name: JointController
         motor_type: position
         command_input_limits: null
+        command_output_limits: null
         use_delta_commands: false
-        use_impedances: true
-        pos_kp: 500
-        pos_ki: 5.0
-        max_integral_error: 10.0
+        use_impedances: false
       gripper_left:
         name: JointController
         motor_type: position
         command_input_limits: null
+        command_output_limits: null
         use_delta_commands: false
         use_impedances: false
       gripper_right:
         name: JointController
         motor_type: position
         command_input_limits: null
+        command_output_limits: null
         use_delta_commands: false
         use_impedances: false
       camera:
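Setting use_impedances: false throughout routes position commands to the simulator's joint drives instead of converting errors into efforts. Schematically, the two paths differ as in the following sketch (FakeJoint and its methods are stand-ins for illustration, not OmniGibson's API):

class FakeJoint:
    position, velocity = 0.0, 0.0
    def apply_effort(self, u): print("effort", round(u, 3))
    def set_position_target(self, q): print("target", q)

def drive(joint, q_target, use_impedances, pos_kp=50.0):
    if use_impedances:
        # Impedance path: position error becomes an effort through a PD law
        joint.apply_effort(pos_kp * (q_target - joint.position) - 2.0 * pos_kp**0.5 * joint.velocity)
    else:
        # Direct path: hand the target to the simulator's joint drive
        joint.set_position_target(q_target)

drive(FakeJoint(), 0.3, use_impedances=False)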
""" @@ -26,7 +26,12 @@ BaseActionPrimitiveSet, ) from omnigibson.action_primitives.curobo import CuRoboEmbodimentSelection, CuRoboMotionGenerator -from omnigibson.controllers import DifferentialDriveController, InverseKinematicsController, JointController +from omnigibson.controllers import ( + DifferentialDriveController, + HolonomicBaseJointController, + InverseKinematicsController, + JointController, +) from omnigibson.macros import create_module_macros from omnigibson.objects.object_base import BaseObject from omnigibson.robots import * @@ -42,8 +47,6 @@ m = create_module_macros(module_path=__file__) -m.DEFAULT_BODY_OFFSET_FROM_FLOOR = 0.0 - m.KP_LIN_VEL = { Tiago: 0.3, Fetch: 0.2, @@ -67,7 +70,8 @@ R1: 0.2, } -m.MAX_PLANNING_ATTEMPTS = 20 +m.MAX_PLANNING_ATTEMPTS = 100 +m.MAX_IK_FAILURES_BEFORE_RETURN = 50 m.MAX_STEPS_FOR_SETTLING = 500 @@ -83,7 +87,7 @@ m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_WITH_OBJECT_AND_PREDICATE = 20 m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_NEAR_OBJECT = 200 m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_IN_ROOM = 60 -m.MAX_ATTEMPTS_FOR_SAMPLING_PLACE_POSE = 20 +m.MAX_ATTEMPTS_FOR_SAMPLING_PLACE_POSE = 50 m.PREDICATE_SAMPLING_Z_OFFSET = 0.02 m.BASE_POSE_SAMPLING_LOWER_BOUND = 0.0 m.BASE_POSE_SAMPLING_UPPER_BOUND = 1.5 @@ -92,8 +96,8 @@ m.OPEN_GRASP_APPROACH_DISTANCE = 0.4 m.HAND_DIST_THRESHOLD = 0.002 -m.DEFAULT_DIST_THRESHOLD = 0.05 -m.DEFAULT_ANGLE_THRESHOLD = 0.05 +m.DEFAULT_DIST_THRESHOLD = 0.005 +m.DEFAULT_ANGLE_THRESHOLD = 0.1 m.LOW_PRECISION_DIST_THRESHOLD = 0.1 m.LOW_PRECISION_ANGLE_THRESHOLD = 0.2 @@ -157,7 +161,7 @@ def __init__( """ log.warning( "The StarterSemanticActionPrimitive is a work-in-progress and is only provided as an example. " - "It currently only works with Tiago and R1 with their JointControllers set to absolute position mode with impedance." + "It currently only works with Tiago and R1 with their JointControllers set to absolute position mode." ) super().__init__(env, robot) self.controller_functions = { @@ -516,7 +520,7 @@ def _grasp(self, obj): grasp_poses = get_grasp_poses_for_object_sticky(obj) grasp_pose, object_direction = random.choice(grasp_poses) - grasp_offset_in_z = self.robot.finger_lengths[self.arm] + m.GRASP_APPROACH_DISTANCE + grasp_offset_in_z = self.robot.finger_lengths[self.arm] / 2.0 + m.GRASP_APPROACH_DISTANCE # Adjust grasp pose with reset orientation and finger length offset reset_orientation = self._get_reset_eef_pose("world")[self.arm][1] @@ -543,7 +547,7 @@ def _grasp(self, obj): # It's okay if we can't go all the way because we run into the object. indented_print("Performing grasp approach") # Use direct IK to move the hand to the approach pose. 
- yield from self._move_hand(approach_pose, avoid_collision=False) + yield from self._move_hand(approach_pose, avoid_collision=False, ignore_paths=[obj.prim_path]) elif self.robot.grasping_mode == "assisted": indented_print("Performing grasp approach") yield from self._move_hand(approach_pose) @@ -658,35 +662,32 @@ def _place_with_predicate(self, obj, predicate): "You need to be grasping an object first to place it somewhere.", ) - # Sample location to place object - pose_candidates = [] - directly_move_hand_pose = None self._motion_generator.update_obstacles() - while len(pose_candidates) < m.MAX_ATTEMPTS_FOR_SAMPLING_PLACE_POSE: + target_in_reach = False + valid_navigation_pose = None + for _ in range(m.MAX_ATTEMPTS_FOR_SAMPLING_PLACE_POSE): + # Sample one pose at a time obj_pose = self._sample_pose_with_object_and_predicate(predicate, obj_in_hand, obj) - # TODO: why do we sample this orientation? obj_pose = (obj_pose[0], obj_in_hand.get_position_orientation()[1]) hand_pose = self._get_hand_pose_for_object_pose(obj_pose) - if self._target_in_reach_of_robot(hand_pose, skip_obstacle_update=True)[self.arm]: - directly_move_hand_pose = hand_pose + + # First check if we can directly move the hand there + target_in_reach = self._target_in_reach_of_robot(hand_pose, skip_obstacle_update=True)[self.arm] + if target_in_reach: + yield from self._move_hand(hand_pose) break - else: - pose_candidates.append(hand_pose) - valid_navigation_pose = None - if directly_move_hand_pose is not None: - yield from self._move_hand(directly_move_hand_pose) - else: - for candidate in pose_candidates: - valid_navigation_pose = self._sample_pose_near_object(obj, pose_on_obj=candidate, sampling_attempts=20) - if valid_navigation_pose is None: - continue - else: - yield from self._navigate_to_pose(valid_navigation_pose) - yield from self._move_hand(candidate) - break + # If not, try to find a valid navigation pose for this hand pose + valid_navigation_pose = self._sample_pose_near_object( + obj, pose_on_obj=hand_pose, sampling_attempts=10, skip_obstacle_update=True + ) - if valid_navigation_pose is None: + if valid_navigation_pose is not None: + yield from self._navigate_to_pose(valid_navigation_pose) + yield from self._move_hand(hand_pose) + break + + if not target_in_reach and valid_navigation_pose is None: raise ActionPrimitiveError( ActionPrimitiveError.Reason.PLANNING_ERROR, "Could not find a valid pose to place the object", @@ -817,8 +818,10 @@ def _ik_solver_cartesian_to_joint_space(self, target_pose, arm=None, skip_obstac target_pos=target_pos, target_quat=target_quat, is_local=False, + max_attempts=math.ceil(m.MAX_PLANNING_ATTEMPTS / self._motion_generator.batch_size), return_full_result=False, ik_only=True, + ik_fail_return=m.MAX_IK_FAILURES_BEFORE_RETURN, ik_world_collision_check=False, skip_obstacle_update=skip_obstacle_update, emb_sel=CuRoboEmbodimentSelection.ARM, @@ -840,12 +843,20 @@ def _move_hand( motion_constraint=None, low_precision=False, lock_auxiliary_arm=False, + ignore_paths=None, ): """ Yields action for the robot to move hand so the eef is in the target pose using the planner Args: target_pose (Iterable of array): Position and orientation arrays in an iterable for pose + avoid_collision (bool): Whether to avoid collision (this need to be false for grasping) + arm (str): Arm to use for the target pose + motion_constraint (MotionConstraint): Motion constraint for the motion + low_precision (bool): Whether to use low precision for the motion + lock_auxiliary_arm (bool): Whether to lock the other 
arm in place + ignore_paths (None or list of str): If specified, prim path substrings that should + be ignored when planning Returns: th.tensor or None: Action array for one step for the robot to move hand or None if its at the target pose @@ -854,42 +865,37 @@ def _move_hand( self.debug_visual_marker.set_position_orientation(*target_pose) if arm is None: arm = self.arm - if avoid_collision: - yield from self._settle_robot() - # curobo motion generator takes a pose but outputs joint positions - if not lock_auxiliary_arm: - target_pos = { - self.robot.eef_link_names[self.arm]: target_pose[0], - } - target_quat = { - self.robot.eef_link_names[self.arm]: target_pose[1], - } - else: - left_hand_pos, left_hand_quat = target_pose if arm == "left" else self.robot.get_eef_pose(arm="left") - right_hand_pos, right_hand_quat = ( - target_pose if arm == "right" else self.robot.get_eef_pose(arm="right") - ) - target_pos = { - self.robot.eef_link_names["left"]: left_hand_pos, - self.robot.eef_link_names["right"]: right_hand_pos, - } - target_quat = { - self.robot.eef_link_names["left"]: left_hand_quat, - self.robot.eef_link_names["right"]: right_hand_quat, - } - q_traj = self._plan_joint_motion( - target_pos=target_pos, - target_quat=target_quat, - embodiment_selection=CuRoboEmbodimentSelection.ARM, - motion_constraint=motion_constraint, - ) + + if ignore_paths is not None: + self._motion_generator.update_obstacles(ignore_paths=ignore_paths) + + yield from self._settle_robot() + # curobo motion generator takes a pose but outputs joint positions + if not lock_auxiliary_arm: + target_pos = { + self.robot.eef_link_names[self.arm]: target_pose[0], + } + target_quat = { + self.robot.eef_link_names[self.arm]: target_pose[1], + } else: - # Move EEF directly without collsion checking - goal_arm_joint_pos = self._convert_cartesian_to_joint_space(target_pose, arm=arm) - curr_joint_pos = self.robot.get_joint_positions() - goal_joint_pos = curr_joint_pos.clone() - goal_joint_pos[self._manipulation_control_idx(arm)] = goal_arm_joint_pos - q_traj = th.stack(self._add_linearly_interpolated_waypoints(plan=[curr_joint_pos, goal_joint_pos])) + left_hand_pos, left_hand_quat = target_pose if arm == "left" else self.robot.get_eef_pose(arm="left") + right_hand_pos, right_hand_quat = target_pose if arm == "right" else self.robot.get_eef_pose(arm="right") + target_pos = { + self.robot.eef_link_names["left"]: left_hand_pos, + self.robot.eef_link_names["right"]: right_hand_pos, + } + target_quat = { + self.robot.eef_link_names["left"]: left_hand_quat, + self.robot.eef_link_names["right"]: right_hand_quat, + } + q_traj = self._plan_joint_motion( + target_pos=target_pos, + target_quat=target_quat, + embodiment_selection=CuRoboEmbodimentSelection.ARM, + motion_constraint=motion_constraint, + skip_obstalce_update=ignore_paths is not None, + ) indented_print(f"Plan has {len(q_traj)} steps") yield from self._execute_motion_plan(q_traj, stop_on_contact=not avoid_collision, low_precision=low_precision) @@ -900,6 +906,7 @@ def _plan_joint_motion( target_quat, embodiment_selection=CuRoboEmbodimentSelection.DEFAULT, motion_constraint=None, + skip_obstalce_update=False, ): # If an object is grasped, we need to pass it to the motion planner obj_in_hand = self._get_obj_in_hand() @@ -913,18 +920,18 @@ def _plan_joint_motion( k: th.stack([v for _ in range(self._motion_generator.batch_size)]) for k, v in target_quat.items() } - self._motion_generator.update_obstacles() + if not skip_obstalce_update: + self._motion_generator.update_obstacles() 
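The refactor above follows a "sync once, reuse downstream" pattern: the collision world is refreshed a single time, and every nested call is told to skip its own refresh via skip_obstacle_update. A schematic wrapper showing the intent (required planner arguments other than the ones named here are elided; the helper itself is illustrative):

def plan_with_cached_obstacles(motion_generator, requests):
    # Refresh cuRobo's collision world once for a whole batch of planning queries
    motion_generator.update_obstacles()
    results = []
    for target_pos, target_quat in requests:
        results.append(
            motion_generator.compute_trajectories(
                target_pos=target_pos,
                target_quat=target_quat,
                skip_obstacle_update=True,  # reuse the world state synced above
            )
        )
    return results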
@@ -913,18 +920,18 @@
                 k: th.stack([v for _ in range(self._motion_generator.batch_size)]) for k, v in target_quat.items()
             }
 
-        self._motion_generator.update_obstacles()
+        if not skip_obstacle_update:
+            self._motion_generator.update_obstacles()
         successes, traj_paths = self._motion_generator.compute_trajectories(
             target_pos=target_pos,
             target_quat=target_quat,
             is_local=False,
             max_attempts=math.ceil(m.MAX_PLANNING_ATTEMPTS / self._motion_generator.batch_size),
             timeout=60.0,
-            ik_fail_return=5,
+            ik_fail_return=m.MAX_IK_FAILURES_BEFORE_RETURN,
             enable_finetune_trajopt=True,
             finetune_attempts=1,
             return_full_result=False,
-            success_ratio=1.0,
             attached_obj=attached_obj,
             motion_constraint=motion_constraint,
             skip_obstacle_update=True,
@@ -955,6 +962,8 @@
         if ignore_physics:
             self.robot.set_joint_positions(joint_pos)
             og.sim.step()
+            if stop_on_contact and detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=True):
+                return
         else:
             # Convert target joint positions to command
             action = self._q_to_action(joint_pos)
@@ -987,6 +996,9 @@
                 if stop_on_contact and collision_detected:
                     return
                 yield self._postprocess_action(action)
+                action = self._q_to_action(
+                    joint_pos
+                )  # This is needed for holonomic base joint controller to update local frame pose
 
         if not ignore_failure:
             if not base_target_reached:
@@ -1023,9 +1035,11 @@ def _add_linearly_interpolated_waypoints(self, plan, max_inter_dist=0.01):
         assert len(plan) > 1, "Plan must have at least 2 waypoints to interpolate"
         interpolated_plan = []
         for i in range(len(plan) - 1):
-            max_diff = max(plan[i + 1] - plan[i])
-            num_intervals = math.ceil(max_diff / max_inter_dist)
+            # Calculate maximum difference across all dimensions
+            max_diff = (plan[i + 1] - plan[i]).abs().max()
+            num_intervals = math.ceil(max_diff.item() / max_inter_dist)
             interpolated_plan += multi_dim_linspace(plan[i], plan[i + 1], num_intervals)
+        interpolated_plan.append(plan[-1])
 
         return interpolated_plan
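The interpolation fix above caps the largest per-joint jump between consecutive waypoints. For intuition, a self-contained sketch of the same idea (multi_dim_linspace is approximated here with th.lerp; the numbers are illustrative):

import math
import torch as th

def densify(a, b, max_inter_dist=0.01):
    # Number of segments needed so that no joint moves more than max_inter_dist per step
    n_segments = max(1, math.ceil(((b - a).abs().max() / max_inter_dist).item()))
    return [th.lerp(a, b, t) for t in th.linspace(0.0, 1.0, n_segments + 1)]

a, b = th.tensor([0.0, 0.0]), th.tensor([0.05, -0.02])
print(len(densify(a, b)))  # 6 waypoints -> per-step joint deltas of at most 0.01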
@@ -1581,16 +1595,17 @@ def _navigate_if_needed(self, obj, pose_on_obj=None, **kwargs):
         Returns:
             th.tensor or None: Action array for one step for the robot to navigate or None if it is done navigating
         """
+        self._motion_generator.update_obstacles()
         if pose_on_obj is not None:
-            if self._target_in_reach_of_robot(pose_on_obj)[self.arm]:
+            if self._target_in_reach_of_robot(pose_on_obj, skip_obstacle_update=True)[self.arm]:
                 # No need to navigate.
                 return
-        elif self._target_in_reach_of_robot(obj.get_position_orientation())[self.arm]:
+        elif self._target_in_reach_of_robot(obj.get_position_orientation(), skip_obstacle_update=True)[self.arm]:
             return
 
-        yield from self._navigate_to_obj(obj, pose_on_obj=pose_on_obj, **kwargs)
+        yield from self._navigate_to_obj(obj, pose_on_obj=pose_on_obj, skip_obstacle_update=True, **kwargs)
 
-    def _navigate_to_obj(self, obj, pose_on_obj=None, **kwargs):
+    def _navigate_to_obj(self, obj, pose_on_obj=None, skip_obstacle_update=False, **kwargs):
         """
         Yields action to navigate the robot to be in range of the pose
 
@@ -1601,7 +1616,9 @@
         Returns:
             th.tensor or None: Action array for one step for the robot to navigate in range or None if it is done navigating
         """
-        pose = self._sample_pose_near_object(obj, pose_on_obj=pose_on_obj, **kwargs)
+        pose = self._sample_pose_near_object(
+            obj, pose_on_obj=pose_on_obj, skip_obstacle_update=skip_obstacle_update, **kwargs
+        )
         if pose is None:
             raise ActionPrimitiveError(
                 ActionPrimitiveError.Reason.PLANNING_ERROR,
@@ -1723,7 +1740,12 @@ def _rotate_in_place(self, end_pose, angle_threshold=m.DEFAULT_ANGLE_THRESHOLD):
             yield self._postprocess_action(empty_action)
 
     def _sample_pose_near_object(
-        self, obj, pose_on_obj=None, sampling_attempts=m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_NEAR_OBJECT, **kwargs
+        self,
+        obj,
+        pose_on_obj=None,
+        sampling_attempts=m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_NEAR_OBJECT,
+        skip_obstacle_update=False,
+        **kwargs,
     ):
         """
         Returns a 2d pose for the robot within the range of the object and where the robot is not in collision with anything
@@ -1751,8 +1773,9 @@
         )
 
         attempt = 0
-        # Update obstacle once before sampling
-        self._motion_generator.update_obstacles()
+        if not skip_obstacle_update:
+            # Update obstacle once before sampling
+            self._motion_generator.update_obstacles()
         while attempt < sampling_attempts:
             candidate_poses = []
             for _ in range(self._collision_check_batch_size):
@@ -1938,8 +1961,7 @@ def _validate_poses(self, candidate_poses, pose_on_obj=None, arm=None, skip_obst
 
         return ~invalid_results
 
-    @staticmethod
-    def _get_robot_pose_from_2d_pose(pose_2d):
+    def _get_robot_pose_from_2d_pose(self, pose_2d):
         """
         Gets 3d pose from 2d pose
 
         Args:
             pose_2d (Iterable): (x, y, yaw) 2d pose
 
         Returns:
             th.tensor: (x,y,z) Position in the world frame
             th.tensor: (x,y,z,w) Quaternion orientation in the world frame
         """
-        pos = th.tensor([pose_2d[0], pose_2d[1], m.DEFAULT_BODY_OFFSET_FROM_FLOOR], dtype=th.float32)
+        pos = th.tensor([pose_2d[0], pose_2d[1], self.robot.get_position_orientation()[0][2]], dtype=th.float32)
         orn = T.euler2quat(th.tensor([0, 0, pose_2d[2]], dtype=th.float32))
 
         return pos, orn

From 5b6104b9c0b9415bc56a45c450859878ae05ef72 Mon Sep 17 00:00:00 2001
From: hang-yin
Date: Thu, 9 Jan 2025 17:41:41 -0800
Subject: [PATCH 40/76] Minor fixes to primitives

---
 .../starter_semantic_action_primitives.py          |  3 +--
 omnigibson/configs/r1_primitives.yaml              |  6 ++++++
 .../controllers/holonomic_base_joint_controller.py |  2 +-
 .../examples/action_primitives/rs_int_example.py   | 13 ++++---------
 omnigibson/robots/r1.py                            |  3 +--
 5 files changed, 13 insertions(+), 14 deletions(-)

diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py
index 001432052..3b8a200b5 100644
--- a/omnigibson/action_primitives/starter_semantic_action_primitives.py
+++ 
b/omnigibson/action_primitives/starter_semantic_action_primitives.py
@@ -2043,8 +2043,7 @@ def _settle_robot(self):
         Returns:
             th.tensor or None: Action array for one step for the robot to do nothing
         """
-        # TODO: fix empty action
-        for _ in range(30):
+        for _ in range(10):
             empty_action = self._q_to_action(self.robot.get_joint_positions())
             yield self._postprocess_action(empty_action)
 
diff --git a/omnigibson/configs/r1_primitives.yaml b/omnigibson/configs/r1_primitives.yaml
index cf6e36cbe..78f5383ad 100644
--- a/omnigibson/configs/r1_primitives.yaml
+++ b/omnigibson/configs/r1_primitives.yaml
@@ -83,6 +83,12 @@ robots:
         use_impedances: false
       camera:
         name: JointController
+    reset_joint_pos: [ 0.0000, -0.0000,  0.0247,  0.0009,  0.0004,
+                      -0.0000,  0.0000,  0.0000,  0.0000,  0.0000,
+                      -0.0464,  0.0464,  2.6172,  2.6168, -1.4584,
+                      -1.4570, -0.0433,  0.0418,  1.5899, -1.5896,
+                      -1.1587,  1.1593,  0.0300,  0.0300,  0.0300,
+                       0.0300]
 
 objects: []
 
diff --git a/omnigibson/controllers/holonomic_base_joint_controller.py b/omnigibson/controllers/holonomic_base_joint_controller.py
index 6790513a0..b7f9b5f93 100644
--- a/omnigibson/controllers/holonomic_base_joint_controller.py
+++ b/omnigibson/controllers/holonomic_base_joint_controller.py
@@ -1,7 +1,7 @@
 import torch as th
 
 import omnigibson.utils.transform_utils as T
-from omnigibson.controllers import JointController
+from omnigibson.controllers.joint_controller import JointController
 from omnigibson.macros import create_module_macros
 
 
diff --git a/omnigibson/examples/action_primitives/rs_int_example.py b/omnigibson/examples/action_primitives/rs_int_example.py
index 534450510..8ec3c3a8f 100644
--- a/omnigibson/examples/action_primitives/rs_int_example.py
+++ b/omnigibson/examples/action_primitives/rs_int_example.py
@@ -12,10 +12,6 @@
 from omnigibson.robots.tiago import Tiago
 from omnigibson.utils.ui_utils import choose_from_options
 
-# Don't use GPU dynamics and use flatcache for performance boost
-gm.USE_GPU_DYNAMICS = False
-gm.ENABLE_FLATCACHE = True
-
 
 def execute_controller(ctrl_gen, env):
     for action in ctrl_gen:
@@ -36,8 +32,7 @@ def main():
     config = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader)
 
     config["scene"]["scene_model"] = "Rs_int"
-    config["scene"]["not_load_object_categories"] = ["ceilings", "loudspeaker", "standing_tv", "pot_plant"]
-    # config["scene"]["load_object_categories"] = ["floors", "breakfast_table", "bottom_cabinet", "walls"]
+    config["scene"]["not_load_object_categories"] = ["ceilings", "carpet"]
     config["objects"] = [
         {
             "type": "DatasetObject",
@@ -67,7 +62,7 @@ def main():
     env.scene.reset()
 
     og.sim.viewer_camera.set_position_orientation(
-        th.tensor([-0.6084, -3.3571, 1.2033]), th.tensor([0.6349, -0.1778, -0.2027, 0.7240])
+        th.tensor([1.8294, -3.2502, 1.6885]), th.tensor([0.5770, 0.1719, 0.2280, 0.7652])
     )
 
     # Let the object settle
@@ -78,7 +73,7 @@ def main():
     og.sim.enable_viewer_camera_teleoperation()
 
     controller = StarterSemanticActionPrimitives(env, robot, enable_head_tracking=isinstance(robot, Tiago))
-    cabinet = scene.object_registry("name", "bottom_cabinet_jhymlr_0")
+    coffee_table = scene.object_registry("name", "coffee_table_fqluyq_0")
     apple = scene.object_registry("name", "apple")
 
     # Grasp apple
@@ -88,7 +83,7 @@ def main():
 
     # Place on cabinet
     print("Executing controller")
-    execute_controller(controller.apply_ref(StarterSemanticActionPrimitiveSet.PLACE_ON_TOP, cabinet), env)
+    execute_controller(controller.apply_ref(StarterSemanticActionPrimitiveSet.PLACE_ON_TOP, coffee_table), env)
     print("Finished executing place")
 
diff 
--git a/omnigibson/robots/r1.py b/omnigibson/robots/r1.py index 643c09984..0c4415575 100644 --- a/omnigibson/robots/r1.py +++ b/omnigibson/robots/r1.py @@ -168,10 +168,9 @@ def untucked_default_joint_pos(self): pos = th.zeros(self.n_dof) # Keep the current joint positions for the base joints pos[self.base_idx] = self.get_joint_positions()[self.base_idx] - pos[self.arm_control_idx["left"]] = th.tensor([-0.0464, 2.6172, -1.4584, -0.0433, 1.5899, -1.1587]) - pos[self.arm_control_idx["right"]] = th.tensor([0.0464, 2.6168, -1.4570, 0.0418, -1.5896, 1.1593]) for arm in self.arm_names: pos[self.gripper_control_idx[arm]] = th.tensor([0.03, 0.03]) # open gripper + pos[self.arm_control_idx[arm]] = th.tensor([0.0, 1.906, -0.991, 1.571, 0.915, -1.571]) return pos @property From b750dcb8ca4f854895eca4def04ecf7b79eb03cf Mon Sep 17 00:00:00 2001 From: hang-yin Date: Thu, 9 Jan 2025 18:33:00 -0800 Subject: [PATCH 41/76] Torch to cb for holonomic base joint controller --- .../holonomic_base_joint_controller.py | 28 +++++++++---------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/omnigibson/controllers/holonomic_base_joint_controller.py b/omnigibson/controllers/holonomic_base_joint_controller.py index 26e136f5d..70c9bfed6 100644 --- a/omnigibson/controllers/holonomic_base_joint_controller.py +++ b/omnigibson/controllers/holonomic_base_joint_controller.py @@ -1,6 +1,4 @@ -import torch as th - -import omnigibson.utils.transform_utils as T +from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.controllers.joint_controller import JointController @@ -91,41 +89,41 @@ def _update_goal(self, command, control_dict): """ Updates the goal command by transforming it from the robot's local frame to its canonical frame. """ - base_pose = T.pose2mat((control_dict["root_pos"], control_dict["root_quat"])) - canonical_pose = T.pose2mat((control_dict["canonical_pos"], control_dict["canonical_quat"])) - canonical_to_base_pose = T.pose_inv(canonical_pose) @ base_pose + base_pose = cb.T.pose2mat((control_dict["root_pos"], control_dict["root_quat"])) + canonical_pose = cb.T.pose2mat((control_dict["canonical_pos"], control_dict["canonical_quat"])) + canonical_to_base_pose = cb.T.pose_inv(canonical_pose) @ base_pose if self.motor_type == "position": # Handle position control mode - command_in_base_frame = th.eye(4) + command_in_base_frame = cb.eye(4) command_in_base_frame[:2, 3] = command[:2] # Set x,y translation - command_in_base_frame[:3, :3] = T.euler2mat(th.tensor([0.0, 0.0, command[2]])) # Set rotation + command_in_base_frame[:3, :3] = cb.T.euler2mat(cb.array([0.0, 0.0, command[2]])) # Set rotation # Transform command to canonical frame command_in_canonical_frame = canonical_to_base_pose @ command_in_base_frame # Extract position and yaw from transformed command position = command_in_canonical_frame[:2, 3] - yaw = T.mat2euler(command_in_canonical_frame[:3, :3])[2:3] - command = th.cat([position, yaw]) + yaw = cb.T.mat2euler(command_in_canonical_frame[:3, :3])[2:3] + command = cb.cat([position, yaw]) else: # Handle velocity/effort control modes # Note: Only rotate the commands, don't translate rotation_matrix = canonical_to_base_pose[:3, :3] # Prepare poses for transformation - rotation_poses = th.zeros((2, 3, 3)) + rotation_poses = cb.zeros((2, 3, 3)) rotation_poses[:, :3, :3] = rotation_matrix - local_vectors = th.zeros((2, 3, 1)) - local_vectors[0, :, 0] = th.tensor([command[0], command[1], 0.0]) # Linear - local_vectors[1, :, 0] = th.tensor([0.0, 0.0, command[2]]) # Angular + 
local_vectors = cb.zeros((2, 3, 1)) + local_vectors[0, :, 0] = cb.array([command[0], command[1], 0.0]) # Linear + local_vectors[1, :, 0] = cb.array([0.0, 0.0, command[2]]) # Angular # Transform to global frame global_vectors = rotation_poses @ local_vectors linear_global = global_vectors[0] angular_global = global_vectors[1] - command = th.tensor([linear_global[0], linear_global[1], angular_global[2]]) + command = cb.array([linear_global[0], linear_global[1], angular_global[2]]) return super()._update_goal(command=command, control_dict=control_dict) From 201022e10c9cd0250d70e18fac808b4d69939180 Mon Sep 17 00:00:00 2001 From: hang-yin Date: Thu, 9 Jan 2025 18:58:15 -0800 Subject: [PATCH 42/76] Primitive fixes --- .../starter_semantic_action_primitives.py | 31 +++++++++++++------ 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index cc46e4fd7..aa7466a7f 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -26,6 +26,7 @@ ) from omnigibson.action_primitives.curobo import CuRoboEmbodimentSelection, CuRoboMotionGenerator from omnigibson.controllers import ( + HolonomicBaseJointController, InverseKinematicsController, JointController, ) @@ -110,6 +111,11 @@ def indented_print(msg, *args, **kwargs): print(" " * len(inspect.stack()) + str(msg), *args, **kwargs) +def normalize_angle(angle): + """Normalize angle to [-pi, pi] range.""" + return th.atan2(th.sin(angle), th.cos(angle)) + + class StarterSemanticActionPrimitiveSet(IntEnum): _init_ = "value __doc__" GRASP = auto(), "Grasp an object" @@ -560,9 +566,6 @@ def _grasp(self, obj): {"target object": obj.name}, ) - # TODO: ag force seems to not be enough to keep the object in hand. 
need to investigate - obj_in_hand.root_link.density = 1.0 - indented_print("Moving hand back") yield from self._reset_hand(self.arm) @@ -945,8 +948,12 @@ def _plan_joint_motion( q_traj = self._motion_generator.path_to_joint_trajectory( traj_path, get_full_js=True, emb_sel=embodiment_selection - ) - return q_traj.cpu() + ).cpu() + + # Smooth out the trajectory + q_traj = th.stack(self._add_linearly_interpolated_waypoints(plan=q_traj, max_inter_dist=0.01)) + + return q_traj def _execute_motion_plan( self, q_traj, stop_on_contact=False, ignore_failure=False, low_precision=False, ignore_physics=False @@ -982,8 +989,10 @@ def _execute_motion_plan( ) if th.max(th.abs(articulation_joint_diff)).item() < articulation_threshold: articulation_target_reached = True - # TODO: genralize this to transaltion&rotation + high/low precision modes - if th.max(th.abs(base_joint_diff)).item() < m.DEFAULT_DIST_THRESHOLD: + if ( + th.max(th.abs(base_joint_diff[:2])).item() < m.DEFAULT_DIST_THRESHOLD + and th.abs(normalize_angle(base_joint_diff[2])).item() < m.DEFAULT_ANGLE_THRESHOLD + ): base_target_reached = True if base_target_reached and articulation_target_reached: break @@ -1011,6 +1020,10 @@ def _q_to_action(self, q): action = [] for controller in self.robot.controllers.values(): command = q[controller.dof_idx] + if isinstance(controller, HolonomicBaseJointController): + # For a holonomic base joint controller, the command should be in the robot local frame + local_pose = self._world_pose_to_robot_pose(self._get_robot_pose_from_2d_pose(command)) + command = th.tensor([local_pose[0][0], local_pose[0][1], T.quat2euler(local_pose[1])[2]]) action.append(controller._reverse_preprocess_command(command)) action = th.cat(action, dim=0) assert action.shape[0] == self.robot.action_dim @@ -1975,7 +1988,7 @@ def _world_pose_to_robot_pose(self, pose): Converts the pose in the world frame to the robot frame Args: - pose_2d (Iterable): (x, y, yaw) 2d pose + pose (Iterable): (pos, quat) Pose in the world frame Returns: 2-tuple: @@ -1990,7 +2003,7 @@ def _robot_pose_to_world_pose(self, pose): Converts the pose in the robot frame to the world frame Args: - pose_2d (Iterable): (x, y, yaw) 2d pose + pose (Iterable): (pos, quat) Pose in the robot frame Returns: 2-tuple: From 01880685654fcc9a147bf3651d400f035d4f1c24 Mon Sep 17 00:00:00 2001 From: hang-yin Date: Fri, 10 Jan 2025 14:07:47 -0800 Subject: [PATCH 43/76] Tiny fix --- omnigibson/controllers/joint_controller.py | 1 - 1 file changed, 1 deletion(-) diff --git a/omnigibson/controllers/joint_controller.py b/omnigibson/controllers/joint_controller.py index 97ae01c3b..0797157bc 100644 --- a/omnigibson/controllers/joint_controller.py +++ b/omnigibson/controllers/joint_controller.py @@ -48,7 +48,6 @@ def __init__( command_output_limits="default", pos_kp=None, pos_damping_ratio=None, - max_integral_error=None, vel_kp=None, use_impedances=False, use_gravity_compensation=False, From c9b014d5782bf2d92bd98eb27e90741fc7972c8d Mon Sep 17 00:00:00 2001 From: hang-yin Date: Fri, 10 Jan 2025 14:16:12 -0800 Subject: [PATCH 44/76] Set velocity target as zero when setting position target --- omnigibson/objects/controllable_object.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/omnigibson/objects/controllable_object.py b/omnigibson/objects/controllable_object.py index ad96d7f10..6b5d249ae 100644 --- a/omnigibson/objects/controllable_object.py +++ b/omnigibson/objects/controllable_object.py @@ -565,6 +565,12 @@ def deploy_control(self, control, control_type): 
positions=control[pos_idxs], indices=pos_idxs, ) + # If we're setting joint position targets, we should also set velocity targets to 0 + ControllableObjectViewAPI.set_joint_velocity_targets( + self.articulation_root_path, + velocities=cb.zeros(len(pos_idxs)), + indices=pos_idxs, + ) vel_idxs = cb.where(control_type == ControlType.VELOCITY)[0] if len(vel_idxs) > 0: ControllableObjectViewAPI.set_joint_velocity_targets( From 4cf40e721c174e642e5d2a20378ecae4b57b433e Mon Sep 17 00:00:00 2001 From: hang-yin Date: Fri, 10 Jan 2025 14:41:23 -0800 Subject: [PATCH 45/76] Make primitives robot settle time longer --- .../action_primitives/starter_semantic_action_primitives.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index aa7466a7f..48ab9cd8f 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -2050,7 +2050,7 @@ def _settle_robot(self): Returns: th.tensor or None: Action array for one step for the robot to do nothing """ - for _ in range(10): + for _ in range(50): empty_action = self._q_to_action(self.robot.get_joint_positions()) yield self._postprocess_action(empty_action) From 17fd4f7c588ae5056b9aa495b5909ee751b40484 Mon Sep 17 00:00:00 2001 From: hang-yin Date: Fri, 10 Jan 2025 17:37:43 -0800 Subject: [PATCH 46/76] Fix holonomic base joint controller angle clipping bug --- omnigibson/controllers/holonomic_base_joint_controller.py | 3 +-- omnigibson/robots/holonomic_base_robot.py | 8 ++++++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/omnigibson/controllers/holonomic_base_joint_controller.py b/omnigibson/controllers/holonomic_base_joint_controller.py index 70c9bfed6..e03b9e573 100644 --- a/omnigibson/controllers/holonomic_base_joint_controller.py +++ b/omnigibson/controllers/holonomic_base_joint_controller.py @@ -97,7 +97,6 @@ def _update_goal(self, command, control_dict): # Handle position control mode command_in_base_frame = cb.eye(4) command_in_base_frame[:2, 3] = command[:2] # Set x,y translation - command_in_base_frame[:3, :3] = cb.T.euler2mat(cb.array([0.0, 0.0, command[2]])) # Set rotation # Transform command to canonical frame command_in_canonical_frame = canonical_to_base_pose @ command_in_base_frame @@ -124,6 +123,6 @@ def _update_goal(self, command, control_dict): linear_global = global_vectors[0] angular_global = global_vectors[1] - command = cb.array([linear_global[0], linear_global[1], angular_global[2]]) + command = cb.cat([linear_global[0], linear_global[1], angular_global[2]]) return super()._update_goal(command=command, control_dict=control_dict) diff --git a/omnigibson/robots/holonomic_base_robot.py b/omnigibson/robots/holonomic_base_robot.py index c5e1a23bc..bc48e2e13 100644 --- a/omnigibson/robots/holonomic_base_robot.py +++ b/omnigibson/robots/holonomic_base_robot.py @@ -1,6 +1,7 @@ from functools import cached_property from typing import Literal +import math import torch as th import omnigibson as og @@ -206,6 +207,13 @@ def _initialize(self): # Reload the controllers to update their command_output_limits and control_limits self.reload_controllers(self._controller_config) + def apply_action(self, action): + j_pos = self.joints["base_footprint_rz_joint"].get_state()[0] + if j_pos < -math.pi or j_pos > math.pi: + j_pos = (j_pos + math.pi) % (2 * math.pi) - math.pi + 
self.joints["base_footprint_rz_joint"].set_pos(j_pos, drive=False) + super().apply_action(action) + @cached_property def base_idx(self): """ From 088bc786351f8db5d81f63374d1639e90be036a8 Mon Sep 17 00:00:00 2001 From: hang-yin Date: Fri, 10 Jan 2025 17:40:27 -0800 Subject: [PATCH 47/76] Fix tests --- .../starter_semantic_action_primitives.py | 24 +++-------- .../symbolic_semantic_action_primitives.py | 2 +- omnigibson/robots/holonomic_base_robot.py | 41 +++++++++++++++++++ tests/test_curobo.py | 6 +-- tests/test_robot_states_flatcache.py | 15 ++++--- 5 files changed, 58 insertions(+), 30 deletions(-) diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index 48ab9cd8f..a6ac773b3 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -26,7 +26,6 @@ ) from omnigibson.action_primitives.curobo import CuRoboEmbodimentSelection, CuRoboMotionGenerator from omnigibson.controllers import ( - HolonomicBaseJointController, InverseKinematicsController, JointController, ) @@ -968,7 +967,7 @@ def _execute_motion_plan( return else: # Convert target joint positions to command - action = self._q_to_action(joint_pos) + action = self.robot.q_to_action(joint_pos) base_target_reached = False articulation_target_reached = False @@ -1000,7 +999,7 @@ def _execute_motion_plan( if stop_on_contact and collision_detected: return yield self._postprocess_action(action) - action = self._q_to_action( + action = self.robot.q_to_action( joint_pos ) # This is needed for holonomic base joint controller to update local frame pose @@ -1016,19 +1015,6 @@ def _execute_motion_plan( "Could not reach the target articulation joint positions. Try again", ) - def _q_to_action(self, q): - action = [] - for controller in self.robot.controllers.values(): - command = q[controller.dof_idx] - if isinstance(controller, HolonomicBaseJointController): - # For a holonomic base joint controller, the command should be in the robot local frame - local_pose = self._world_pose_to_robot_pose(self._get_robot_pose_from_2d_pose(command)) - command = th.tensor([local_pose[0][0], local_pose[0][1], T.quat2euler(local_pose[1])[2]]) - action.append(controller._reverse_preprocess_command(command)) - action = th.cat(action, dim=0) - assert action.shape[0] == self.robot.action_dim - return action - def _add_linearly_interpolated_waypoints(self, plan, max_inter_dist=0.01): """ Adds waypoints to the plan so the distance between values in the plan never exceeds the max_inter_dist. 
@@ -1306,7 +1292,7 @@ def _move_fingers_to_limit(self, limit_type): for finger_joint in self.robot.finger_joints[self.arm]: idx = joint_names.index(finger_joint.joint_name) q[idx] = getattr(finger_joint, f"{limit_type}_limit") - action = self._q_to_action(q) + action = self.robot.q_to_action(q) finger_joint_limits = getattr(self.robot, f"joint_{limit_type}_limits")[ self.robot.gripper_control_idx[self.arm] ] @@ -2051,11 +2037,11 @@ def _settle_robot(self): th.tensor or None: Action array for one step for the robot to do nothing """ for _ in range(50): - empty_action = self._q_to_action(self.robot.get_joint_positions()) + empty_action = self.robot.q_to_action(self.robot.get_joint_positions()) yield self._postprocess_action(empty_action) for _ in range(m.MAX_STEPS_FOR_SETTLING): if th.norm(self.robot.get_linear_velocity()) < 0.01: break - empty_action = self._q_to_action(self.robot.get_joint_positions()) + empty_action = self.robot.q_to_action(self.robot.get_joint_positions()) yield self._postprocess_action(empty_action) diff --git a/omnigibson/action_primitives/symbolic_semantic_action_primitives.py b/omnigibson/action_primitives/symbolic_semantic_action_primitives.py index 9c65db270..e27aa79f1 100644 --- a/omnigibson/action_primitives/symbolic_semantic_action_primitives.py +++ b/omnigibson/action_primitives/symbolic_semantic_action_primitives.py @@ -41,7 +41,7 @@ class SymbolicSemanticActionPrimitiveSet(IntEnum): class SymbolicSemanticActionPrimitives(StarterSemanticActionPrimitives): def __init__(self, env, robot): - super().__init__(env, robot) + super().__init__(env, robot, skip_curobo_initilization=True) self.controller_functions = { SymbolicSemanticActionPrimitiveSet.GRASP: self._grasp, SymbolicSemanticActionPrimitiveSet.PLACE_ON_TOP: self._place_on_top, diff --git a/omnigibson/robots/holonomic_base_robot.py b/omnigibson/robots/holonomic_base_robot.py index bc48e2e13..945cfde58 100644 --- a/omnigibson/robots/holonomic_base_robot.py +++ b/omnigibson/robots/holonomic_base_robot.py @@ -7,10 +7,12 @@ import omnigibson as og import omnigibson.lazy as lazy from omnigibson.macros import create_module_macros +from omnigibson.controllers import JointController, HolonomicBaseJointController from omnigibson.robots.locomotion_robot import LocomotionRobot from omnigibson.robots.manipulation_robot import ManipulationRobot from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.utils.python_utils import classproperty +import omnigibson.utils.transform_utils as T from omnigibson.utils.usd_utils import ControllableObjectViewAPI m = create_module_macros(module_path=__file__) @@ -353,6 +355,45 @@ def get_control_dict(self): return fcns + def q_to_action(self, q): + """ + Converts a target joint configuration to an action that can be applied to this object. + All controllers should be JointController with use_delta_commands=False + """ + action = [] + for name, controller in self.controllers.items(): + assert ( + isinstance(controller, JointController) and not controller.use_delta_commands + ), f"Controller [{name}] should be a JointController/HolonomicBaseJointController with use_delta_commands=False!" 
+ command = q[controller.dof_idx] + if isinstance(controller, HolonomicBaseJointController): + # For a holonomic base joint controller, the command should be in the robot local frame + local_pose = self._2d_world_pose_to_robot_pose(command) + command = th.tensor([local_pose[0][0], local_pose[0][1], T.quat2euler(local_pose[1])[2]]) + action.append(controller._reverse_preprocess_command(command)) + action = th.cat(action, dim=0) + assert ( + action.shape[0] == self.action_dim + ), f"Action should have dimension {self.action_dim}, got {action.shape[0]}" + return action + + def _2d_world_pose_to_robot_pose(self, pose_2d): + """ + Converts 2D pose (x, y, yaw) directly to robot-relative pose + + Args: + pose_2d (Iterable): (x, y, yaw) 2D pose + + Returns: + 2-tuple: + - 3-array: (x,y,z) Position in the robot frame + - 4-array: (x,y,z,w) Quaternion orientation in the robot frame + """ + body_pose = self.get_position_orientation() + world_pos = th.tensor([pose_2d[0], pose_2d[1], body_pose[0][2]], dtype=th.float32) + world_orn = T.euler2quat(th.tensor([0, 0, pose_2d[2]], dtype=th.float32)) + return T.relative_pose_transform(world_pos, world_orn, *body_pose) + def teleop_data_to_action(self, teleop_action) -> th.Tensor: action = ManipulationRobot.teleop_data_to_action(self, teleop_action) action[self.base_action_idx] = th.tensor(teleop_action.base).float() * 0.1 diff --git a/tests/test_curobo.py b/tests/test_curobo.py index adab8b9e9..86e0d0de9 100644 --- a/tests/test_curobo.py +++ b/tests/test_curobo.py @@ -94,10 +94,9 @@ def test_curobo(): "action_normalize": False, "controller_config": { "base": { - "name": "JointController", + "name": "HolonomicBaseJointController", "motor_type": "position", "command_input_limits": None, - "use_delta_commands": False, "use_impedances": True, }, "trunk": { @@ -146,10 +145,9 @@ def test_curobo(): "action_normalize": False, "controller_config": { "base": { - "name": "JointController", + "name": "HolonomicBaseJointController", "motor_type": "position", "command_input_limits": None, - "use_delta_commands": False, "use_impedances": True, }, "trunk": { diff --git a/tests/test_robot_states_flatcache.py b/tests/test_robot_states_flatcache.py index 44951b206..ae699ca3e 100644 --- a/tests/test_robot_states_flatcache.py +++ b/tests/test_robot_states_flatcache.py @@ -6,6 +6,7 @@ from omnigibson.macros import gm from omnigibson.robots import REGISTERED_ROBOTS, Fetch, LocomotionRobot, ManipulationRobot, Stretch from omnigibson.sensors import VisionSensor +from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.utils.transform_utils import mat2pose, pose2mat, quaternions_close, relative_pose_transform from omnigibson.utils.usd_utils import PoseAPI @@ -285,7 +286,7 @@ def object_is_in_hand(robot, obj): for _ in range(10): og.sim.step() - action_primitives = StarterSemanticActionPrimitives(robot) + action_primitives = StarterSemanticActionPrimitives(env=env, robot=robot, skip_curobo_initilization=True) box_object = env.scene.object_registry("name", "box") target_eef_pos = box_object.get_position_orientation()[0] @@ -295,9 +296,12 @@ def object_is_in_hand(robot, obj): for action in action_primitives._move_hand_direct_ik((target_eef_pos, target_eef_orn), pos_thresh=0.01): env.step(action) + gripper_controller = robot.controllers["gripper_0"] + # Grasp the box - for action in action_primitives._execute_grasp(): - env.step(action) + gripper_controller.update_goal(cb.array([-1]), robot.get_control_dict()) + for _ in range(20): + og.sim.step() assert 
object_is_in_hand(robot, box_object), f"Grasping mode {grasping_mode} failed to grasp the object" @@ -309,9 +313,8 @@ def object_is_in_hand(robot, obj): assert object_is_in_hand(robot, box_object), f"Grasping mode {grasping_mode} failed to keep the object in hand" # Release the box - for action in action_primitives._execute_release(): - env.step(action) - for _ in range(10): + gripper_controller.update_goal(cb.array([1]), robot.get_control_dict()) + for _ in range(20): og.sim.step() assert not object_is_in_hand(robot, box_object), f"Grasping mode {grasping_mode} failed to release the object" From b90ea7c4d554c1667ea9b071c77b971aa2f29635 Mon Sep 17 00:00:00 2001 From: hang-yin Date: Fri, 10 Jan 2025 17:56:53 -0800 Subject: [PATCH 48/76] Small fixes for primitive config --- omnigibson/configs/r1_primitives.yaml | 1 + omnigibson/configs/tiago_primitives.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/omnigibson/configs/r1_primitives.yaml b/omnigibson/configs/r1_primitives.yaml index 78f5383ad..b3b241349 100644 --- a/omnigibson/configs/r1_primitives.yaml +++ b/omnigibson/configs/r1_primitives.yaml @@ -83,6 +83,7 @@ robots: use_impedances: false camera: name: JointController + use_delta_commands: false reset_joint_pos: [ 0.0000, -0.0000, 0.0247, 0.0009, 0.0004, -0.0000, 0.0000, 0.0000, 0.0000, 0.0000, -0.0464, 0.0464, 2.6172, 2.6168, -1.4584, diff --git a/omnigibson/configs/tiago_primitives.yaml b/omnigibson/configs/tiago_primitives.yaml index a9356d39b..017fe88da 100644 --- a/omnigibson/configs/tiago_primitives.yaml +++ b/omnigibson/configs/tiago_primitives.yaml @@ -116,6 +116,7 @@ robots: use_impedances: false camera: name: JointController + use_delta_commands: false objects: [] From c9c694ce10c958e8f6ad17dad158e3b1d5446ef1 Mon Sep 17 00:00:00 2001 From: hang-yin Date: Mon, 13 Jan 2025 13:50:20 -0800 Subject: [PATCH 49/76] Add option to ignore object during curobo obstacle update --- omnigibson/action_primitives/curobo.py | 8 +++++++- .../starter_semantic_action_primitives.py | 13 ++++++------- 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/omnigibson/action_primitives/curobo.py b/omnigibson/action_primitives/curobo.py index 5a8bd1eda..8a27e2f8f 100644 --- a/omnigibson/action_primitives/curobo.py +++ b/omnigibson/action_primitives/curobo.py @@ -218,9 +218,13 @@ def save_visualization(self, q, file_path, emb_sel=CuRoboEmbodimentSelection.DEF robot_world.add_obstacle(mesh_world.mesh[0]) robot_world.save_world_as_mesh(file_path) - def update_obstacles(self): + def update_obstacles(self, ignore_objects=None): """ Updates internal world collision cache representation based on sim state + + Args: + ignore_objects (None or list of DatasetObject): If specified, objects that should + be ignored when updating obstacles """ obstacles = {"cuboid": None, "sphere": None, "mesh": [], "cylinder": None, "capsule": None} robot_transform = T.pose_inv(T.pose2mat(self.robot.root_link.get_position_orientation())) @@ -237,6 +241,8 @@ def update_obstacles(self): continue if obj.visual_only: continue + if ignore_objects is not None and obj in ignore_objects: + continue for link in obj.links.values(): for collision_mesh in link.collision_meshes.values(): assert ( diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index a6ac773b3..0c10f3a29 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ 
-547,7 +547,7 @@ def _grasp(self, obj): # It's okay if we can't go all the way because we run into the object. indented_print("Performing grasp approach") # Use direct IK to move the hand to the approach pose. - yield from self._move_hand(approach_pose, avoid_collision=False, ignore_paths=[obj.prim_path]) + yield from self._move_hand(approach_pose, avoid_collision=False, ignore_objects=[obj]) elif self.robot.grasping_mode == "assisted": indented_print("Performing grasp approach") yield from self._move_hand(approach_pose) @@ -840,7 +840,7 @@ def _move_hand( motion_constraint=None, low_precision=False, lock_auxiliary_arm=False, - ignore_paths=None, + ignore_objects=None, ): """ Yields action for the robot to move hand so the eef is in the target pose using the planner @@ -852,8 +852,7 @@ def _move_hand( motion_constraint (MotionConstraint): Motion constraint for the motion low_precision (bool): Whether to use low precision for the motion lock_auxiliary_arm (bool): Whether to lock the other arm in place - ignore_paths (None or list of str): If specified, prim path substrings that should - be ignored when planning + ignore_objects (None or list of str): If specified, objects that should be ignored when planning Returns: th.tensor or None: Action array for one step for the robot to move hand or None if its at the target pose @@ -863,8 +862,8 @@ def _move_hand( if arm is None: arm = self.arm - if ignore_paths is not None: - self._motion_generator.update_obstacles(ignore_paths=ignore_paths) + if ignore_objects is not None: + self._motion_generator.update_obstacles(ignore_objects=ignore_objects) yield from self._settle_robot() # curobo motion generator takes a pose but outputs joint positions @@ -891,7 +890,7 @@ def _move_hand( target_quat=target_quat, embodiment_selection=CuRoboEmbodimentSelection.ARM, motion_constraint=motion_constraint, - skip_obstalce_update=ignore_paths is not None, + skip_obstalce_update=ignore_objects is not None, ) indented_print(f"Plan has {len(q_traj)} steps") From bc26e0696ac2f072fc542607fdbcced9bbabc95e Mon Sep 17 00:00:00 2001 From: hang-yin Date: Mon, 13 Jan 2025 13:53:20 -0800 Subject: [PATCH 50/76] Holonomic base control fixes; angle wrapping helper --- .../starter_semantic_action_primitives.py | 8 +--- .../holonomic_base_joint_controller.py | 45 ++++++++++++------- omnigibson/controllers/osc_controller.py | 3 +- omnigibson/robots/holonomic_base_robot.py | 8 +++- omnigibson/utils/geometry_utils.py | 13 ++++++ omnigibson/utils/motion_planning_utils.py | 18 ++------ 6 files changed, 56 insertions(+), 39 deletions(-) diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index 0c10f3a29..30a5b60fc 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -35,6 +35,7 @@ from omnigibson.robots.manipulation_robot import ManipulationRobot from omnigibson.tasks.behavior_task import BehaviorTask from omnigibson.utils.backend_utils import _compute_backend as cb +from omnigibson.utils.geometry_utils import wrap_angle from omnigibson.utils.grasping_planning_utils import get_grasp_poses_for_object_sticky, get_grasp_position_for_open from omnigibson.utils.motion_planning_utils import detect_robot_collision_in_sim from omnigibson.utils.object_state_utils import sample_cuboid_for_predicate @@ -110,11 +111,6 @@ def indented_print(msg, *args, **kwargs): print(" " * len(inspect.stack()) + 
str(msg), *args, **kwargs)
 
 
-def normalize_angle(angle):
-    """Normalize angle to [-pi, pi] range."""
-    return th.atan2(th.sin(angle), th.cos(angle))
-
-
 class StarterSemanticActionPrimitiveSet(IntEnum):
     _init_ = "value __doc__"
     GRASP = auto(), "Grasp an object"
@@ -989,7 +985,7 @@ def _execute_motion_plan(
                         articulation_target_reached = True
                     if (
                         th.max(th.abs(base_joint_diff[:2])).item() < m.DEFAULT_DIST_THRESHOLD
-                        and th.abs(normalize_angle(base_joint_diff[2])).item() < m.DEFAULT_ANGLE_THRESHOLD
+                        and th.abs(wrap_angle(base_joint_diff[2])).item() < m.DEFAULT_ANGLE_THRESHOLD
                     ):
                         base_target_reached = True
                     if base_target_reached and articulation_target_reached:
diff --git a/omnigibson/controllers/holonomic_base_joint_controller.py b/omnigibson/controllers/holonomic_base_joint_controller.py
index e03b9e573..8c36bd265 100644
--- a/omnigibson/controllers/holonomic_base_joint_controller.py
+++ b/omnigibson/controllers/holonomic_base_joint_controller.py
@@ -1,5 +1,6 @@
 from omnigibson.utils.backend_utils import _compute_backend as cb
 from omnigibson.controllers.joint_controller import JointController
+from omnigibson.utils.geometry_utils import wrap_angle
 
 
 class HolonomicBaseJointController(JointController):
@@ -101,28 +102,42 @@ def _update_goal(self, command, control_dict):
             # Transform command to canonical frame
             command_in_canonical_frame = canonical_to_base_pose @ command_in_base_frame
 
-            # Extract position and yaw from transformed command
+            # Extract x,y translation from transformed command
             position = command_in_canonical_frame[:2, 3]
-            yaw = cb.T.mat2euler(command_in_canonical_frame[:3, :3])[2:3]
-            command = cb.cat([position, yaw])
+
+            # Since our virtual joints are in the order of ["x", "y", "z", "rx", "ry", "rz"],
+            # the "base_footprint_rz_link" is aligned with @self.base_footprint_link in the z-axis.
+            # We just need to apply the command directly as a delta to the current joint position of "base_footprint_rz_joint".
+            # Note that the current joint position is guaranteed to be in the range of [-pi, pi] because
+            # @HolonomicBaseRobot.apply_action explicitly wraps the joint position to [-pi, pi] if it's out of range.
+            rz_joint_pos = control_dict["joint_position"][self.dof_idx][2:3]
+
+            # Wrap the delta joint position to [-pi, pi]. In other words, we don't expect the commanded delta joint
+            # position to have a magnitude greater than pi, because the robot can't reasonably rotate more than pi
+            # in a single step.
+            delta_joint_pos = wrap_angle(command[2])
+
+            # Calculate the new joint position. This is guaranteed to be in the range of [-pi * 2, pi * 2], because
+            # both quantities are in the range of [-pi, pi]. This is important because revolute joints in Isaac Sim
+            # have a joint position (target) range of [-pi * 2, pi * 2].
+            new_joint_pos = rz_joint_pos + delta_joint_pos
+
+            command = cb.cat([position, new_joint_pos])
         else:
             # Handle velocity/effort control modes
             # Note: Only rotate the commands, don't translate
-            rotation_matrix = canonical_to_base_pose[:3, :3]
+            command_in_base_frame = cb.eye(4)
+            command_in_base_frame[:2, 3] = command[:2]  # Set x,y linear velocity
+
+            canonical_to_base_pose_rotation = cb.eye(4)
+            canonical_to_base_pose_rotation[:3, :3] = canonical_to_base_pose[:3, :3]
 
-            # Prepare poses for transformation
-            rotation_poses = cb.zeros((2, 3, 3))
-            rotation_poses[:, :3, :3] = rotation_matrix
+            # Transform command to canonical frame
+            command_in_canonical_frame = canonical_to_base_pose_rotation @ command_in_base_frame
 
-            local_vectors = cb.zeros((2, 3, 1))
-            local_vectors[0, :, 0] = cb.array([command[0], command[1], 0.0])  # Linear
-            local_vectors[1, :, 0] = cb.array([0.0, 0.0, command[2]])  # Angular
+            # Extract x,y linear velocity from transformed command
+            linear_velocity = command_in_canonical_frame[:2, 3]
 
-            # Transform to global frame
-            global_vectors = rotation_poses @ local_vectors
-            linear_global = global_vectors[0]
-            angular_global = global_vectors[1]
+            angular_velocity = command[2:3]
 
-            command = cb.cat([linear_global[0], linear_global[1], angular_global[2]])
+            command = cb.cat([linear_velocity, angular_velocity])
 
         return super()._update_goal(command=command, control_dict=control_dict)
diff --git a/omnigibson/controllers/osc_controller.py b/omnigibson/controllers/osc_controller.py
index e8976b8a4..bcbeafa24 100644
--- a/omnigibson/controllers/osc_controller.py
+++ b/omnigibson/controllers/osc_controller.py
@@ -9,6 +9,7 @@
 from omnigibson.controllers import ControlType, ManipulationController
 from omnigibson.utils.backend_utils import _compute_backend as cb
 from omnigibson.utils.backend_utils import add_compute_function
+from omnigibson.utils.geometry_utils import wrap_angle
 from omnigibson.utils.python_utils import assert_valid_key
 from omnigibson.utils.ui_utils import create_module_logger
 
@@ -562,7 +563,7 @@ def _compute_osc_torques_torch(
     # roboticsproceedings.org/rss07/p31.pdf
     if rest_qpos is not None:
         j_eef_inv = m_eef @ j_eef @ mm_inv
-        u_null = kd_null * -qd + kp_null * ((rest_qpos - q + math.pi) % (2 * math.pi) - math.pi)
+        u_null = kd_null * -qd + kp_null * wrap_angle(rest_qpos - q)
         u_null = mm @ th.unsqueeze(u_null, dim=-1)
         u += (th.eye(control_dim, dtype=th.float32) - j_eef.T @ j_eef_inv) @ u_null
 
diff --git a/omnigibson/robots/holonomic_base_robot.py b/omnigibson/robots/holonomic_base_robot.py
index 945cfde58..04360d44c 100644
--- a/omnigibson/robots/holonomic_base_robot.py
+++ b/omnigibson/robots/holonomic_base_robot.py
@@ -11,6 +11,7 @@
 from omnigibson.robots.locomotion_robot import LocomotionRobot
 from omnigibson.robots.manipulation_robot import ManipulationRobot
 from omnigibson.utils.backend_utils import _compute_backend as cb
+from omnigibson.utils.geometry_utils import wrap_angle
 from omnigibson.utils.python_utils import classproperty
 import omnigibson.utils.transform_utils as T
 from omnigibson.utils.usd_utils import ControllableObjectViewAPI
@@ -212,7 +213,7 @@ def _initialize(self):
     def apply_action(self, action):
         j_pos = self.joints["base_footprint_rz_joint"].get_state()[0]
         if j_pos < -math.pi or j_pos > math.pi:
-            j_pos = (j_pos + math.pi) % (2 * math.pi) - math.pi
+            j_pos = wrap_angle(j_pos)
         self.joints["base_footprint_rz_joint"].set_pos(j_pos, drive=False)
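        # e.g. wrap_angle(math.pi + 0.1) -> -math.pi + 0.1 and
        # wrap_angle(-math.pi - 0.1) -> math.pi - 0.1, so the value handed to
        # set_pos above is always a legal revolute-joint target.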
        super().apply_action(action)
 
@@ -368,8 +369,11 @@ def q_to_action(self, q):
             command = q[controller.dof_idx]
             if isinstance(controller, HolonomicBaseJointController):
                 # For a holonomic base joint controller, the command should be in the robot local frame
+                cur_rz_joint_pos = self.get_joint_positions()[self.base_idx][5]
+                delta_q = wrap_angle(command[2] - cur_rz_joint_pos)
                 local_pose = self._2d_world_pose_to_robot_pose(command)
-                command = th.tensor([local_pose[0][0], local_pose[0][1], T.quat2euler(local_pose[1])[2]])
+
+                command = th.tensor([local_pose[0][0], local_pose[0][1], delta_q])
             action.append(controller._reverse_preprocess_command(command))
         action = th.cat(action, dim=0)
         assert (
diff --git a/omnigibson/utils/geometry_utils.py b/omnigibson/utils/geometry_utils.py
index 56de2ee18..50a64ef7b 100644
--- a/omnigibson/utils/geometry_utils.py
+++ b/omnigibson/utils/geometry_utils.py
@@ -11,6 +11,19 @@
 from omnigibson.utils.usd_utils import mesh_prim_mesh_to_trimesh_mesh
 
 
+def wrap_angle(theta):
+    """
+    Converts an angle to the range [-pi, pi).
+
+    Args:
+        theta (float): angle in radians
+
+    Returns:
+        float: angle in radians in range [-pi, pi)
+    """
+    return (theta + math.pi) % (2 * math.pi) - math.pi
+
+
 def get_particle_positions_in_frame(pos, quat, scale, particle_positions):
     """
     Transforms particle positions @positions into the frame specified by @pos and @quat with new scale @scale,
diff --git a/omnigibson/utils/motion_planning_utils.py b/omnigibson/utils/motion_planning_utils.py
index cf1c47729..32b6ceb67 100644
--- a/omnigibson/utils/motion_planning_utils.py
+++ b/omnigibson/utils/motion_planning_utils.py
@@ -8,6 +8,7 @@
 import omnigibson.utils.transform_utils as T
 from omnigibson.macros import create_module_macros
 from omnigibson.utils.control_utils import IKSolver
+from omnigibson.utils.geometry_utils import wrap_angle
 from omnigibson.utils.sim_utils import prim_paths_to_rigid_prims
 from omnigibson.utils.ui_utils import create_module_logger
 from omnigibson.utils.usd_utils import GripperRigidContactAPI
@@ -19,19 +20,6 @@
 m.DIST_DIFF = 0.1
 
 
-def _wrap_angle(theta):
-    """ "
-    Converts an angle to the range [-pi, pi).
- - Args: - theta (float): angle in radians - - Returns: - float: angle in radians in range [-pi, pi) - """ - return (theta + math.pi) % (2 * math.pi) - math.pi - - def plan_base_motion( robot, end_conf, @@ -89,7 +77,7 @@ def checkMotion(self, s1, s2): @staticmethod def is_valid_rotation(si, start_conf, final_orientation): - diff = _wrap_angle(final_orientation - start_conf[2]) + diff = wrap_angle(final_orientation - start_conf[2]) direction = th.sign(diff) diff = abs(diff) num_points = math.ceil(diff / m.ANGLE_DIFF) + 1 @@ -117,7 +105,7 @@ def create_state(space, x, y, yaw): state = ob.State(space) state().setX(x) state().setY(y) - state().setYaw(_wrap_angle(yaw)) + state().setYaw(wrap_angle(yaw)) return state def state_valid_fn(q, verbose=False): From c91fd52039130087aa400ecbb8c72ef10d74d141 Mon Sep 17 00:00:00 2001 From: hang-yin Date: Mon, 13 Jan 2025 15:52:13 -0800 Subject: [PATCH 51/76] Bug fixes --- .../starter_semantic_action_primitives.py | 8 ++++++-- .../examples/action_primitives/solve_simple_task.py | 4 ++++ .../examples/action_primitives/wip_solve_behavior_task.py | 2 +- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index 30a5b60fc..d075abcd6 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -543,7 +543,9 @@ def _grasp(self, obj): # It's okay if we can't go all the way because we run into the object. indented_print("Performing grasp approach") # Use direct IK to move the hand to the approach pose. - yield from self._move_hand(approach_pose, avoid_collision=False, ignore_objects=[obj]) + yield from self._move_hand( + approach_pose, motion_constraint=[1, 1, 1, 1, 1, 0], avoid_collision=False, ignore_objects=[obj] + ) elif self.robot.grasping_mode == "assisted": indented_print("Performing grasp approach") yield from self._move_hand(approach_pose) @@ -1753,7 +1755,9 @@ def _sample_pose_near_object( avg_arm_workspace_range = th.mean(self.robot.arm_workspace_range[self.arm]) target_pose = ( - (self._sample_position_on_aabb_side(obj), th.tensor([0, 0, 0, 1])) if pose_on_obj is None else pose_on_obj + (self._sample_position_on_aabb_side(obj), self._get_reset_eef_pose()[self.arm][1]) + if pose_on_obj is None + else pose_on_obj ) obj_rooms = ( diff --git a/omnigibson/examples/action_primitives/solve_simple_task.py b/omnigibson/examples/action_primitives/solve_simple_task.py index c2721ffd0..58930400f 100644 --- a/omnigibson/examples/action_primitives/solve_simple_task.py +++ b/omnigibson/examples/action_primitives/solve_simple_task.py @@ -56,6 +56,10 @@ def main(): scene = env.scene robot = env.robots[0] + # Let the objects settle + for _ in range(30): + og.sim.step() + # Allow user to move camera more easily og.sim.enable_viewer_camera_teleoperation() diff --git a/omnigibson/examples/action_primitives/wip_solve_behavior_task.py b/omnigibson/examples/action_primitives/wip_solve_behavior_task.py index bdbe76b8b..20feee743 100644 --- a/omnigibson/examples/action_primitives/wip_solve_behavior_task.py +++ b/omnigibson/examples/action_primitives/wip_solve_behavior_task.py @@ -26,7 +26,7 @@ def main(): picking_up_trash task using a hardcoded sequence of primitives. 
""" # Load the config - config_filename = os.path.join(og.example_config_path, "fetch_primitives.yaml") + config_filename = os.path.join(og.example_config_path, "tiago_primitives.yaml") config = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader) # Update it to run a grocery shopping task From 44500b866e4fc9307be9d1404b23cf3f6fe95236 Mon Sep 17 00:00:00 2001 From: hang-yin Date: Mon, 13 Jan 2025 16:34:03 -0800 Subject: [PATCH 52/76] Fix curobo motion constraint --- omnigibson/action_primitives/curobo.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/omnigibson/action_primitives/curobo.py b/omnigibson/action_primitives/curobo.py index 8a27e2f8f..26ede9abc 100644 --- a/omnigibson/action_primitives/curobo.py +++ b/omnigibson/action_primitives/curobo.py @@ -564,6 +564,9 @@ def compute_trajectories( success_ratio=1.0 / self.batch_size if success_ratio is None else success_ratio, ) + # Store original target_pos keys before adding dummy targets + original_target_links = set(target_pos.keys()) + # Add the pose cost metric # Details can be found here: https://curobo.org/advanced_examples/3_constrained_planning.html # The motion constraint vector is a 6D vector controlling end-effector movement: @@ -574,9 +577,8 @@ def compute_trajectories( if motion_constraint is None: motion_constraint = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0] pose_cost_metric = lazy.curobo.wrap.reacher.motion_gen.PoseCostMetric( - hold_partial_pose=False, hold_vec_weight=self._tensor_args.to_device(motion_constraint) + hold_partial_pose=True, hold_vec_weight=self._tensor_args.to_device(motion_constraint) ) - plan_cfg.pose_cost_metric = pose_cost_metric # Construct initial state q_pos = th.stack([self.robot.get_joint_positions()] * self.batch_size, axis=0) @@ -713,6 +715,10 @@ def compute_trajectories( ik_goal_batch_by_link = None plan_fn = self.plan_batch if not ik_only else self.solve_ik_batch + if self.ee_link[emb_sel] in original_target_links: + plan_cfg.pose_cost_metric = pose_cost_metric + else: + plan_cfg.pose_cost_metric = None result, success, joint_state = plan_fn( cu_js_batch, main_ik_goal_batch, plan_cfg, link_poses=ik_goal_batch_by_link, emb_sel=emb_sel ) From 7a43f38726c317d953de508b6e1358937a6285d4 Mon Sep 17 00:00:00 2001 From: hang-yin Date: Mon, 13 Jan 2025 16:34:24 -0800 Subject: [PATCH 53/76] Fix primitives test --- tests/test_primitives.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/test_primitives.py b/tests/test_primitives.py index f25db2275..ffe638684 100644 --- a/tests/test_primitives.py +++ b/tests/test_primitives.py @@ -75,6 +75,10 @@ def primitive_tester(env, objects, primitives, primitives_args): obj["object"].set_position_orientation(position=obj["position"], orientation=obj["orientation"]) og.sim.step() + # Let the objects settle + for _ in range(30): + og.sim.step() + controller = StarterSemanticActionPrimitives(env, env.robots[0], enable_head_tracking=False) try: for primitive, args in zip(primitives, primitives_args): From 89732add33fbd64050c4a0b43caad52212e6198f Mon Sep 17 00:00:00 2001 From: hang-yin Date: Mon, 13 Jan 2025 19:10:48 -0800 Subject: [PATCH 54/76] Fix tests --- .../controllers/holonomic_base_joint_controller.py | 4 ++-- omnigibson/examples/robots/curobo_example.py | 3 +-- tests/test_curobo.py | 12 ++++++------ tests/test_symbolic_primitives.py | 8 +++++--- 4 files changed, 14 insertions(+), 13 deletions(-) diff --git a/omnigibson/controllers/holonomic_base_joint_controller.py b/omnigibson/controllers/holonomic_base_joint_controller.py 
index 8c36bd265..288d3a20b 100644 --- a/omnigibson/controllers/holonomic_base_joint_controller.py +++ b/omnigibson/controllers/holonomic_base_joint_controller.py @@ -120,7 +120,7 @@ def _update_goal(self, command, control_dict): # are in the range of [-pi, pi]. This is important because revolute joints in Isaac Sim have a joint position (target) range of [-pi * 2, pi * 2] new_joint_pos = rz_joint_pos + delta_joint_pos - command = cb.cat([position, new_joint_pos]) + command = cb.as_float32(cb.cat([position, new_joint_pos])) else: # Handle velocity/effort control modes # Note: Only rotate the commands, don't translate @@ -138,6 +138,6 @@ def _update_goal(self, command, control_dict): angular_velocity = command[2:3] - command = cb.cat([linear_velocity, angular_velocity]) + command = cb.as_float32(cb.cat([linear_velocity, angular_velocity])) return super()._update_goal(command=command, control_dict=control_dict) diff --git a/omnigibson/examples/robots/curobo_example.py b/omnigibson/examples/robots/curobo_example.py index f4b5fd57c..59f635e6d 100644 --- a/omnigibson/examples/robots/curobo_example.py +++ b/omnigibson/examples/robots/curobo_example.py @@ -98,10 +98,9 @@ def test_curobo(): "grasping_mode": "assisted", "controller_config": { "base": { - "name": "JointController", + "name": "HolonomicBaseJointController", "motor_type": "position", "command_input_limits": None, - "use_delta_commands": False, "use_impedances": True, }, "trunk": { diff --git a/tests/test_curobo.py b/tests/test_curobo.py index 86e0d0de9..b5d7a2219 100644 --- a/tests/test_curobo.py +++ b/tests/test_curobo.py @@ -148,35 +148,35 @@ def test_curobo(): "name": "HolonomicBaseJointController", "motor_type": "position", "command_input_limits": None, - "use_impedances": True, + "use_impedances": False, }, "trunk": { "name": "JointController", "motor_type": "position", "command_input_limits": None, "use_delta_commands": False, - "use_impedances": True, + "use_impedances": False, }, "camera": { "name": "JointController", "motor_type": "position", "command_input_limits": None, "use_delta_commands": False, - "use_impedances": True, + "use_impedances": False, }, "arm_left": { "name": "JointController", "motor_type": "position", "command_input_limits": None, "use_delta_commands": False, - "use_impedances": True, + "use_impedances": False, }, "arm_right": { "name": "JointController", "motor_type": "position", "command_input_limits": None, "use_delta_commands": False, - "use_impedances": True, + "use_impedances": False, }, "gripper_left": { "name": "JointController", @@ -233,7 +233,7 @@ def test_curobo(): batch_size=batch_size, debug=False, use_cuda_graph=True, - collision_activation_distance=0.02, # Use larger activation distance for better reproducibility + collision_activation_distance=0.03, # Use larger activation distance for better reproducibility use_default_embodiment_only=True, ) diff --git a/tests/test_symbolic_primitives.py b/tests/test_symbolic_primitives.py index 5fb352138..1e29bc2fa 100644 --- a/tests/test_symbolic_primitives.py +++ b/tests/test_symbolic_primitives.py @@ -9,10 +9,10 @@ SymbolicSemanticActionPrimitives, SymbolicSemanticActionPrimitiveSet, ) -from omnigibson.macros import gm -gm.USE_GPU_DYNAMICS = True -gm.ENABLE_TRANSITION_RULES = True +# TODO: Using GPU dynamics causes cuda memory issues, need to investigate +# gm.USE_GPU_DYNAMICS = True +# gm.ENABLE_TRANSITION_RULES = True current_robot_type = "R1" @@ -218,6 +218,7 @@ def test_toggle_on(self, env, prim_gen, stove, sink): env.step(action) assert 
sink.states[object_states.ToggledOn].get_value() + @pytest.mark.skip("Disabled until GPU dynamics does not cause cuda memory issues") def test_soak_under(self, env, prim_gen, robot, sponge, sink): water_system = env.scene.get_system("water") assert not sponge.states[object_states.Saturated].get_value(water_system) @@ -241,6 +242,7 @@ def test_soak_under(self, env, prim_gen, robot, sponge, sink): sink.states[object_states.ToggledOn].set_value(False) assert not sink.states[object_states.ToggledOn].get_value() + @pytest.mark.skip("Disabled until GPU dynamics does not cause cuda memory issues") def test_wipe(self, env, prim_gen, robot, sponge, sink, countertop): # Some pre-assertions water_system = env.scene.get_system("water") From f50696ec01bb1df5fb4d311f55a10f7c55b7c51b Mon Sep 17 00:00:00 2001 From: Chengshu Li Date: Wed, 15 Jan 2025 12:52:05 -0800 Subject: [PATCH 55/76] address comments --- omnigibson/action_primitives/curobo.py | 20 ++----- omnigibson/configs/fetch_primitives.yaml | 66 ----------------------- omnigibson/configs/r1_primitives.yaml | 41 +++++++++----- omnigibson/configs/tiago_primitives.yaml | 10 ++-- omnigibson/robots/holonomic_base_robot.py | 28 ++++------ omnigibson/robots/r1.py | 1 - omnigibson/tasks/grasp_task.py | 2 +- 7 files changed, 46 insertions(+), 122 deletions(-) delete mode 100644 omnigibson/configs/fetch_primitives.yaml diff --git a/omnigibson/action_primitives/curobo.py b/omnigibson/action_primitives/curobo.py index 26ede9abc..25eb1f30a 100644 --- a/omnigibson/action_primitives/curobo.py +++ b/omnigibson/action_primitives/curobo.py @@ -6,7 +6,7 @@ import omnigibson as og import omnigibson.lazy as lazy import omnigibson.utils.transform_utils as T -from omnigibson.macros import create_module_macros, gm +from omnigibson.macros import create_module_macros from omnigibson.prims.rigid_prim import RigidPrim from omnigibson.robots.holonomic_base_robot import HolonomicBaseRobot from omnigibson.utils.constants import JointType @@ -102,8 +102,6 @@ def __init__( # Only support one scene for now -- verify that this is the case assert len(og.sim.scenes) == 1 - assert not gm.ENABLE_FLATCACHE, "CuRobo does not support flatcache for now" - # Store internal variables self._tensor_args = lazy.curobo.types.base.TensorDeviceType(device=th.device(device)) self.debug = debug @@ -564,9 +562,6 @@ def compute_trajectories( success_ratio=1.0 / self.batch_size if success_ratio is None else success_ratio, ) - # Store original target_pos keys before adding dummy targets - original_target_links = set(target_pos.keys()) - # Add the pose cost metric # Details can be found here: https://curobo.org/advanced_examples/3_constrained_planning.html # The motion constraint vector is a 6D vector controlling end-effector movement: @@ -574,11 +569,10 @@ def compute_trajectories( # Linear constraints: [x, y, z] - translation along each axis # Setting any component to 1.0 locks that axis, forcing the planner to reach # the target using only the remaining unlocked axes - if motion_constraint is None: - motion_constraint = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0] - pose_cost_metric = lazy.curobo.wrap.reacher.motion_gen.PoseCostMetric( - hold_partial_pose=True, hold_vec_weight=self._tensor_args.to_device(motion_constraint) - ) + if self.ee_link[emb_sel] in target_pos and motion_constraint is not None: + plan_cfg.pose_cost_metric = lazy.curobo.wrap.reacher.motion_gen.PoseCostMetric( + hold_partial_pose=True, hold_vec_weight=self._tensor_args.to_device(motion_constraint) + ) # Construct initial state q_pos = 
th.stack([self.robot.get_joint_positions()] * self.batch_size, axis=0) @@ -715,10 +709,6 @@ def compute_trajectories( ik_goal_batch_by_link = None plan_fn = self.plan_batch if not ik_only else self.solve_ik_batch - if self.ee_link[emb_sel] in original_target_links: - plan_cfg.pose_cost_metric = pose_cost_metric - else: - plan_cfg.pose_cost_metric = None result, success, joint_state = plan_fn( cu_js_batch, main_ik_goal_batch, plan_cfg, link_poses=ik_goal_batch_by_link, emb_sel=emb_sel ) diff --git a/omnigibson/configs/fetch_primitives.yaml b/omnigibson/configs/fetch_primitives.yaml deleted file mode 100644 index 09978bd0d..000000000 --- a/omnigibson/configs/fetch_primitives.yaml +++ /dev/null @@ -1,66 +0,0 @@ -env: - action_frequency: 30 # (int): environment executes action at the action_frequency rate - physics_frequency: 120 # (int): physics frequency (1 / physics_timestep for physx) - device: null # (None or str): specifies the device to be used if running on the gpu with torch backend - automatic_reset: false # (bool): whether to automatic reset after an episode finishes - flatten_action_space: false # (bool): whether to flatten the action space as a sinle 1D-array - flatten_obs_space: false # (bool): whether the observation space should be flattened when generated - use_external_obs: false # (bool): Whether to use external observations or not - initial_pos_z_offset: 0.1 - external_sensors: null # (None or list): If specified, list of sensor configurations for external sensors to add. Should specify sensor "type" and any additional kwargs to instantiate the sensor. Each entry should be the kwargs passed to @create_sensor, in addition to position, orientation - -render: - viewer_width: 1280 - viewer_height: 720 - -scene: - type: InteractiveTraversableScene - scene_model: Rs_int - trav_map_resolution: 0.1 - default_erosion_radius: 0.0 - trav_map_with_objects: true - num_waypoints: 1 - waypoint_resolution: 0.2 - load_object_categories: null - not_load_object_categories: null - load_room_types: null - load_room_instances: null - load_task_relevant_only: false - seg_map_resolution: 0.1 - scene_source: OG - include_robots: false - -robots: - - type: Fetch - obs_modalities: [rgb] - scale: 1.0 - self_collisions: true - action_normalize: false - action_type: continuous - grasping_mode: sticky - default_arm_pose: diagonal30 - controller_config: - base: - name: DifferentialDriveController - trunk: - name: JointController - arm_0: - name: JointController - subsume_controllers: [trunk] - motor_type: position - command_input_limits: null - use_delta_commands: false - gripper_0: - name: JointController - motor_type: position - command_input_limits: [-1, 1] - command_output_limits: null - use_delta_commands: false - camera: - name: JointController - use_delta_commands: False - -objects: [] - -task: - type: DummyTask \ No newline at end of file diff --git a/omnigibson/configs/r1_primitives.yaml b/omnigibson/configs/r1_primitives.yaml index b3b241349..aed776401 100644 --- a/omnigibson/configs/r1_primitives.yaml +++ b/omnigibson/configs/r1_primitives.yaml @@ -37,6 +37,34 @@ robots: action_normalize: false action_type: continuous grasping_mode: sticky + reset_joint_pos: [ + 0.0000, + -0.0000, + 0.0247, + 0.0009, + 0.0004, + -0.0000, + 0.0000, + 0.0000, + 0.0000, + 0.0000, + -0.0464, + 0.0464, + 2.6172, + 2.6168, + -1.4584, + -1.4570, + -0.0433, + 0.0418, + 1.5899, + -1.5896, + -1.1587, + 1.1593, + 0.0300, + 0.0300, + 0.0300, + 0.0300, + ] sensor_config: VisionSensor: sensor_kwargs: @@ -52,21 +80,18 @@ 
robots: name: JointController motor_type: position command_input_limits: null - command_output_limits: null use_delta_commands: false use_impedances: false arm_left: name: JointController motor_type: position command_input_limits: null - command_output_limits: null use_delta_commands: false use_impedances: false arm_right: name: JointController motor_type: position command_input_limits: null - command_output_limits: null use_delta_commands: false use_impedances: false gripper_left: @@ -81,17 +106,7 @@ robots: command_input_limits: null use_delta_commands: false use_impedances: false - camera: - name: JointController - use_delta_commands: false - reset_joint_pos: [ 0.0000, -0.0000, 0.0247, 0.0009, 0.0004, - -0.0000, 0.0000, 0.0000, 0.0000, 0.0000, - -0.0464, 0.0464, 2.6172, 2.6168, -1.4584, - -1.4570, -0.0433, 0.0418, 1.5899, -1.5896, - -1.1587, 1.1593, 0.0300, 0.0300, 0.0300, - 0.0300] objects: [] - task: type: DummyTask \ No newline at end of file diff --git a/omnigibson/configs/tiago_primitives.yaml b/omnigibson/configs/tiago_primitives.yaml index 017fe88da..94d050ecb 100644 --- a/omnigibson/configs/tiago_primitives.yaml +++ b/omnigibson/configs/tiago_primitives.yaml @@ -77,47 +77,43 @@ robots: name: HolonomicBaseJointController motor_type: position command_input_limits: null - command_output_limits: null use_impedances: false trunk: name: JointController motor_type: position command_input_limits: null - command_output_limits: null use_delta_commands: false use_impedances: false arm_left: name: JointController motor_type: position command_input_limits: null - command_output_limits: null use_delta_commands: false use_impedances: False arm_right: name: JointController motor_type: position command_input_limits: null - command_output_limits: null use_delta_commands: false use_impedances: false gripper_left: name: JointController motor_type: position command_input_limits: null - command_output_limits: null use_delta_commands: false use_impedances: false gripper_right: name: JointController motor_type: position command_input_limits: null - command_output_limits: null use_delta_commands: false use_impedances: false camera: name: JointController + motor_type: position + command_input_limits: null use_delta_commands: false - + use_impedances: false objects: [] task: diff --git a/omnigibson/robots/holonomic_base_robot.py b/omnigibson/robots/holonomic_base_robot.py index 04360d44c..eba9baf1b 100644 --- a/omnigibson/robots/holonomic_base_robot.py +++ b/omnigibson/robots/holonomic_base_robot.py @@ -212,6 +212,9 @@ def _initialize(self): def apply_action(self, action): j_pos = self.joints["base_footprint_rz_joint"].get_state()[0] + # In preparation for the base controller's @update_goal, we need to wrap the current joint pos + # to be in range [-pi, pi], so that once the command (a delta joint pos in range [-pi, pi]) + # is applied, the final target joint pos is in range [-pi * 2, pi * 2], which is required by Isaac. 
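        # Worked bound: j_pos wrapped here lies in [-pi, pi], and the controller's
        # delta is likewise wrapped into [-pi, pi], so the drive target j_pos + delta
        # stays within [-2*pi, 2*pi], the revolute-target range Isaac Sim enforces.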
if j_pos < -math.pi or j_pos > math.pi: j_pos = wrap_angle(j_pos) self.joints["base_footprint_rz_joint"].set_pos(j_pos, drive=False) @@ -369,11 +372,15 @@ def q_to_action(self, q): command = q[controller.dof_idx] if isinstance(controller, HolonomicBaseJointController): # For a holonomic base joint controller, the command should be in the robot local frame + # For orientation, we need to convert the command to a delta angle cur_rz_joint_pos = self.get_joint_positions()[self.base_idx][5] delta_q = wrap_angle(command[2] - cur_rz_joint_pos) - local_pose = self._2d_world_pose_to_robot_pose(command) - command = th.tensor([local_pose[0][0], local_pose[0][1], delta_q]) + # For translation, we need to convert the command to the robot local frame + body_pose = self.get_position_orientation() + canonical_pos = th.tensor([command[0], command[1], body_pose[0][2]], dtype=th.float32) + local_pos = T.relative_pose_transform(canonical_pos, th.tensor([0.0, 0.0, 0.0, 1.0]), *body_pose)[0] + command = th.tensor([local_pos[0], local_pos[1], delta_q]) action.append(controller._reverse_preprocess_command(command)) action = th.cat(action, dim=0) assert ( @@ -381,23 +388,6 @@ def q_to_action(self, q): ), f"Action should have dimension {self.action_dim}, got {action.shape[0]}" return action - def _2d_world_pose_to_robot_pose(self, pose_2d): - """ - Converts 2D pose (x, y, yaw) directly to robot-relative pose - - Args: - pose_2d (Iterable): (x, y, yaw) 2D pose - - Returns: - 2-tuple: - - 3-array: (x,y,z) Position in the robot frame - - 4-array: (x,y,z,w) Quaternion orientation in the robot frame - """ - body_pose = self.get_position_orientation() - world_pos = th.tensor([pose_2d[0], pose_2d[1], body_pose[0][2]], dtype=th.float32) - world_orn = T.euler2quat(th.tensor([0, 0, pose_2d[2]], dtype=th.float32)) - return T.relative_pose_transform(world_pos, world_orn, *body_pose) - def teleop_data_to_action(self, teleop_action) -> th.Tensor: action = ManipulationRobot.teleop_data_to_action(self, teleop_action) action[self.base_action_idx] = th.tensor(teleop_action.base).float() * 0.1 diff --git a/omnigibson/robots/r1.py b/omnigibson/robots/r1.py index e944dbe33..37990888a 100644 --- a/omnigibson/robots/r1.py +++ b/omnigibson/robots/r1.py @@ -243,5 +243,4 @@ def disabled_collision_pairs(self): ["base_link", "wheel_link1"], ["base_link", "wheel_link2"], ["base_link", "wheel_link3"], - ["torso_link2", "torso_link4"], ] diff --git a/omnigibson/tasks/grasp_task.py b/omnigibson/tasks/grasp_task.py index 6471d1205..a676e65e3 100644 --- a/omnigibson/tasks/grasp_task.py +++ b/omnigibson/tasks/grasp_task.py @@ -104,7 +104,7 @@ def _reset_agent(self, env): joint_pos, joint_control_idx = self._get_random_joint_position(robot) initial_joint_pos[control_idx_in_joint_pos] = joint_pos collision_detected = self._primitive_controller._motion_generator.check_collisions( - [initial_joint_pos], self_collision_check=False + [initial_joint_pos], ).cpu()[0] if not collision_detected: robot.set_joint_positions(joint_pos, joint_control_idx) From 2d6a69e174086ddf9f7647b9dcd7524d45141554 Mon Sep 17 00:00:00 2001 From: Chengshu Li Date: Wed, 15 Jan 2025 14:43:20 -0800 Subject: [PATCH 56/76] stick with robot.default_arm for primitive --- .../starter_semantic_action_primitives.py | 123 +++++++----------- 1 file changed, 45 insertions(+), 78 deletions(-) diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index d075abcd6..7b8e6e8b4 100644 --- 
a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -183,7 +183,6 @@ def __init__( self._enable_head_tracking = enable_head_tracking self._always_track_eef = always_track_eef self._tracking_object = None - self._arm = self.robot.default_arm if isinstance(self.robot, ManipulationRobot) else None # Store the current position of the arm as the arm target control_dict = self.robot.get_control_dict() @@ -207,17 +206,8 @@ def __init__( @property def arm(self): - if not isinstance(self.robot, ManipulationRobot): - raise ValueError("Cannot use arm for non-manipulation robot") - return self._arm - - @arm.setter - def arm(self, value): - if not isinstance(self.robot, ManipulationRobot): - raise ValueError("Cannot use arm for non-manipulation robot") - if value not in self.robot.arm_names: - raise ValueError(f"Invalid arm name: {value}. Must be one of {self.robot.arm_names}") - self._arm = value + assert isinstance(self.robot, ManipulationRobot), "Cannot use arm for non-manipulation robot" + return self.robot.default_arm def _postprocess_action(self, action): """Postprocesses action by applying head tracking.""" @@ -564,7 +554,7 @@ def _grasp(self, obj): ) indented_print("Moving hand back") - yield from self._reset_hand(self.arm) + yield from self._reset_hand() indented_print("Done with grasp") @@ -667,7 +657,7 @@ def _place_with_predicate(self, obj, predicate): hand_pose = self._get_hand_pose_for_object_pose(obj_pose) # First check if we can directly move the hand there - target_in_reach = self._target_in_reach_of_robot(hand_pose, skip_obstacle_update=True)[self.arm] + target_in_reach = self._target_in_reach_of_robot(hand_pose, skip_obstacle_update=True) if target_in_reach: yield from self._move_hand(hand_pose) break @@ -707,7 +697,7 @@ def _place_with_predicate(self, obj, predicate): yield from self._reset_hand() - def _convert_cartesian_to_joint_space(self, target_pose, arm=None): + def _convert_cartesian_to_joint_space(self, target_pose): """ Gets joint positions for the arm so eef is at the target pose @@ -719,7 +709,7 @@ def _convert_cartesian_to_joint_space(self, target_pose, arm=None): - th.tensor or None: Joint positions to reach target pose or None if impossible to reach target pose - th.tensor: Indices for joints in the robot """ - joint_pos = self._ik_solver_cartesian_to_joint_space(target_pose, arm=arm, frame="world") + joint_pos = self._ik_solver_cartesian_to_joint_space(target_pose, frame="world") if joint_pos is None: raise ActionPrimitiveError( ActionPrimitiveError.Reason.PLANNING_ERROR, @@ -736,20 +726,16 @@ def _target_in_reach_of_robot(self, target_pose, skip_obstacle_update=False): skip_obstacle_update (bool): Whether to skip updating obstacles Returns: - dict: Whether each eef can reach the target pose + bool: Whether the default eef can reach the target pose """ - target_in_reach = dict() - for arm in self.robot.arm_names: - target_in_reach[arm] = ( - self._ik_solver_cartesian_to_joint_space( - target_pose, - arm=arm, - skip_obstacle_update=skip_obstacle_update, - frame="world", - ) - is not None + return ( + self._ik_solver_cartesian_to_joint_space( + target_pose, + skip_obstacle_update=skip_obstacle_update, + frame="world", ) - return target_in_reach + is not None + ) def _target_in_reach_of_robot_relative(self, relative_target_pose, skip_obstacle_update=False): """ @@ -760,35 +746,28 @@ def _target_in_reach_of_robot_relative(self, relative_target_pose, skip_obstacle 
skip_obstacle_update (bool): Whether to skip updating obstacles Returns: - dict: Whether each eef can reach the target pose + bool: Whether the default eef can reach the target pose """ - target_in_reach = dict() - for arm in self.robot.arm_names: - target_in_reach[arm] = ( - self._ik_solver_cartesian_to_joint_space( - relative_target_pose, - arm=arm, - skip_obstacle_update=skip_obstacle_update, - frame="robot", - ) - is not None + + return ( + self._ik_solver_cartesian_to_joint_space( + relative_target_pose, + skip_obstacle_update=skip_obstacle_update, + frame="robot", ) - return target_in_reach + is not None + ) - def _manipulation_control_idx(self, arm=None): + def _manipulation_control_idx(self): """The appropriate manipulation control idx for the current settings.""" - if arm is None: - arm = self.arm - - return th.cat([self.robot.trunk_control_idx, self.robot.arm_control_idx[arm]]) + return th.cat([self.robot.trunk_control_idx, self.robot.arm_control_idx[self.arm]]) - def _ik_solver_cartesian_to_joint_space(self, target_pose, arm=None, skip_obstacle_update=False, frame="robot"): + def _ik_solver_cartesian_to_joint_space(self, target_pose, skip_obstacle_update=False, frame="robot"): """ Get joint positions for the arm so eef is at the target pose where the target pose is in the robot frame Args: relative_target_pose (Iterable of array): Position and orientation arrays in an iterable for pose in the robot frame - arm (str): Arm to use for the target pose skip_obstacle_update (bool): Whether to skip updating obstacles frame (str): Frame to use for the target pose @@ -797,9 +776,6 @@ def _ik_solver_cartesian_to_joint_space(self, target_pose, arm=None, skip_obstac - th.tensor or None: Joint positions to reach target pose or None if impossible to reach the target pose - th.tensor: Indices for joints in the robot """ - if arm is None: - arm = self.arm - if frame == "robot": target_pose = self._robot_pose_to_world_pose(target_pose) elif frame == "world": @@ -807,8 +783,8 @@ def _ik_solver_cartesian_to_joint_space(self, target_pose, arm=None, skip_obstac else: raise ValueError(f"Unsupported frame: {frame}") - target_pos = {self.robot.eef_link_names[arm]: target_pose[0]} - target_quat = {self.robot.eef_link_names[arm]: target_pose[1]} + target_pos = {self.robot.eef_link_names[self.arm]: target_pose[0]} + target_quat = {self.robot.eef_link_names[self.arm]: target_pose[1]} successes, joint_states = self._motion_generator.compute_trajectories( target_pos=target_pos, target_quat=target_quat, @@ -828,13 +804,12 @@ def _ik_solver_cartesian_to_joint_space(self, target_pose, arm=None, skip_obstac joint_pos = self._motion_generator.path_to_joint_trajectory( joint_state, get_full_js=False, emb_sel=CuRoboEmbodimentSelection.ARM ) - return joint_pos[self._manipulation_control_idx(arm)].cpu() + return joint_pos[self._manipulation_control_idx()].cpu() def _move_hand( self, target_pose, avoid_collision=True, - arm=None, motion_constraint=None, low_precision=False, lock_auxiliary_arm=False, @@ -846,7 +821,6 @@ def _move_hand( Args: target_pose (Iterable of array): Position and orientation arrays in an iterable for pose avoid_collision (bool): Whether to avoid collision (this need to be false for grasping) - arm (str): Arm to use for the target pose motion_constraint (MotionConstraint): Motion constraint for the motion low_precision (bool): Whether to use low precision for the motion lock_auxiliary_arm (bool): Whether to lock the other arm in place @@ -857,9 +831,6 @@ def _move_hand( """ if 
self.debug_visual_marker is not None: self.debug_visual_marker.set_position_orientation(*target_pose) - if arm is None: - arm = self.arm - if ignore_objects is not None: self._motion_generator.update_obstacles(ignore_objects=ignore_objects) @@ -873,8 +844,10 @@ def _move_hand( self.robot.eef_link_names[self.arm]: target_pose[1], } else: - left_hand_pos, left_hand_quat = target_pose if arm == "left" else self.robot.get_eef_pose(arm="left") - right_hand_pos, right_hand_quat = target_pose if arm == "right" else self.robot.get_eef_pose(arm="right") + left_hand_pos, left_hand_quat = target_pose if self.arm == "left" else self.robot.get_eef_pose(arm="left") + right_hand_pos, right_hand_quat = ( + target_pose if self.arm == "right" else self.robot.get_eef_pose(arm="right") + ) target_pos = { self.robot.eef_link_names["left"]: left_hand_pos, self.robot.eef_link_names["right"]: right_hand_pos, @@ -1055,7 +1028,7 @@ def _move_hand_direct_joint(self, joint_pos, stop_on_contact=False, ignore_failu self._arm_targets[controller_name] = joint_pos for i in range(m.MAX_STEPS_FOR_HAND_MOVE_JOINT): - current_joint_pos = self.robot.get_joint_positions()[self._manipulation_control_idx(self.arm)] + current_joint_pos = self.robot.get_joint_positions()[self._manipulation_control_idx()] diff_joint_pos = joint_pos - current_joint_pos if th.max(th.abs(diff_joint_pos)).item() < m.JOINT_POS_DIFF_THRESHOLD: return @@ -1237,11 +1210,11 @@ def _move_hand_linearly_cartesian( for joint_pos in joint_space_data: # Check if the movement can be done roughly linearly. - current_joint_positions = self.robot.get_joint_positions()[self._manipulation_control_idx(self.arm)] + current_joint_positions = self.robot.get_joint_positions()[self._manipulation_control_idx()] failed_joints = [] for joint_idx, target_joint_pos, current_joint_pos in zip( - self._manipulation_control_idx(self.arm), joint_pos, current_joint_positions + self._manipulation_control_idx(), joint_pos, current_joint_positions ): if th.abs(target_joint_pos - current_joint_pos) > m.MAX_ALLOWED_JOINT_ERROR_FOR_LINEAR_MOTION: failed_joints.append(joints[joint_idx].joint_name) @@ -1442,7 +1415,7 @@ def _empty_action(self, follow_arm_targets=True): raise ValueError("Unexpected IK control mode") else: target_joint_pos = self._arm_targets[name] - current_joint_pos = self.robot.get_joint_positions()[self._manipulation_control_idx(self.arm)] + current_joint_pos = self.robot.get_joint_positions()[self._manipulation_control_idx()] if controller.use_delta_commands: partial_action = target_joint_pos - current_joint_pos else: @@ -1474,21 +1447,19 @@ def _reset_robot(self): indented_print(f"Plan has {len(q_traj)} steps") yield from self._execute_motion_plan(q_traj, low_precision=True) - def _reset_hand(self, arm=None): + def _reset_hand(self): """ Yields action to move the hand to the position optimal for executing subsequent action primitives Returns: th.tensor or None: Action array for one step for the robot to reset its hand or None if it is done resetting """ - if arm is None: - arm = self.arm indented_print("Resetting hand") # TODO: make this work with both hands - reset_eef_pose = self._get_reset_eef_pose("world")[arm] + reset_eef_pose = self._get_reset_eef_pose("world")[self.arm] if self.debug_visual_marker is not None: self.debug_visual_marker.set_position_orientation(*reset_eef_pose) - yield from self._move_hand(reset_eef_pose, arm=arm, low_precision=True) + yield from self._move_hand(reset_eef_pose, low_precision=True) def _get_reset_eef_pose(self, frame="robot"): """ @@ 
-1587,10 +1558,10 @@ def _navigate_if_needed(self, obj, pose_on_obj=None, **kwargs): """ self._motion_generator.update_obstacles() if pose_on_obj is not None: - if self._target_in_reach_of_robot(pose_on_obj, skip_obstacle_update=True)[self.arm]: + if self._target_in_reach_of_robot(pose_on_obj, skip_obstacle_update=True): # No need to navigate. return - elif self._target_in_reach_of_robot(obj.get_position_orientation(), skip_obstacle_update=True)[self.arm]: + elif self._target_in_reach_of_robot(obj.get_position_orientation(), skip_obstacle_update=True): return yield from self._navigate_to_obj(obj, pose_on_obj=pose_on_obj, skip_obstacle_update=True, **kwargs) @@ -1898,7 +1869,7 @@ def _sample_pose_with_object_and_predicate( {"target object": target_obj.name, "object in hand": held_obj.name, "relation": pred_map[predicate]}, ) - def _validate_poses(self, candidate_poses, pose_on_obj=None, arm=None, skip_obstacle_update=False): + def _validate_poses(self, candidate_poses, pose_on_obj=None, skip_obstacle_update=False): """ Determines whether the robot can reach all poses on the objects and is not in collision at the specified 2d poses @@ -1906,16 +1877,12 @@ def _validate_poses(self, candidate_poses, pose_on_obj=None, arm=None, skip_obst candidate_poses (list of arrays): Candidate 2d poses (x, y, yaw) pose_on_obj (Iterable of arrays or list of Iterables): Pose(s) on the object(s) in the world frame. Can be a single pose or list of poses. - arm (str): Name of the arm to check reachability for skip_obstacle_update (bool): Whether to skip updating the obstacles in the motion generator Returns: - list of bool: Whether any robot arm can reach all poses on the objects and is not in collision + list of bool: Whether the default arm can reach all poses on the objects and is not in collision at the specified 2d poses """ - if arm is None: - arm = self.arm - # First check collisions for all candidate poses candidate_joint_positions = [] current_joint_pos = self.robot.get_joint_positions() @@ -1948,7 +1915,7 @@ def _validate_poses(self, candidate_poses, pose_on_obj=None, arm=None, skip_obst relative_pose = T.relative_pose_transform(*pose_on_obj, *pose) if not self._target_in_reach_of_robot_relative( relative_pose, skip_obstacle_update=skip_obstacle_update - )[arm]: + ): invalid_results[i] = True return ~invalid_results From 5e608a247e8c311c536a9e07d59908a2c3833ee6 Mon Sep 17 00:00:00 2001 From: Chengshu Li Date: Wed, 15 Jan 2025 14:43:37 -0800 Subject: [PATCH 57/76] remove unnecessary as_float32 in holonomic base joint controller --- omnigibson/controllers/holonomic_base_joint_controller.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/omnigibson/controllers/holonomic_base_joint_controller.py b/omnigibson/controllers/holonomic_base_joint_controller.py index 288d3a20b..8c36bd265 100644 --- a/omnigibson/controllers/holonomic_base_joint_controller.py +++ b/omnigibson/controllers/holonomic_base_joint_controller.py @@ -120,7 +120,7 @@ def _update_goal(self, command, control_dict): # are in the range of [-pi, pi]. 
This is important because revolute joints in Isaac Sim have a joint position (target) range of [-pi * 2, pi * 2] new_joint_pos = rz_joint_pos + delta_joint_pos - command = cb.as_float32(cb.cat([position, new_joint_pos])) + command = cb.cat([position, new_joint_pos]) else: # Handle velocity/effort control modes # Note: Only rotate the commands, don't translate @@ -138,6 +138,6 @@ def _update_goal(self, command, control_dict): angular_velocity = command[2:3] - command = cb.as_float32(cb.cat([linear_velocity, angular_velocity])) + command = cb.cat([linear_velocity, angular_velocity]) return super()._update_goal(command=command, control_dict=control_dict) From e85848dad766de1e384847be9813ed52f8bbc1bf Mon Sep 17 00:00:00 2001 From: Chengshu Li Date: Thu, 16 Jan 2025 11:32:11 -0800 Subject: [PATCH 58/76] address comments --- omnigibson/action_primitives/curobo.py | 12 +- .../starter_semantic_action_primitives.py | 426 ++++++++++-------- .../action_primitives/rs_int_example.py | 2 + omnigibson/utils/python_utils.py | 25 +- 4 files changed, 263 insertions(+), 202 deletions(-) diff --git a/omnigibson/action_primitives/curobo.py b/omnigibson/action_primitives/curobo.py index 25eb1f30a..9cdb82282 100644 --- a/omnigibson/action_primitives/curobo.py +++ b/omnigibson/action_primitives/curobo.py @@ -443,8 +443,8 @@ def compute_trajectories( return_full_result=False, success_ratio=None, attached_obj=None, - motion_constraint=None, attached_obj_scale=None, + motion_constraint=None, skip_obstacle_update=False, ik_only=False, ik_world_collision_check=True, @@ -481,6 +481,10 @@ def compute_trajectories( link names and the values are the corresponding BaseObject instances to attach to that link attached_obj_scale (None or Dict[str, float]): If specified, a dictionary where the keys are the end-effector link names and the values are the corresponding scale to apply to the attached object + motion_constraint (None or List[float]): If specified, the motion constraint vector is a 6D vector controlling + end-effector movement (angular first, linear next): [qx, qy, qz, x, y, z]. Setting any component to 1.0 + locks that axis, forcing the planner to reach the target using only the remaining unlocked axes. 
+ Details can be found here: https://curobo.org/advanced_examples/3_constrained_planning.html skip_obstacle_update (bool): Whether to skip updating the obstacles in the world collision checker ik_only (bool): Whether to only run the IK solver and not the trajectory optimization ik_world_collision_check (bool): Whether to check for collisions in the world when running the IK solver for ik_only mode @@ -563,12 +567,6 @@ def compute_trajectories( ) # Add the pose cost metric - # Details can be found here: https://curobo.org/advanced_examples/3_constrained_planning.html - # The motion constraint vector is a 6D vector controlling end-effector movement: - # Angular constraints: [qx, qy, qz] - rotation around each axis - # Linear constraints: [x, y, z] - translation along each axis - # Setting any component to 1.0 locks that axis, forcing the planner to reach - # the target using only the remaining unlocked axes if self.ee_link[emb_sel] in target_pos and motion_constraint is not None: plan_cfg.pose_cost_metric = lazy.curobo.wrap.reacher.motion_gen.PoseCostMetric( hold_partial_pose=True, hold_vec_weight=self._tensor_args.to_device(motion_constraint) diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index 7b8e6e8b4..7958f2d72 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -36,7 +36,9 @@ from omnigibson.tasks.behavior_task import BehaviorTask from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.utils.geometry_utils import wrap_angle -from omnigibson.utils.grasping_planning_utils import get_grasp_poses_for_object_sticky, get_grasp_position_for_open +from omnigibson.utils.grasping_planning_utils import get_grasp_poses_for_object_sticky + +# from omnigibson.utils.grasping_planning_utils import get_grasp_position_for_open from omnigibson.utils.motion_planning_utils import detect_robot_collision_in_sim from omnigibson.utils.object_state_utils import sample_cuboid_for_predicate from omnigibson.utils.python_utils import multi_dim_linspace @@ -67,12 +69,13 @@ R1: 0.2, } +m.DEFAULT_COLLISION_ACTIVATION_DISTANCE = 0.02 m.MAX_PLANNING_ATTEMPTS = 100 m.MAX_IK_FAILURES_BEFORE_RETURN = 50 m.MAX_STEPS_FOR_SETTLING = 500 -m.MAX_STEPS_FOR_JOINT_MOTION = 500 +m.MAX_STEPS_FOR_JOINT_MOTION = 10 m.MAX_CARTESIAN_HAND_STEP = 0.002 m.MAX_STEPS_FOR_HAND_MOVE_JOINT = 500 @@ -111,20 +114,23 @@ def indented_print(msg, *args, **kwargs): print(" " * len(inspect.stack()) + str(msg), *args, **kwargs) +# TODO: understand and rename detect_robot_collision_in_sim + + class StarterSemanticActionPrimitiveSet(IntEnum): _init_ = "value __doc__" GRASP = auto(), "Grasp an object" PLACE_ON_TOP = auto(), "Place the currently grasped object on top of another object" PLACE_INSIDE = auto(), "Place the currently grasped object inside another object" - OPEN = auto(), "Open an object" - CLOSE = auto(), "Close an object" + # OPEN = auto(), "Open an object" + # CLOSE = auto(), "Close an object" NAVIGATE_TO = auto(), "Navigate to an object (mostly for debugging purposes - other primitives also navigate first)" RELEASE = ( auto(), "Release an object, letting it fall to the ground. 
You can then grasp it again, as a way of reorienting your grasp of the object.", ) - TOGGLE_ON = auto(), "Toggle an object on" - TOGGLE_OFF = auto(), "Toggle an object off" + # TOGGLE_ON = auto(), "Toggle an object on" + # TOGGLE_OFF = auto(), "Toggle an object off" class StarterSemanticActionPrimitives(BaseActionPrimitiveSet): @@ -165,17 +171,21 @@ def __init__( StarterSemanticActionPrimitiveSet.GRASP: self._grasp, StarterSemanticActionPrimitiveSet.PLACE_ON_TOP: self._place_on_top, StarterSemanticActionPrimitiveSet.PLACE_INSIDE: self._place_inside, - StarterSemanticActionPrimitiveSet.OPEN: self._open, - StarterSemanticActionPrimitiveSet.CLOSE: self._close, + # StarterSemanticActionPrimitiveSet.OPEN: self._open, + # StarterSemanticActionPrimitiveSet.CLOSE: self._close, StarterSemanticActionPrimitiveSet.NAVIGATE_TO: self._navigate_to_obj, StarterSemanticActionPrimitiveSet.RELEASE: self._execute_release, - StarterSemanticActionPrimitiveSet.TOGGLE_ON: self._toggle_on, - StarterSemanticActionPrimitiveSet.TOGGLE_OFF: self._toggle_off, + # StarterSemanticActionPrimitiveSet.TOGGLE_ON: self._toggle_on, + # StarterSemanticActionPrimitiveSet.TOGGLE_OFF: self._toggle_off, } self._motion_generator = ( None if skip_curobo_initilization - else CuRoboMotionGenerator(robot=self.robot, batch_size=planning_batch_size) + else CuRoboMotionGenerator( + robot=self.robot, + batch_size=planning_batch_size, + collision_activation_distance=m.DEFAULT_COLLISION_ACTIVATION_DISTANCE, + ) ) self._task_relevant_objects_only = task_relevant_objects_only @@ -311,107 +321,115 @@ def apply_ref(self, primitive, *args, attempts=5): raise ActionPrimitiveErrorGroup(errors) - def _open(self, obj): - yield from self._open_or_close(obj, True) - - def _close(self, obj): - yield from self._open_or_close(obj, False) - - def _open_or_close(self, obj, should_open): - # Update the tracking to track the eef. - self._tracking_object = self.robot - - if self._get_obj_in_hand(): - raise ActionPrimitiveError( - ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, - "Cannot open or close an object while holding an object", - {"object in hand": self._get_obj_in_hand().name}, - ) - - # Open the hand first - yield from self._execute_release() - - for _ in range(m.MAX_ATTEMPTS_FOR_OPEN_CLOSE): - try: - # TODO: This needs to be fixed. Many assumptions (None relevant joint, 3 waypoints, etc.) - if should_open: - grasp_data = get_grasp_position_for_open(self.robot, obj, should_open, None) - else: - grasp_data = get_grasp_position_for_open(self.robot, obj, should_open, None, num_waypoints=3) - - if grasp_data is None: - # We were trying to do something but didn't have the data. - raise ActionPrimitiveError( - ActionPrimitiveError.Reason.SAMPLING_ERROR, - "Could not sample grasp position for target object", - {"target object": obj.name}, - ) - - relevant_joint, grasp_pose, target_poses, object_direction, grasp_required, pos_change = grasp_data - if abs(pos_change) < 0.1: - indented_print("Yaw change is small and done,", pos_change) - return - - # Prepare data for the approach later. 
- approach_pos = grasp_pose[0] + object_direction * m.OPEN_GRASP_APPROACH_DISTANCE - approach_pose = (approach_pos, grasp_pose[1]) - - # If the grasp pose is too far, navigate - yield from self._navigate_if_needed(obj, pose_on_obj=grasp_pose) - - yield from self._move_hand(grasp_pose, stop_if_stuck=True) - - # We can pre-grasp in sticky grasping mode only for opening - if should_open: - yield from self._execute_grasp() - - # Since the grasp pose is slightly off the object, we want to move towards the object, around 5cm. - # It's okay if we can't go all the way because we run into the object. - yield from self._navigate_if_needed(obj, pose_on_obj=approach_pose) - - if should_open: - yield from self._move_hand_linearly_cartesian( - approach_pose, ignore_failure=False, stop_on_contact=True, stop_if_stuck=True - ) - else: - yield from self._move_hand_linearly_cartesian( - approach_pose, ignore_failure=False, stop_if_stuck=True - ) - - # Step once to update - empty_action = self._empty_action() - yield self._postprocess_action(empty_action) - - for i, target_pose in enumerate(target_poses): - yield from self._move_hand_linearly_cartesian(target_pose, ignore_failure=False, stop_if_stuck=True) - - # Moving to target pose often fails. This might leave the robot's motors with torques that - # try to get to a far-away position thus applying large torques, but unable to move due to - # the sticky grasp joint. Thus if we release the joint, the robot might suddenly launch in an - # arbitrary direction. To avoid this, we command the hand to apply torques with its current - # position as its target. This prevents the hand from jerking into some other position when we do a release. - yield from self._move_hand_linearly_cartesian( - self.robot.eef_links[self.arm].get_position_orientation(), ignore_failure=True, stop_if_stuck=True - ) - - if should_open: - yield from self._execute_release() - yield from self._move_base_backward() - - except ActionPrimitiveError as e: - indented_print(e) - if should_open: - yield from self._execute_release() - yield from self._move_base_backward() - else: - yield from self._move_hand_backward() - - if obj.states[object_states.Open].get_value() != should_open: - raise ActionPrimitiveError( - ActionPrimitiveError.Reason.POST_CONDITION_ERROR, - "Despite executing the planned trajectory, the object did not open or close as expected. Maybe try again", - {"target object": obj.name, "is it currently open": obj.states[object_states.Open].get_value()}, - ) + # # One-attempt debug version + # ctrl = self.controller_functions[primitive] + # yield from ctrl(*args) + # if not self._get_obj_in_hand(): + # yield from self._execute_release() + # yield from self._reset_robot() + # yield from self._settle_robot() + + # def _open(self, obj): + # yield from self._open_or_close(obj, True) + + # def _close(self, obj): + # yield from self._open_or_close(obj, False) + + # def _open_or_close(self, obj, should_open): + # # Update the tracking to track the eef. + # self._tracking_object = self.robot + + # if self._get_obj_in_hand(): + # raise ActionPrimitiveError( + # ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, + # "Cannot open or close an object while holding an object", + # {"object in hand": self._get_obj_in_hand().name}, + # ) + + # # Open the hand first + # yield from self._execute_release() + + # for _ in range(m.MAX_ATTEMPTS_FOR_OPEN_CLOSE): + # try: + # # TODO: This needs to be fixed. Many assumptions (None relevant joint, 3 waypoints, etc.) 
+ # if should_open: + # grasp_data = get_grasp_position_for_open(self.robot, obj, should_open, None) + # else: + # grasp_data = get_grasp_position_for_open(self.robot, obj, should_open, None, num_waypoints=3) + + # if grasp_data is None: + # # We were trying to do something but didn't have the data. + # raise ActionPrimitiveError( + # ActionPrimitiveError.Reason.SAMPLING_ERROR, + # "Could not sample grasp position for target object", + # {"target object": obj.name}, + # ) + + # relevant_joint, grasp_pose, target_poses, object_direction, grasp_required, pos_change = grasp_data + # if abs(pos_change) < 0.1: + # indented_print("Yaw change is small and done,", pos_change) + # return + + # # Prepare data for the approach later. + # approach_pos = grasp_pose[0] + object_direction * m.OPEN_GRASP_APPROACH_DISTANCE + # approach_pose = (approach_pos, grasp_pose[1]) + + # # If the grasp pose is too far, navigate + # yield from self._navigate_if_needed(obj, pose_on_obj=grasp_pose) + + # yield from self._move_hand(grasp_pose, stop_if_stuck=True) + + # # We can pre-grasp in sticky grasping mode only for opening + # if should_open: + # yield from self._execute_grasp() + + # # Since the grasp pose is slightly off the object, we want to move towards the object, around 5cm. + # # It's okay if we can't go all the way because we run into the object. + # yield from self._navigate_if_needed(obj, pose_on_obj=approach_pose) + + # if should_open: + # yield from self._move_hand_linearly_cartesian( + # approach_pose, ignore_failure=False, stop_on_contact=True, stop_if_stuck=True + # ) + # else: + # yield from self._move_hand_linearly_cartesian( + # approach_pose, ignore_failure=False, stop_if_stuck=True + # ) + + # # Step once to update + # empty_action = self._empty_action() + # yield self._postprocess_action(empty_action) + + # for i, target_pose in enumerate(target_poses): + # yield from self._move_hand_linearly_cartesian(target_pose, ignore_failure=False, stop_if_stuck=True) + + # # Moving to target pose often fails. This might leave the robot's motors with torques that + # # try to get to a far-away position thus applying large torques, but unable to move due to + # # the sticky grasp joint. Thus if we release the joint, the robot might suddenly launch in an + # # arbitrary direction. To avoid this, we command the hand to apply torques with its current + # # position as its target. This prevents the hand from jerking into some other position when we do a release. + # yield from self._move_hand_linearly_cartesian( + # self.robot.eef_links[self.arm].get_position_orientation(), ignore_failure=True, stop_if_stuck=True + # ) + + # if should_open: + # yield from self._execute_release() + # yield from self._move_base_backward() + + # except ActionPrimitiveError as e: + # indented_print(e) + # if should_open: + # yield from self._execute_release() + # yield from self._move_base_backward() + # else: + # yield from self._move_hand_backward() + + # if obj.states[object_states.Open].get_value() != should_open: + # raise ActionPrimitiveError( + # ActionPrimitiveError.Reason.POST_CONDITION_ERROR, + # "Despite executing the planned trajectory, the object did not open or close as expected. Maybe try again", + # {"target object": obj.name, "is it currently open": obj.states[object_states.Open].get_value()}, + # ) # TODO: Figure out how to generalize out of this "backing out" behavior. 
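    # A minimal sketch of this backing-out pattern (hypothetical helper, not part of the patch;
    # assumes _empty_action() / _postprocess_action() behave as used elsewhere in this file and
    # that the first "base" action entry is the forward linear velocity):
    def _back_out_base_sketch(self, steps=5, speed=0.2):
        for _ in range(steps):
            action = self._empty_action()
            # Negative forward velocity in the robot frame backs the base away
            action[self.robot.controller_action_idx["base"][0]] = -speed
            yield self._postprocess_action(action)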
def _move_base_backward(self, steps=5, speed=0.2): @@ -482,6 +500,13 @@ def _grasp(self, obj): "You need to provide an object to grasp", {"provided object": obj}, ) + if self.robot.grasping_mode not in ["sticky", "assisted"]: + raise ActionPrimitiveError( + ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, + "Grasping mode not supported", + {"grasping mode": self.robot.grasping_mode}, + ) + # Update the tracking to track the object. self._tracking_object = obj @@ -529,12 +554,13 @@ def _grasp(self, obj): # Pre-grasp in sticky grasping mode. indented_print("Pregrasp squeeze") yield from self._execute_grasp() - # Since the grasp pose is slightly off the object, we want to move towards the object, around 5cm. + # Since the pregrasp pose is slightly away from the object, we want to move towards the object # It's okay if we can't go all the way because we run into the object. indented_print("Performing grasp approach") - # Use direct IK to move the hand to the approach pose. + # Only translate in the z direction. + # Ignore the object during motion planning because the fingers are closed. yield from self._move_hand( - approach_pose, motion_constraint=[1, 1, 1, 1, 1, 0], avoid_collision=False, ignore_objects=[obj] + approach_pose, motion_constraint=[1, 1, 1, 1, 1, 0], stop_on_contact=True, ignore_objects=[obj] ) elif self.robot.grasping_mode == "assisted": indented_print("Performing grasp approach") @@ -589,36 +615,36 @@ def _place_inside(self, obj): """ yield from self._place_with_predicate(obj, object_states.Inside) - def _toggle_on(self, obj): - yield from self._toggle(obj, True) + # def _toggle_on(self, obj): + # yield from self._toggle(obj, True) - def _toggle_off(self, obj): - yield from self._toggle(obj, False) + # def _toggle_off(self, obj): + # yield from self._toggle(obj, False) - def _toggle(self, obj, value): - if obj.states[object_states.ToggledOn].get_value() == value: - return + # def _toggle(self, obj, value): + # if obj.states[object_states.ToggledOn].get_value() == value: + # return - # Put the hand in the toggle marker. - toggle_state = obj.states[object_states.ToggledOn] - toggle_position = toggle_state.get_link_position() - yield from self._navigate_if_needed(obj, toggle_position) + # # Put the hand in the toggle marker. + # toggle_state = obj.states[object_states.ToggledOn] + # toggle_position = toggle_state.get_link_position() + # yield from self._navigate_if_needed(obj, toggle_position) - # Just keep the current hand orientation. - hand_orientation = self.robot.eef_links[self.arm].get_position_orientation()[1] - desired_hand_pose = (toggle_position, hand_orientation) + # # Just keep the current hand orientation. 
+ # hand_orientation = self.robot.eef_links[self.arm].get_position_orientation()[1] + # desired_hand_pose = (toggle_position, hand_orientation) - yield from self._move_hand(desired_hand_pose) + # yield from self._move_hand(desired_hand_pose) - if obj.states[object_states.ToggledOn].get_value() != value: - raise ActionPrimitiveError( - ActionPrimitiveError.Reason.POST_CONDITION_ERROR, - "The object did not toggle as expected - maybe try again", - { - "target object": obj.name, - "is it currently toggled on": obj.states[object_states.ToggledOn].get_value(), - }, - ) + # if obj.states[object_states.ToggledOn].get_value() != value: + # raise ActionPrimitiveError( + # ActionPrimitiveError.Reason.POST_CONDITION_ERROR, + # "The object did not toggle as expected - maybe try again", + # { + # "target object": obj.name, + # "is it currently toggled on": obj.states[object_states.ToggledOn].get_value(), + # }, + # ) def _place_with_predicate(self, obj, predicate): """ @@ -785,22 +811,37 @@ def _ik_solver_cartesian_to_joint_space(self, target_pose, skip_obstacle_update= target_pos = {self.robot.eef_link_names[self.arm]: target_pose[0]} target_quat = {self.robot.eef_link_names[self.arm]: target_pose[1]} + + target_pos = {k: th.stack([v for _ in range(self._motion_generator.batch_size)]) for k, v in target_pos.items()} + target_quat = { + k: th.stack([v for _ in range(self._motion_generator.batch_size)]) for k, v in target_quat.items() + } + successes, joint_states = self._motion_generator.compute_trajectories( target_pos=target_pos, target_quat=target_quat, is_local=False, max_attempts=math.ceil(m.MAX_PLANNING_ATTEMPTS / self._motion_generator.batch_size), + timeout=60.0, + ik_fail_return=m.MAX_IK_FAILURES_BEFORE_RETURN, + enable_finetune_trajopt=False, + finetune_attempts=0, return_full_result=False, + success_ratio=1.0 / self._motion_generator.batch_size, + attached_obj=None, + attached_obj_scale=None, + motion_constraint=None, + skip_obstacle_update=skip_obstacle_update, ik_only=True, - ik_fail_return=m.MAX_IK_FAILURES_BEFORE_RETURN, ik_world_collision_check=False, - skip_obstacle_update=skip_obstacle_update, emb_sel=CuRoboEmbodimentSelection.ARM, ) - if not successes[0]: + # Grab the first successful joint state if found + success_idx = th.where(successes)[0].cpu() + if len(success_idx) == 0: return None - joint_state = joint_states[0] + joint_state = joint_states[success_idx[0]] joint_pos = self._motion_generator.path_to_joint_trajectory( joint_state, get_full_js=False, emb_sel=CuRoboEmbodimentSelection.ARM ) @@ -809,7 +850,7 @@ def _ik_solver_cartesian_to_joint_space(self, target_pose, skip_obstacle_update= def _move_hand( self, target_pose, - avoid_collision=True, + stop_on_contact=False, motion_constraint=None, low_precision=False, lock_auxiliary_arm=False, @@ -820,7 +861,7 @@ def _move_hand( Args: target_pose (Iterable of array): Position and orientation arrays in an iterable for pose - avoid_collision (bool): Whether to avoid collision (this need to be false for grasping) + stop_on_contact (bool): Whether to stop executing motion plan if contact is detected motion_constraint (MotionConstraint): Motion constraint for the motion low_precision (bool): Whether to use low precision for the motion lock_auxiliary_arm (bool): Whether to lock the other arm in place @@ -831,8 +872,6 @@ def _move_hand( """ if self.debug_visual_marker is not None: self.debug_visual_marker.set_position_orientation(*target_pose) - if ignore_objects is not None: - 
self._motion_generator.update_obstacles(ignore_objects=ignore_objects) yield from self._settle_robot() # curobo motion generator takes a pose but outputs joint positions @@ -861,11 +900,11 @@ def _move_hand( target_quat=target_quat, embodiment_selection=CuRoboEmbodimentSelection.ARM, motion_constraint=motion_constraint, - skip_obstalce_update=ignore_objects is not None, + ignore_objects=ignore_objects, ) indented_print(f"Plan has {len(q_traj)} steps") - yield from self._execute_motion_plan(q_traj, stop_on_contact=not avoid_collision, low_precision=low_precision) + yield from self._execute_motion_plan(q_traj, stop_on_contact=stop_on_contact, low_precision=low_precision) def _plan_joint_motion( self, @@ -874,21 +913,21 @@ def _plan_joint_motion( embodiment_selection=CuRoboEmbodimentSelection.DEFAULT, motion_constraint=None, skip_obstalce_update=False, + ignore_objects=None, ): # If an object is grasped, we need to pass it to the motion planner obj_in_hand = self._get_obj_in_hand() attached_obj = {self.robot.eef_link_names[self.arm]: obj_in_hand.root_link} if obj_in_hand is not None else None - success = False - traj_path = None - # aggregate target_pos and target_quat to match batch_size + # Aggregate target_pos and target_quat to match batch_size target_pos = {k: th.stack([v for _ in range(self._motion_generator.batch_size)]) for k, v in target_pos.items()} target_quat = { k: th.stack([v for _ in range(self._motion_generator.batch_size)]) for k, v in target_quat.items() } if not skip_obstalce_update: - self._motion_generator.update_obstacles() + self._motion_generator.update_obstacles(ignore_objects=ignore_objects) + successes, traj_paths = self._motion_generator.compute_trajectories( target_pos=target_pos, target_quat=target_quat, @@ -899,22 +938,24 @@ def _plan_joint_motion( enable_finetune_trajopt=True, finetune_attempts=1, return_full_result=False, + success_ratio=1.0 / self._motion_generator.batch_size, attached_obj=attached_obj, + attached_obj_scale=None, motion_constraint=motion_constraint, skip_obstacle_update=True, + ik_only=False, + ik_world_collision_check=True, emb_sel=embodiment_selection, ) - # Grab the first successful trajectory, if not found, then continue planning + # Grab the first successful trajectory if found success_idx = th.where(successes)[0].cpu() - if len(success_idx) > 0: - success = True - traj_path = traj_paths[success_idx[0]] - if not success: + if len(success_idx) == 0: raise ActionPrimitiveError( ActionPrimitiveError.Reason.PLANNING_ERROR, "There is no accessible path from where you are to the desired pose. 
Try again", ) + traj_path = traj_paths[success_idx[0]] q_traj = self._motion_generator.path_to_joint_trajectory( traj_path, get_full_js=True, emb_sel=embodiment_selection ).cpu() @@ -929,7 +970,6 @@ def _execute_motion_plan( ): for i, joint_pos in enumerate(q_traj): indented_print(f"Executing motion plan step {i + 1}/{len(q_traj)}") - if ignore_physics: self.robot.set_joint_positions(joint_pos) og.sim.step() @@ -937,18 +977,15 @@ def _execute_motion_plan( return else: # Convert target joint positions to command - action = self.robot.q_to_action(joint_pos) - base_target_reached = False articulation_target_reached = False collision_detected = False - articulation_control_idx = th.cat( - ( - self.robot.arm_control_idx[self.arm], - self.robot.trunk_control_idx, - ) - ) - for _ in range(m.MAX_STEPS_FOR_JOINT_MOTION): + articulation_control_idx = [self.robot.trunk_control_idx] + for arm in self.robot.arm_names: + articulation_control_idx.append(self.robot.arm_control_idx[arm]) + articulation_control_idx = th.cat(articulation_control_idx) + for j in range(m.MAX_STEPS_FOR_JOINT_MOTION): + # indented_print(f"Step {j + 1}/{m.MAX_STEPS_FOR_JOINT_MOTION}") current_joint_pos = self.robot.get_joint_positions() joint_pos_diff = joint_pos - current_joint_pos base_joint_diff = joint_pos_diff[self.robot.base_control_idx] @@ -956,30 +993,42 @@ def _execute_motion_plan( articulation_threshold = ( m.JOINT_POS_DIFF_THRESHOLD if not low_precision else m.LOW_PRECISION_JOINT_POS_DIFF_THRESHOLD ) - if th.max(th.abs(articulation_joint_diff)).item() < articulation_threshold: + max_articulation_joint_diff = th.max(th.abs(articulation_joint_diff)).item() + if max_articulation_joint_diff < articulation_threshold: articulation_target_reached = True - if ( - th.max(th.abs(base_joint_diff[:2])).item() < m.DEFAULT_DIST_THRESHOLD - and wrap_angle(base_joint_diff[2]) < m.DEFAULT_ANGLE_THRESHOLD - ): + + max_base_pos_diff = th.max(th.abs(base_joint_diff[:2])).item() + max_base_orn_diff = th.abs(wrap_angle(base_joint_diff[2])) + if max_base_pos_diff < m.DEFAULT_DIST_THRESHOLD and max_base_orn_diff < m.DEFAULT_ANGLE_THRESHOLD: base_target_reached = True + + # Early stopping if the base, trunk and arm joints are at the target positions if base_target_reached and articulation_target_reached: break + collision_detected = detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=True) if stop_on_contact and collision_detected: + indented_print(f"Collision detected at step {i + 1}/{len(q_traj)}") return + + # We need to call @q_to_action for every step because as the robot base moves, the same base joint_pos will be + # converted to different actions, since HolonomicBaseJointController accepts an action in the robot local frame. + action = self.robot.q_to_action(joint_pos) + yield self._postprocess_action(action) - action = self.robot.q_to_action( - joint_pos - ) # This is needed for holonomic base joint controller to update local frame pose if not ignore_failure: if not base_target_reached: + indented_print(f"max base pos diff: {max_base_pos_diff}") + indented_print(f"max base angle diff: {max_base_orn_diff}") + # breakpoint() raise ActionPrimitiveError( ActionPrimitiveError.Reason.EXECUTION_ERROR, "Could not reach the target base joint positions. Try again", ) if not articulation_target_reached: + indented_print(f"max articulation joint diff: {max_articulation_joint_diff}") + # breakpoint() raise ActionPrimitiveError( ActionPrimitiveError.Reason.EXECUTION_ERROR, "Could not reach the target articulation joint positions. 
Try again", @@ -1002,7 +1051,7 @@ def _add_linearly_interpolated_waypoints(self, plan, max_inter_dist=0.01): # Calculate maximum difference across all dimensions max_diff = (plan[i + 1] - plan[i]).abs().max() num_intervals = math.ceil(max_diff.item() / max_inter_dist) - interpolated_plan += multi_dim_linspace(plan[i], plan[i + 1], num_intervals) + interpolated_plan += multi_dim_linspace(plan[i], plan[i + 1], num_intervals, endpoint=False) interpolated_plan.append(plan[-1]) return interpolated_plan @@ -1257,18 +1306,16 @@ def _move_fingers_to_limit(self, limit_type): Yields: th.tensor or None: Action array for one step for the robot to move fingers or None if done. """ - q = self.robot.get_joint_positions() - joint_names = list(self.robot.joints.keys()) - for finger_joint in self.robot.finger_joints[self.arm]: - idx = joint_names.index(finger_joint.joint_name) - q[idx] = getattr(finger_joint, f"{limit_type}_limit") - action = self.robot.q_to_action(q) - finger_joint_limits = getattr(self.robot, f"joint_{limit_type}_limits")[ - self.robot.gripper_control_idx[self.arm] - ] - + target_joint_positions = self.robot.get_joint_positions() + gripper_ctrl_idx = self.robot.gripper_control_idx[self.arm] + if limit_type == self.robot._grasping_direction: + finger_joint_limits = self.robot.joint_lower_limits[gripper_ctrl_idx] + else: + finger_joint_limits = self.robot.joint_upper_limits[gripper_ctrl_idx] + target_joint_positions[gripper_ctrl_idx] = finger_joint_limits + action = self.robot.q_to_action(target_joint_positions) for _ in range(m.MAX_STEPS_FOR_GRASP_OR_RELEASE): - finger_joint_positions = self.robot.get_joint_positions()[self.robot.gripper_control_idx[self.arm]] + finger_joint_positions = self.robot.get_joint_positions()[gripper_ctrl_idx] if th.allclose(finger_joint_positions, finger_joint_limits, atol=0.005): break elif limit_type == "lower" and self._get_obj_in_hand() is not None: @@ -1433,6 +1480,7 @@ def _reset_robot(self): Returns: th.tensor or None: Action array for one step for the robot to reset its hands or None if it is done resetting """ + indented_print("Resetting robot") target_pos = dict() target_quat = dict() for arm in self.robot.arm_names: @@ -1519,7 +1567,7 @@ def _navigate_to_pose(self, pose_2d): target_quat = {self.robot.base_footprint_link_name: pose_3d[1]} q_traj = self._plan_joint_motion(target_pos, target_quat, CuRoboEmbodimentSelection.BASE) - yield from self._execute_motion_plan(q_traj, stop_on_contact=False) + yield from self._execute_motion_plan(q_traj) def _draw_plan(self, plan): SEARCHED = [] diff --git a/omnigibson/examples/action_primitives/rs_int_example.py b/omnigibson/examples/action_primitives/rs_int_example.py index eed2139a2..b784f1c5a 100644 --- a/omnigibson/examples/action_primitives/rs_int_example.py +++ b/omnigibson/examples/action_primitives/rs_int_example.py @@ -85,6 +85,8 @@ def main(): execute_controller(controller.apply_ref(StarterSemanticActionPrimitiveSet.PLACE_ON_TOP, coffee_table), env) print("Finished executing place") + og.shutdown() + if __name__ == "__main__": main() diff --git a/omnigibson/utils/python_utils.py b/omnigibson/utils/python_utils.py index a999bbfa2..0dfb8f668 100644 --- a/omnigibson/utils/python_utils.py +++ b/omnigibson/utils/python_utils.py @@ -810,8 +810,8 @@ def h5py_group_to_torch(group): return state -@th.jit.script -def multi_dim_linspace(start: th.Tensor, stop: th.Tensor, num: int) -> th.Tensor: +# @th.jit.script +def multi_dim_linspace(start: th.Tensor, stop: th.Tensor, num: int, endpoint: bool = True) -> 
th.Tensor: """ Generate a tensor with evenly spaced values along multiple dimensions. This function creates a tensor where each slice along the first dimension @@ -822,12 +822,13 @@ def multi_dim_linspace(start: th.Tensor, stop: th.Tensor, num: int) -> th.Tensor start (th.Tensor): Starting values for each dimension. stop (th.Tensor): Ending values for each dimension. num (int): Number of samples to generate along the interpolated dimension. + endpoint (bool, optional): If True, stop is the last sample. Otherwise, it is not included. Returns: th.Tensor: A tensor of shape (num, *start.shape) containing the interpolated values. Example: - >>> start = th.tensor([0, 10, 100]) - >>> stop = th.tensor([1, 20, 200]) - >>> result = multi_dim_linspace(start, stop, num=5) + >>> start = th.tensor([0.0, 10.0, 100.0]) + >>> stop = th.tensor([1.0, 20.0, 200.0]) + >>> result = multi_dim_linspace(start, stop, num=5, endpoint=True) >>> print(result.shape) torch.Size([5, 3]) >>> print(result) @@ -836,8 +837,20 @@ def multi_dim_linspace(start: th.Tensor, stop: th.Tensor, num: int) -> th.Tensor [ 0.5000, 15.0000, 150.0000], [ 0.7500, 17.5000, 175.0000], [ 1.0000, 20.0000, 200.0000]]) + >>> result = multi_dim_linspace(start, stop, num=5, endpoint=False) + >>> print(result.shape) + torch.Size([5, 3]) + >>> print(result) + tensor([[ 0.0000, 10.0000, 100.0000], + [ 0.2000, 12.0000, 120.0000], + [ 0.4000, 14.0000, 140.0000], + [ 0.6000, 16.0000, 160.0000], + [ 0.8000, 18.0000, 180.0000]]) """ - steps = th.linspace(0, 1, num, dtype=start.dtype, device=start.device) + if endpoint: + steps = th.linspace(0, 1, num, dtype=start.dtype, device=start.device) + else: + steps = th.linspace(0, 1, num + 1, dtype=start.dtype, device=start.device)[:-1] # Create a new shape for broadcasting new_shape = [num] + [1] * start.dim() From 7350d2aace0cb072d4d8e336b2fefc4e38d6ccc8 Mon Sep 17 00:00:00 2001 From: Chengshu Li Date: Thu, 16 Jan 2025 17:48:42 -0800 Subject: [PATCH 59/76] address comments --- .../starter_semantic_action_primitives.py | 333 +++++++++--------- .../action_primitives/rs_int_example.py | 8 +- omnigibson/object_states/contact_bodies.py | 7 +- omnigibson/objects/dataset_object.py | 7 +- omnigibson/robots/fetch.py | 8 +- omnigibson/utils/motion_planning_utils.py | 35 +- tests/test_robot_states_flatcache.py | 25 +- 7 files changed, 210 insertions(+), 213 deletions(-) diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index 7958f2d72..932b69bdc 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -36,9 +36,7 @@ from omnigibson.tasks.behavior_task import BehaviorTask from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.utils.geometry_utils import wrap_angle -from omnigibson.utils.grasping_planning_utils import get_grasp_poses_for_object_sticky - -# from omnigibson.utils.grasping_planning_utils import get_grasp_position_for_open +from omnigibson.utils.grasping_planning_utils import get_grasp_poses_for_object_sticky, get_grasp_position_for_open from omnigibson.utils.motion_planning_utils import detect_robot_collision_in_sim from omnigibson.utils.object_state_utils import sample_cuboid_for_predicate from omnigibson.utils.python_utils import multi_dim_linspace @@ -114,23 +112,23 @@ def indented_print(msg, *args, **kwargs): print(" " * len(inspect.stack()) + str(msg), *args, **kwargs) -# TODO: 
understand and rename detect_robot_collision_in_sim - - class StarterSemanticActionPrimitiveSet(IntEnum): _init_ = "value __doc__" GRASP = auto(), "Grasp an object" PLACE_ON_TOP = auto(), "Place the currently grasped object on top of another object" PLACE_INSIDE = auto(), "Place the currently grasped object inside another object" - # OPEN = auto(), "Open an object" - # CLOSE = auto(), "Close an object" + OPEN = auto(), "Open an object" + CLOSE = auto(), "Close an object" NAVIGATE_TO = auto(), "Navigate to an object (mostly for debugging purposes - other primitives also navigate first)" RELEASE = ( auto(), "Release an object, letting it fall to the ground. You can then grasp it again, as a way of reorienting your grasp of the object.", ) - # TOGGLE_ON = auto(), "Toggle an object on" - # TOGGLE_OFF = auto(), "Toggle an object off" + TOGGLE_ON = auto(), "Toggle an object on" + TOGGLE_OFF = auto(), "Toggle an object off" + + +# TODO: overwrite_head_action might move the head that causes self-collision with curobo-generated motion plans class StarterSemanticActionPrimitives(BaseActionPrimitiveSet): @@ -171,12 +169,12 @@ def __init__( StarterSemanticActionPrimitiveSet.GRASP: self._grasp, StarterSemanticActionPrimitiveSet.PLACE_ON_TOP: self._place_on_top, StarterSemanticActionPrimitiveSet.PLACE_INSIDE: self._place_inside, - # StarterSemanticActionPrimitiveSet.OPEN: self._open, - # StarterSemanticActionPrimitiveSet.CLOSE: self._close, + StarterSemanticActionPrimitiveSet.OPEN: self._open, + StarterSemanticActionPrimitiveSet.CLOSE: self._close, StarterSemanticActionPrimitiveSet.NAVIGATE_TO: self._navigate_to_obj, StarterSemanticActionPrimitiveSet.RELEASE: self._execute_release, - # StarterSemanticActionPrimitiveSet.TOGGLE_ON: self._toggle_on, - # StarterSemanticActionPrimitiveSet.TOGGLE_OFF: self._toggle_off, + StarterSemanticActionPrimitiveSet.TOGGLE_ON: self._toggle_on, + StarterSemanticActionPrimitiveSet.TOGGLE_OFF: self._toggle_off, } self._motion_generator = ( None @@ -187,6 +185,7 @@ def __init__( collision_activation_distance=m.DEFAULT_COLLISION_ACTIVATION_DISTANCE, ) ) + self._reset_eef_pose = {arm: self.robot.get_relative_eef_pose(arm) for arm in self.robot.arm_names} self._task_relevant_objects_only = task_relevant_objects_only @@ -321,7 +320,7 @@ def apply_ref(self, primitive, *args, attempts=5): raise ActionPrimitiveErrorGroup(errors) - # # One-attempt debug version + # One-attempt debug version # ctrl = self.controller_functions[primitive] # yield from ctrl(*args) # if not self._get_obj_in_hand(): @@ -329,107 +328,109 @@ def apply_ref(self, primitive, *args, attempts=5): # yield from self._reset_robot() # yield from self._settle_robot() - # def _open(self, obj): - # yield from self._open_or_close(obj, True) - - # def _close(self, obj): - # yield from self._open_or_close(obj, False) - - # def _open_or_close(self, obj, should_open): - # # Update the tracking to track the eef. - # self._tracking_object = self.robot - - # if self._get_obj_in_hand(): - # raise ActionPrimitiveError( - # ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, - # "Cannot open or close an object while holding an object", - # {"object in hand": self._get_obj_in_hand().name}, - # ) - - # # Open the hand first - # yield from self._execute_release() - - # for _ in range(m.MAX_ATTEMPTS_FOR_OPEN_CLOSE): - # try: - # # TODO: This needs to be fixed. Many assumptions (None relevant joint, 3 waypoints, etc.) 
- # if should_open: - # grasp_data = get_grasp_position_for_open(self.robot, obj, should_open, None) - # else: - # grasp_data = get_grasp_position_for_open(self.robot, obj, should_open, None, num_waypoints=3) - - # if grasp_data is None: - # # We were trying to do something but didn't have the data. - # raise ActionPrimitiveError( - # ActionPrimitiveError.Reason.SAMPLING_ERROR, - # "Could not sample grasp position for target object", - # {"target object": obj.name}, - # ) - - # relevant_joint, grasp_pose, target_poses, object_direction, grasp_required, pos_change = grasp_data - # if abs(pos_change) < 0.1: - # indented_print("Yaw change is small and done,", pos_change) - # return - - # # Prepare data for the approach later. - # approach_pos = grasp_pose[0] + object_direction * m.OPEN_GRASP_APPROACH_DISTANCE - # approach_pose = (approach_pos, grasp_pose[1]) - - # # If the grasp pose is too far, navigate - # yield from self._navigate_if_needed(obj, pose_on_obj=grasp_pose) - - # yield from self._move_hand(grasp_pose, stop_if_stuck=True) - - # # We can pre-grasp in sticky grasping mode only for opening - # if should_open: - # yield from self._execute_grasp() - - # # Since the grasp pose is slightly off the object, we want to move towards the object, around 5cm. - # # It's okay if we can't go all the way because we run into the object. - # yield from self._navigate_if_needed(obj, pose_on_obj=approach_pose) - - # if should_open: - # yield from self._move_hand_linearly_cartesian( - # approach_pose, ignore_failure=False, stop_on_contact=True, stop_if_stuck=True - # ) - # else: - # yield from self._move_hand_linearly_cartesian( - # approach_pose, ignore_failure=False, stop_if_stuck=True - # ) - - # # Step once to update - # empty_action = self._empty_action() - # yield self._postprocess_action(empty_action) - - # for i, target_pose in enumerate(target_poses): - # yield from self._move_hand_linearly_cartesian(target_pose, ignore_failure=False, stop_if_stuck=True) - - # # Moving to target pose often fails. This might leave the robot's motors with torques that - # # try to get to a far-away position thus applying large torques, but unable to move due to - # # the sticky grasp joint. Thus if we release the joint, the robot might suddenly launch in an - # # arbitrary direction. To avoid this, we command the hand to apply torques with its current - # # position as its target. This prevents the hand from jerking into some other position when we do a release. - # yield from self._move_hand_linearly_cartesian( - # self.robot.eef_links[self.arm].get_position_orientation(), ignore_failure=True, stop_if_stuck=True - # ) - - # if should_open: - # yield from self._execute_release() - # yield from self._move_base_backward() - - # except ActionPrimitiveError as e: - # indented_print(e) - # if should_open: - # yield from self._execute_release() - # yield from self._move_base_backward() - # else: - # yield from self._move_hand_backward() - - # if obj.states[object_states.Open].get_value() != should_open: - # raise ActionPrimitiveError( - # ActionPrimitiveError.Reason.POST_CONDITION_ERROR, - # "Despite executing the planned trajectory, the object did not open or close as expected. 
Maybe try again", - # {"target object": obj.name, "is it currently open": obj.states[object_states.Open].get_value()}, - # ) + def _open(self, obj): + yield from self._open_or_close(obj, True) + + def _close(self, obj): + yield from self._open_or_close(obj, False) + + def _open_or_close(self, obj, should_open): + raise NotImplementedError("Open/close is not implemented correctly yet.") + + # Update the tracking to track the eef. + self._tracking_object = self.robot + + if self._get_obj_in_hand(): + raise ActionPrimitiveError( + ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, + "Cannot open or close an object while holding an object", + {"object in hand": self._get_obj_in_hand().name}, + ) + + # Open the hand first + yield from self._execute_release() + + for _ in range(m.MAX_ATTEMPTS_FOR_OPEN_CLOSE): + try: + # TODO: This needs to be fixed. Many assumptions (None relevant joint, 3 waypoints, etc.) + if should_open: + grasp_data = get_grasp_position_for_open(self.robot, obj, should_open, None) + else: + grasp_data = get_grasp_position_for_open(self.robot, obj, should_open, None, num_waypoints=3) + + if grasp_data is None: + # We were trying to do something but didn't have the data. + raise ActionPrimitiveError( + ActionPrimitiveError.Reason.SAMPLING_ERROR, + "Could not sample grasp position for target object", + {"target object": obj.name}, + ) + + relevant_joint, grasp_pose, target_poses, object_direction, grasp_required, pos_change = grasp_data + if abs(pos_change) < 0.1: + indented_print("Yaw change is small and done,", pos_change) + return + + # Prepare data for the approach later. + approach_pos = grasp_pose[0] + object_direction * m.OPEN_GRASP_APPROACH_DISTANCE + approach_pose = (approach_pos, grasp_pose[1]) + + # If the grasp pose is too far, navigate + yield from self._navigate_if_needed(obj, pose_on_obj=grasp_pose) + + yield from self._move_hand(grasp_pose, stop_if_stuck=True) + + # We can pre-grasp in sticky grasping mode only for opening + if should_open: + yield from self._execute_grasp() + + # Since the grasp pose is slightly off the object, we want to move towards the object, around 5cm. + # It's okay if we can't go all the way because we run into the object. + yield from self._navigate_if_needed(obj, pose_on_obj=approach_pose) + + if should_open: + yield from self._move_hand_linearly_cartesian( + approach_pose, ignore_failure=False, stop_on_contact=True, stop_if_stuck=True + ) + else: + yield from self._move_hand_linearly_cartesian( + approach_pose, ignore_failure=False, stop_if_stuck=True + ) + + # Step once to update + empty_action = self._empty_action() + yield self._postprocess_action(empty_action) + + for i, target_pose in enumerate(target_poses): + yield from self._move_hand_linearly_cartesian(target_pose, ignore_failure=False, stop_if_stuck=True) + + # Moving to target pose often fails. This might leave the robot's motors with torques that + # try to get to a far-away position thus applying large torques, but unable to move due to + # the sticky grasp joint. Thus if we release the joint, the robot might suddenly launch in an + # arbitrary direction. To avoid this, we command the hand to apply torques with its current + # position as its target. This prevents the hand from jerking into some other position when we do a release. 
+ yield from self._move_hand_linearly_cartesian( + self.robot.eef_links[self.arm].get_position_orientation(), ignore_failure=True, stop_if_stuck=True + ) + + if should_open: + yield from self._execute_release() + yield from self._move_base_backward() + + except ActionPrimitiveError as e: + indented_print(e) + if should_open: + yield from self._execute_release() + yield from self._move_base_backward() + else: + yield from self._move_hand_backward() + + if obj.states[object_states.Open].get_value() != should_open: + raise ActionPrimitiveError( + ActionPrimitiveError.Reason.POST_CONDITION_ERROR, + "Despite executing the planned trajectory, the object did not open or close as expected. Maybe try again", + {"target object": obj.name, "is it currently open": obj.states[object_states.Open].get_value()}, + ) # TODO: Figure out how to generalize out of this "backing out" behavior. def _move_base_backward(self, steps=5, speed=0.2): @@ -579,8 +580,8 @@ def _grasp(self, obj): {"target object": obj.name}, ) - indented_print("Moving hand back") - yield from self._reset_hand() + # indented_print("Moving hand back") + # yield from self._reset_hand() indented_print("Done with grasp") @@ -615,36 +616,38 @@ def _place_inside(self, obj): """ yield from self._place_with_predicate(obj, object_states.Inside) - # def _toggle_on(self, obj): - # yield from self._toggle(obj, True) + def _toggle_on(self, obj): + yield from self._toggle(obj, True) - # def _toggle_off(self, obj): - # yield from self._toggle(obj, False) + def _toggle_off(self, obj): + yield from self._toggle(obj, False) - # def _toggle(self, obj, value): - # if obj.states[object_states.ToggledOn].get_value() == value: - # return + def _toggle(self, obj, value): + raise NotImplementedError("Toggle is not implemented correctly yet.") - # # Put the hand in the toggle marker. - # toggle_state = obj.states[object_states.ToggledOn] - # toggle_position = toggle_state.get_link_position() - # yield from self._navigate_if_needed(obj, toggle_position) + if obj.states[object_states.ToggledOn].get_value() == value: + return - # # Just keep the current hand orientation. - # hand_orientation = self.robot.eef_links[self.arm].get_position_orientation()[1] - # desired_hand_pose = (toggle_position, hand_orientation) + # Put the hand in the toggle marker. + toggle_state = obj.states[object_states.ToggledOn] + toggle_position = toggle_state.get_link_position() + yield from self._navigate_if_needed(obj, toggle_position) - # yield from self._move_hand(desired_hand_pose) + # Just keep the current hand orientation. 
+ hand_orientation = self.robot.eef_links[self.arm].get_position_orientation()[1] + desired_hand_pose = (toggle_position, hand_orientation) - # if obj.states[object_states.ToggledOn].get_value() != value: - # raise ActionPrimitiveError( - # ActionPrimitiveError.Reason.POST_CONDITION_ERROR, - # "The object did not toggle as expected - maybe try again", - # { - # "target object": obj.name, - # "is it currently toggled on": obj.states[object_states.ToggledOn].get_value(), - # }, - # ) + yield from self._move_hand(desired_hand_pose) + + if obj.states[object_states.ToggledOn].get_value() != value: + raise ActionPrimitiveError( + ActionPrimitiveError.Reason.POST_CONDITION_ERROR, + "The object did not toggle as expected - maybe try again", + { + "target object": obj.name, + "is it currently toggled on": obj.states[object_states.ToggledOn].get_value(), + }, + ) def _place_with_predicate(self, obj, predicate): """ @@ -721,7 +724,8 @@ def _place_with_predicate(self, obj, predicate): {"dropped object": obj_in_hand.name, "target object": obj.name}, ) - yield from self._reset_hand() + # indented_print("Moving hand back") + # yield from self._reset_hand() def _convert_cartesian_to_joint_space(self, target_pose): """ @@ -973,7 +977,7 @@ def _execute_motion_plan( if ignore_physics: self.robot.set_joint_positions(joint_pos) og.sim.step() - if stop_on_contact and detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=True): + if stop_on_contact and detect_robot_collision_in_sim(self.robot): return else: # Convert target joint positions to command @@ -1006,7 +1010,7 @@ def _execute_motion_plan( if base_target_reached and articulation_target_reached: break - collision_detected = detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=True) + collision_detected = detect_robot_collision_in_sim(self.robot) if stop_on_contact and collision_detected: indented_print(f"Collision detected at step {i + 1}/{len(q_traj)}") return @@ -1081,7 +1085,7 @@ def _move_hand_direct_joint(self, joint_pos, stop_on_contact=False, ignore_failu diff_joint_pos = joint_pos - current_joint_pos if th.max(th.abs(diff_joint_pos)).item() < m.JOINT_POS_DIFF_THRESHOLD: return - if stop_on_contact and detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=False): + if stop_on_contact and detect_robot_collision_in_sim(self.robot): return # check if the eef stayed in the same pose for sufficiently long if ( @@ -1162,10 +1166,10 @@ def _move_hand_direct_ik( if reached_goal: return - if stop_on_contact and detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=False): + if stop_on_contact and detect_robot_collision_in_sim(self.robot): return - # if i > 0 and stop_if_stuck and detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=False): + # if i > 0 and stop_if_stuck and detect_robot_collision_in_sim(self.robot): if i > 0 and stop_if_stuck: pos_diff = th.norm(prev_pos - current_pos) orn_diff = T.get_orientation_diff_in_radian(current_orn, prev_orn) @@ -1242,7 +1246,7 @@ def _move_hand_linearly_cartesian( if pos_diff < m.HAND_DIST_THRESHOLD and orn_diff < th.deg2rad(th.tensor([0.1])).item(): return - if stop_on_contact and detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=False): + if stop_on_contact and detect_robot_collision_in_sim(self.robot): return if not ignore_failure: @@ -1287,7 +1291,7 @@ def _move_hand_linearly_cartesian( if pos_diff < 0.001 and orn_diff < th.deg2rad(th.tensor([0.1])).item(): return - if stop_on_contact and detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=False): + if 
stop_on_contact and detect_robot_collision_in_sim(self.robot): return if not ignore_failure: @@ -1389,7 +1393,7 @@ def _overwrite_head_action(self, action): def _get_head_goal_q(self, target_obj_pose): """ Get goal joint positions for head to look at an object of interest, - If the object cannot be seen, return the current head joint positions. + If the object cannot be seen, return the reset (default) head joint positions. """ # get current head joint positions @@ -1420,7 +1424,7 @@ def _get_head_goal_q(self, target_obj_pose): if head2_joint_limits[0] < phi < head2_joint_limits[1]: head2_joint_goal = phi - # if not possible to look at object, return current head joint positions + # if not possible to look at object, return the reset (default) head joint positions else: default_head_pos = self.robot.reset_joint_pos[self.robot.controller_action_idx["camera"]] head1_joint_goal = default_head_pos[0] @@ -1519,30 +1523,7 @@ def _get_reset_eef_pose(self, frame="robot"): Returns: dict of th.tensor: The reset eef pose for each robot arm """ - if isinstance(self.robot, R1): - pose = { - self.robot.arm_names[0]: ( - th.tensor([0.45, 0.183, 1.23]), - th.tensor([1.0, 0.0, 0.0, 0.0]), - ), - self.robot.arm_names[1]: ( - th.tensor([0.45, -0.183, 1.23]), - th.tensor([-1.0, 0.0, 0.0, 0.0]), - ), - } - elif isinstance(self.robot, Tiago): - pose = { - self.robot.arm_names[0]: ( - th.tensor([0.5, 0.23, 0.85]), - th.tensor([0.7071, -0.7071, 0.0000, -0.0000]), - ), - self.robot.arm_names[1]: ( - th.tensor([0.5, -0.23, 0.85]), - th.tensor([-0.7071, -0.7071, -0.0000, -0.0000]), - ), - } - else: - raise ValueError(f"Unsupported robot: {type(self.robot)}") + pose = self._reset_eef_pose if frame == "robot": return pose elif frame == "world": diff --git a/omnigibson/examples/action_primitives/rs_int_example.py b/omnigibson/examples/action_primitives/rs_int_example.py index b784f1c5a..8f9977ea4 100644 --- a/omnigibson/examples/action_primitives/rs_int_example.py +++ b/omnigibson/examples/action_primitives/rs_int_example.py @@ -76,14 +76,14 @@ def main(): apple = scene.object_registry("name", "apple") # Grasp apple - print("Executing controller") + print("Start executing grasp") execute_controller(controller.apply_ref(StarterSemanticActionPrimitiveSet.GRASP, apple), env) - print("Finished executing grasp") + print("Finish executing grasp") # Place on cabinet - print("Executing controller") + print("Start executing place") execute_controller(controller.apply_ref(StarterSemanticActionPrimitiveSet.PLACE_ON_TOP, coffee_table), env) - print("Finished executing place") + print("Finish executing place") og.shutdown() diff --git a/omnigibson/object_states/contact_bodies.py b/omnigibson/object_states/contact_bodies.py index b6ba6187f..eab7e5686 100644 --- a/omnigibson/object_states/contact_bodies.py +++ b/omnigibson/object_states/contact_bodies.py @@ -1,6 +1,8 @@ from omnigibson.object_states.object_state_base import AbsoluteObjectState from omnigibson.utils.sim_utils import prim_paths_to_rigid_prims +import numpy as np + class ContactBodies(AbsoluteObjectState): """ @@ -8,11 +10,12 @@ class ContactBodies(AbsoluteObjectState): For frequent contact checks, consider using RigidContactAPI for performance. 
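    Example (illustrative only; assumes ``obj`` and ``table`` are initialized scene objects and
    uses the ``non_zero_impulse`` flag added below):
        >>> bodies = obj.states[ContactBodies].get_value(ignore_objs=(table,), non_zero_impulse=True)
        >>> in_contact = any(bodies)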
""" - def _get_value(self, ignore_objs=None): + def _get_value(self, ignore_objs=None, non_zero_impulse=False): # Compute bodies in contact, minus the self-owned bodies bodies = set() for contact in self.obj.contact_list(): - bodies.update({contact.body0, contact.body1}) + if not non_zero_impulse or np.linalg.norm(tuple(contact.impulse)) > 0: + bodies.update({contact.body0, contact.body1}) bodies -= set(self.obj.link_prim_paths) rigid_prims = prim_paths_to_rigid_prims(bodies, self.obj.scene) # Ignore_objs should either be None or tuple (CANNOT be list because we need to hash these inputs) diff --git a/omnigibson/objects/dataset_object.py b/omnigibson/objects/dataset_object.py index 98fce936b..8b5330a3a 100644 --- a/omnigibson/objects/dataset_object.py +++ b/omnigibson/objects/dataset_object.py @@ -239,11 +239,14 @@ def _post_load(self): scale[valid_idxes] = ( th.tensor(self._load_config["bounding_box"])[valid_idxes] / self.native_bbox[valid_idxes] ) + elif self._load_config["scale"] is not None: + scale = self._load_config["scale"] + scale = scale if th.is_tensor(scale) else th.tensor(scale, dtype=th.float32) else: - scale = th.ones(3) if self._load_config["scale"] is None else self._load_config["scale"] + scale = th.ones(3) # Assert that the scale does not have too small dimensions - assert th.all(th.tensor(scale) > 1e-4), f"Scale of {self.name} is too small: {scale}" + assert th.all(scale > 1e-4), f"Scale of {self.name} is too small: {scale}" # Set this scale in the load config -- it will automatically scale the object during self.initialize() self._load_config["scale"] = scale diff --git a/omnigibson/robots/fetch.py b/omnigibson/robots/fetch.py index 325cf2f10..8b4702cb7 100644 --- a/omnigibson/robots/fetch.py +++ b/omnigibson/robots/fetch.py @@ -212,8 +212,8 @@ def finger_lengths(self): def assisted_grasp_start_points(self): return { self.default_arm: [ - GraspingPoint(link_name="r_gripper_finger_link", position=th.tensor([0.025, -0.012, 0.0])), - GraspingPoint(link_name="r_gripper_finger_link", position=th.tensor([-0.025, -0.012, 0.0])), + GraspingPoint(link_name="r_gripper_finger_link", position=th.tensor([0.025, -0.02, 0.0])), + GraspingPoint(link_name="r_gripper_finger_link", position=th.tensor([-0.025, -0.02, 0.0])), ] } @@ -221,8 +221,8 @@ def assisted_grasp_start_points(self): def assisted_grasp_end_points(self): return { self.default_arm: [ - GraspingPoint(link_name="l_gripper_finger_link", position=th.tensor([0.025, 0.012, 0.0])), - GraspingPoint(link_name="l_gripper_finger_link", position=th.tensor([-0.025, 0.012, 0.0])), + GraspingPoint(link_name="l_gripper_finger_link", position=th.tensor([0.025, 0.02, 0.0])), + GraspingPoint(link_name="l_gripper_finger_link", position=th.tensor([-0.025, 0.02, 0.0])), ] } diff --git a/omnigibson/utils/motion_planning_utils.py b/omnigibson/utils/motion_planning_utils.py index 32b6ceb67..a4d9bb850 100644 --- a/omnigibson/utils/motion_planning_utils.py +++ b/omnigibson/utils/motion_planning_utils.py @@ -11,7 +11,8 @@ from omnigibson.utils.geometry_utils import wrap_angle from omnigibson.utils.sim_utils import prim_paths_to_rigid_prims from omnigibson.utils.ui_utils import create_module_logger -from omnigibson.utils.usd_utils import GripperRigidContactAPI +from omnigibson.utils.constants import GROUND_CATEGORIES +from omnigibson.object_states import ContactBodies # Create module logger logger = create_module_logger(module_name=__name__) @@ -522,7 +523,7 @@ def detect_robot_collision_in_sim(robot, filter_objs=None, ignore_obj_in_hand=Tr Args: 
robot (BaseRobot): Robot object to detect collisions for - filter_objs (Array of StatefulObject or None): Objects to ignore collisions with + filter_objs (list of DatasetObject or None): Objects to ignore collisions with ignore_obj_in_hand (bool): Whether to ignore collisions with the object in the robot's hand Returns: @@ -531,24 +532,22 @@ def detect_robot_collision_in_sim(robot, filter_objs=None, ignore_obj_in_hand=Tr if filter_objs is None: filter_objs = [] - filter_categories = ["floors"] - - obj_in_hand = robot._ag_obj_in_hand[robot.default_arm] - if obj_in_hand is not None and ignore_obj_in_hand: - filter_objs.append(obj_in_hand) + if ignore_obj_in_hand: + for arm in robot.arm_names: + if robot.grasping_mode in ["sticky", "assisted"]: + if robot._ag_obj_in_hand[arm] is not None: + filter_objs.append(robot._ag_obj_in_hand[arm]) + elif robot.grasping_mode == "physical": + prim_paths = robot._find_gripper_raycast_collisions(arm=arm) + for obj, _ in prim_paths_to_rigid_prims(prim_paths, robot.scene): + filter_objs.append(obj) + else: + raise ValueError(f"Unknown grasping mode: {robot.grasping_mode}") - # Use the RigidCollisionAPI to get the things this robot is colliding with - scene_idx = robot.scene.idx - link_paths = set(robot.link_prim_paths) - collision_body_paths = { - row - for row, _ in GripperRigidContactAPI.get_contact_pairs(scene_idx, column_prim_paths=link_paths) - if row not in link_paths - } + for category in GROUND_CATEGORIES: + filter_objs.extend(robot.scene.object_registry("category", category, [])) - # Convert to prim objects and filter out the necessary objects. - rigid_prims = prim_paths_to_rigid_prims(collision_body_paths, robot.scene) - return any(o not in filter_objs and o.category not in filter_categories for o, p in rigid_prims) + return any(robot.states[ContactBodies].get_value(ignore_objs=tuple(filter_objs), non_zero_impulse=True)) def astar(search_map, start, goal, eight_connected=True): diff --git a/tests/test_robot_states_flatcache.py b/tests/test_robot_states_flatcache.py index ae699ca3e..7520e83bb 100644 --- a/tests/test_robot_states_flatcache.py +++ b/tests/test_robot_states_flatcache.py @@ -9,6 +9,7 @@ from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.utils.transform_utils import mat2pose, pose2mat, quaternions_close, relative_pose_transform from omnigibson.utils.usd_utils import PoseAPI +from omnigibson.utils.sim_utils import prim_paths_to_rigid_prims def setup_environment(flatcache): @@ -259,10 +260,14 @@ def test_grasping_mode(): physical="Physical Grasping - No additional grasping assistance applied", ) - def object_is_in_hand(robot, obj): - eef_position = robot.get_eef_position() - obj_position = obj.get_position_orientation()[0] - return th.norm(eef_position - obj_position) < 0.05 + def object_is_in_hand(robot, obj, grasping_mode): + if grasping_mode in ["sticky", "assisted"]: + return robot._ag_obj_in_hand[robot.default_arm] == obj + elif grasping_mode == "physical": + prim_paths = robot._find_gripper_raycast_collisions() + return obj in {obj for (obj, _) in prim_paths_to_rigid_prims(prim_paths, obj.scene)} + else: + raise ValueError(f"Unknown grasping mode: {grasping_mode}") for grasping_mode in grasping_modes: robot = Fetch( @@ -303,21 +308,27 @@ def object_is_in_hand(robot, obj): for _ in range(20): og.sim.step() - assert object_is_in_hand(robot, box_object), f"Grasping mode {grasping_mode} failed to grasp the object" + assert object_is_in_hand( + robot, box_object, grasping_mode + ), f"Grasping mode 
{grasping_mode} failed to grasp the object" # Move eef eef_offset = th.tensor([0.0, 0.2, 0.2]) for action in action_primitives._move_hand_direct_ik((target_eef_pos + eef_offset, target_eef_orn)): env.step(action) - assert object_is_in_hand(robot, box_object), f"Grasping mode {grasping_mode} failed to keep the object in hand" + assert object_is_in_hand( + robot, box_object, grasping_mode + ), f"Grasping mode {grasping_mode} failed to keep the object in hand" # Release the box gripper_controller.update_goal(cb.array([1]), robot.get_control_dict()) for _ in range(20): og.sim.step() - assert not object_is_in_hand(robot, box_object), f"Grasping mode {grasping_mode} failed to release the object" + assert not object_is_in_hand( + robot, box_object, grasping_mode + ), f"Grasping mode {grasping_mode} failed to release the object" # Stop the simulator and remove the robot og.sim.stop() From 4c29f321eec6748e0b8df59ae2274d6093971c3d Mon Sep 17 00:00:00 2001 From: Chengshu Li Date: Fri, 17 Jan 2025 15:18:50 -0800 Subject: [PATCH 60/76] address comments --- .../starter_semantic_action_primitives.py | 50 +++++++++---------- .../holonomic_base_joint_controller.py | 2 + tests/test_robot_states_flatcache.py | 1 + 3 files changed, 28 insertions(+), 25 deletions(-) diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index 932b69bdc..b644de3a8 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -27,7 +27,8 @@ from omnigibson.action_primitives.curobo import CuRoboEmbodimentSelection, CuRoboMotionGenerator from omnigibson.controllers import ( InverseKinematicsController, - JointController, + HolonomicBaseJointController, + DifferentialDriveController, ) from omnigibson.macros import create_module_macros from omnigibson.objects.object_base import BaseObject @@ -95,7 +96,7 @@ m.HAND_DIST_THRESHOLD = 0.002 m.DEFAULT_DIST_THRESHOLD = 0.005 -m.DEFAULT_ANGLE_THRESHOLD = 0.1 +m.DEFAULT_ANGLE_THRESHOLD = 0.02 m.LOW_PRECISION_DIST_THRESHOLD = 0.1 m.LOW_PRECISION_ANGLE_THRESHOLD = 0.2 @@ -162,7 +163,7 @@ def __init__( """ log.warning( "The StarterSemanticActionPrimitive is a work-in-progress and is only provided as an example. " - "It currently only works with Tiago and R1 with their JointControllers set to absolute position mode." + "It currently only works with Tiago and R1 with their HolonomicBaseJointController/JointControllers set to absolute position mode." 
) super().__init__(env, robot) self.controller_functions = { @@ -1497,7 +1498,7 @@ def _reset_robot(self): embodiment_selection=CuRoboEmbodimentSelection.ARM, ) indented_print(f"Plan has {len(q_traj)} steps") - yield from self._execute_motion_plan(q_traj, low_precision=True) + yield from self._execute_motion_plan(q_traj) def _reset_hand(self): """ @@ -1511,7 +1512,7 @@ def _reset_hand(self): reset_eef_pose = self._get_reset_eef_pose("world")[self.arm] if self.debug_visual_marker is not None: self.debug_visual_marker.set_position_orientation(*reset_eef_pose) - yield from self._move_hand(reset_eef_pose, low_precision=True) + yield from self._move_hand(reset_eef_pose) def _get_reset_eef_pose(self, frame="robot"): """ @@ -1634,6 +1635,7 @@ def _navigate_to_pose_direct(self, pose_2d, low_precision=False): end_pose = self._get_robot_pose_from_2d_pose(pose_2d) body_target_pose = self._world_pose_to_robot_pose(end_pose) + finished_rotating = False for _ in range(m.MAX_STEPS_FOR_WAYPOINT_NAVIGATION): if th.norm(body_target_pose[0][:2]) < dist_threshold: break @@ -1645,25 +1647,25 @@ def _navigate_to_pose_direct(self, pose_2d, low_precision=False): ) body_intermediate_pose = self._world_pose_to_robot_pose(intermediate_pose) diff_yaw = T.quat2euler(body_intermediate_pose[1])[2].item() - if abs(diff_yaw) > m.DEFAULT_ANGLE_THRESHOLD: + if not finished_rotating and abs(diff_yaw) > m.DEFAULT_ANGLE_THRESHOLD: yield from self._rotate_in_place(intermediate_pose, angle_threshold=m.DEFAULT_ANGLE_THRESHOLD) else: + finished_rotating = True action = self._empty_action() - - if isinstance(self.robot.controllers["base"], JointController): - base_action_size = self.robot.controller_action_idx["base"].numel() + if isinstance(self.robot.controllers["base"], HolonomicBaseJointController): assert ( - base_action_size == 3 - ), "Currently, the action primitives only support [x, y, theta] joint controller" + self.robot.controllers["base"].motor_type == "velocity" + ), "Holonomic base controller must be in velocity mode" direction_vec = ( body_target_pose[0][:2] / th.norm(body_target_pose[0][:2]) * m.KP_LIN_VEL[type(self.robot)] ) base_action = th.tensor([direction_vec[0], direction_vec[1], 0.0], dtype=th.float32) action[self.robot.controller_action_idx["base"]] = base_action - else: - # Diff drive controller + elif isinstance(self.robot.controllers["base"], DifferentialDriveController): base_action = th.tensor([m.KP_LIN_VEL[type(self.robot)], 0.0], dtype=th.float32) action[self.robot.controller_action_idx["base"]] = base_action + else: + raise ValueError(f"Unsupported base controller: {type(self.robot.controllers['base'])}") yield self._postprocess_action(action) @@ -1689,10 +1691,9 @@ def _rotate_in_place(self, end_pose, angle_threshold=m.DEFAULT_ANGLE_THRESHOLD): Returns: th.tensor or None: Action array for one step for the robot to rotate or None if it is done rotating """ - body_target_pose = self._world_pose_to_robot_pose(end_pose) - diff_yaw = T.quat2euler(body_target_pose[1])[2].item() - for _ in range(m.MAX_STEPS_FOR_WAYPOINT_NAVIGATION): + body_target_pose = self._world_pose_to_robot_pose(end_pose) + diff_yaw = T.quat2euler(body_target_pose[1])[2].item() if abs(diff_yaw) < angle_threshold: break @@ -1703,22 +1704,21 @@ def _rotate_in_place(self, end_pose, angle_threshold=m.DEFAULT_ANGLE_THRESHOLD): base_action = action[self.robot.controller_action_idx["base"]] - if not isinstance(self.robot.controllers["base"], JointController): - base_action[0] = 0.0 - base_action[1] = ang_vel - else: + if 
isinstance(self.robot.controllers["base"], HolonomicBaseJointController): assert ( - base_action.numel() == 3 - ), "Currently, the action primitives only support [x, y, theta] joint controller" + self.robot.controllers["base"].motor_type == "velocity" + ), "Holonomic base controller must be in velocity mode" base_action[0] = 0.0 base_action[1] = 0.0 base_action[2] = ang_vel + elif isinstance(self.robot.controllers["base"], DifferentialDriveController): + base_action[0] = 0.0 + base_action[1] = ang_vel + else: + raise ValueError(f"Unsupported base controller: {type(self.robot.controllers['base'])}") action[self.robot.controller_action_idx["base"]] = base_action yield self._postprocess_action(action) - - body_target_pose = self._world_pose_to_robot_pose(end_pose) - diff_yaw = T.quat2euler(body_target_pose[1])[2].item() else: raise ActionPrimitiveError( ActionPrimitiveError.Reason.EXECUTION_ERROR, diff --git a/omnigibson/controllers/holonomic_base_joint_controller.py b/omnigibson/controllers/holonomic_base_joint_controller.py index 8c36bd265..6ab998ac8 100644 --- a/omnigibson/controllers/holonomic_base_joint_controller.py +++ b/omnigibson/controllers/holonomic_base_joint_controller.py @@ -84,6 +84,8 @@ def __init__( use_impedances=use_impedances, use_gravity_compensation=use_gravity_compensation, use_cc_compensation=use_cc_compensation, + use_delta_commands=False, + compute_delta_in_quat_space=None, ) def _update_goal(self, command, control_dict): diff --git a/tests/test_robot_states_flatcache.py b/tests/test_robot_states_flatcache.py index 7520e83bb..3de592249 100644 --- a/tests/test_robot_states_flatcache.py +++ b/tests/test_robot_states_flatcache.py @@ -206,6 +206,7 @@ def test_robot_load_drive(): for action in action_primitives._navigate_to_pose_direct(goal_location): env.step(action) assert th.norm(robot.get_position()[:2] - goal_location[:2]) < 0.1 + assert robot.get_rpy()[2] - goal_location[2] < 0.1 # Stop the simulator and remove the robot og.sim.stop() From e9d328e8d3ece12e36169154b3740506bada439b Mon Sep 17 00:00:00 2001 From: Chengshu Li Date: Fri, 17 Jan 2025 19:36:58 -0800 Subject: [PATCH 61/76] address final comments --- .../starter_semantic_action_primitives.py | 188 +++++++++++------- omnigibson/tasks/grasp_task.py | 2 +- omnigibson/utils/grasping_planning_utils.py | 8 +- 3 files changed, 119 insertions(+), 79 deletions(-) diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index b644de3a8..0e8ab47c4 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -85,6 +85,7 @@ m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_WITH_OBJECT_AND_PREDICATE = 20 m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_NEAR_OBJECT = 200 +m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_FOR_CORRECT_ROOM = 20 m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_IN_ROOM = 60 m.MAX_ATTEMPTS_FOR_SAMPLING_PLACE_POSE = 50 m.PREDICATE_SAMPLING_Z_OFFSET = 0.02 @@ -140,8 +141,7 @@ def __init__( enable_head_tracking=True, always_track_eef=False, task_relevant_objects_only=False, - planning_batch_size=3, - collision_check_batch_size=5, + curobo_batch_size=3, debug_visual_marker=None, skip_curobo_initilization=False, ): @@ -156,8 +156,7 @@ def __init__( to switching between target object and end effector based on context. Defaults to False. task_relevant_objects_only (bool): Whether to only consider objects relevant to the task when computing the action space. Defaults to False. 
- planning_batch_size (int): The batch size for curobo motion planning. Defaults to 3. - collision_check_batch_size (int): The batch size for curobo collision checking. Defaults to 5. + curobo_batch_size (int): The batch size for curobo motion planning and collision checking. Defaults to 3. debug_visual_marker (PrimitiveObject): The object to use for debug visual markers. Defaults to None. skip_curobo_initilization (bool): Whether to skip curobo initialization. Defaults to False. """ @@ -182,7 +181,7 @@ def __init__( if skip_curobo_initilization else CuRoboMotionGenerator( robot=self.robot, - batch_size=planning_batch_size, + batch_size=curobo_batch_size, collision_activation_distance=m.DEFAULT_COLLISION_ACTIVATION_DISTANCE, ) ) @@ -211,7 +210,7 @@ def __init__( arm_target = cb.to_torch(control_dict["joint_position"])[arm_ctrl.dof_idx] self._arm_targets[arm] = arm_target - self._collision_check_batch_size = collision_check_batch_size + self._curobo_batch_size = curobo_batch_size self.debug_visual_marker = debug_visual_marker @property @@ -321,7 +320,7 @@ def apply_ref(self, primitive, *args, attempts=5): raise ActionPrimitiveErrorGroup(errors) - # One-attempt debug version + # # One-attempt debug version # ctrl = self.controller_functions[primitive] # yield from ctrl(*args) # if not self._get_obj_in_hand(): @@ -377,7 +376,7 @@ def _open_or_close(self, obj, should_open): approach_pose = (approach_pos, grasp_pose[1]) # If the grasp pose is too far, navigate - yield from self._navigate_if_needed(obj, pose_on_obj=grasp_pose) + yield from self._navigate_if_needed(obj, eef_pose=grasp_pose) yield from self._move_hand(grasp_pose, stop_if_stuck=True) @@ -387,7 +386,7 @@ def _open_or_close(self, obj, should_open): # Since the grasp pose is slightly off the object, we want to move towards the object, around 5cm. # It's okay if we can't go all the way because we run into the object. - yield from self._navigate_if_needed(obj, pose_on_obj=approach_pose) + yield from self._navigate_if_needed(obj, eef_pose=approach_pose) if should_open: yield from self._move_hand_linearly_cartesian( @@ -531,42 +530,50 @@ def _grasp(self, obj): # Allow grasping from suboptimal extents if we've tried enough times. indented_print("Sampling grasp pose") grasp_poses = get_grasp_poses_for_object_sticky(obj) - grasp_pose, object_direction = random.choice(grasp_poses) + grasp_pos, grasp_quat = random.choice(grasp_poses) - grasp_offset_in_z = self.robot.finger_lengths[self.arm] / 2.0 + m.GRASP_APPROACH_DISTANCE + # Identity quaternion for top-down grasping (x-forward, y-right, z-down) + approach_dir = T.quat2mat(grasp_quat) @ th.tensor([0.0, 0.0, -1.0]) - # Adjust grasp pose with reset orientation and finger length offset - reset_orientation = self._get_reset_eef_pose("world")[self.arm][1] - grasp_pos = grasp_pose[0] - object_direction * grasp_offset_in_z - grasp_quat = T.quat_multiply(grasp_pose[1], reset_orientation) - grasp_pose = (grasp_pos, grasp_quat) + pregrasp_offset = self.robot.finger_lengths[self.arm] / 2.0 + m.GRASP_APPROACH_DISTANCE + + pregrasp_pos = grasp_pos - approach_dir * pregrasp_offset + + # The sampled grasp pose is robot-agnostic + # We need to multiply by the quaternion of the robot's eef frame of top-down grasping (x-forward, y-right, z-down) + grasp_quat = T.quat_multiply(grasp_quat, th.tensor([1.0, 0.0, 0.0, 0.0])) - # Prepare data for the approach later. 
- approach_pos = grasp_pose[0] + object_direction * grasp_offset_in_z - approach_pose = (approach_pos, grasp_pose[1]) + pregrasp_pose = (pregrasp_pos, grasp_quat) + grasp_pose = (grasp_pos, grasp_quat) - # If the grasp pose is too far, navigate. + # If the pre-grasp pose is too far, navigate. indented_print("Navigating to grasp pose if needed") - yield from self._navigate_if_needed(obj, pose_on_obj=grasp_pose) + yield from self._navigate_if_needed(obj, eef_pose=pregrasp_pose) indented_print("Moving hand to grasp pose") - yield from self._move_hand(grasp_pose) + yield from self._move_hand(pregrasp_pose) if self.robot.grasping_mode == "sticky": - # Pre-grasp in sticky grasping mode. - indented_print("Pregrasp squeeze") + indented_print("Sticky grasping: close gripper") + # Close the gripper yield from self._execute_grasp() - # Since the pregrasp pose is slightly away from the object, we want to move towards the object - # It's okay if we can't go all the way because we run into the object. - indented_print("Performing grasp approach") - # Only translate in the z direction. - # Ignore the object during motion planning because the fingers are closed. + + indented_print("Sticky grasping: approach") + # Only translate in the z-axis of the goal frame (assuming z-axis points out of the gripper) + # This is the same as requesting the end-effector to move along the approach_dir direction. + # By default, it's NOT the z-axis of the world frame unless `project_pose_to_goal_frame=False` is set in curobo. + # For sticky grasping, we also need to ignore the object during motion planning because the fingers are already closed. yield from self._move_hand( - approach_pose, motion_constraint=[1, 1, 1, 1, 1, 0], stop_on_contact=True, ignore_objects=[obj] + grasp_pose, motion_constraint=[1, 1, 1, 1, 1, 0], stop_on_contact=True, ignore_objects=[obj] ) elif self.robot.grasping_mode == "assisted": - indented_print("Performing grasp approach") - yield from self._move_hand(approach_pose) + indented_print("Assisted grasping: approach") + # Same as above in terms of moving along the approach_dir direction, but we don't ignore the object. + # For this approach motion, we expect the fingers to move towards and eventually "wrap" around the object without collisions. + yield from self._move_hand(grasp_pose, motion_constraint=[1, 1, 1, 1, 1, 0]) + + # Now we close the fingers to grasp the object with AG. 
+ indented_print("Assisted grasping: close gripper") yield from self._execute_grasp() # Step a few times to update @@ -682,8 +689,7 @@ def _place_with_predicate(self, obj, predicate): valid_navigation_pose = None for _ in range(m.MAX_ATTEMPTS_FOR_SAMPLING_PLACE_POSE): # Sample one pose at a time - obj_pose = self._sample_pose_with_object_and_predicate(predicate, obj_in_hand, obj) - obj_pose = (obj_pose[0], obj_in_hand.get_position_orientation()[1]) + obj_pose = self._sample_pose_with_object_and_predicate(predicate, obj_in_hand, obj, keep_orientation=True) hand_pose = self._get_hand_pose_for_object_pose(obj_pose) # First check if we can directly move the hand there @@ -694,7 +700,7 @@ def _place_with_predicate(self, obj, predicate): # If not, try to find a valid navigation pose for this hand pose valid_navigation_pose = self._sample_pose_near_object( - obj, pose_on_obj=hand_pose, sampling_attempts=10, skip_obstacle_update=True + obj, eef_pose=hand_pose, sampling_attempts=10, skip_obstacle_update=True ) if valid_navigation_pose is not None: @@ -955,6 +961,8 @@ def _plan_joint_motion( # Grab the first successful trajectory if found success_idx = th.where(successes)[0].cpu() if len(success_idx) == 0: + # print("motion planning fails") + # breakpoint() raise ActionPrimitiveError( ActionPrimitiveError.Reason.PLANNING_ERROR, "There is no accessible path from where you are to the desired pose. Try again", @@ -1548,6 +1556,8 @@ def _navigate_to_pose(self, pose_2d): target_pos = {self.robot.base_footprint_link_name: pose_3d[0]} target_quat = {self.robot.base_footprint_link_name: pose_3d[1]} + # print("base motion planning") + # breakpoint() q_traj = self._plan_joint_motion(target_pos, target_quat, CuRoboEmbodimentSelection.BASE) yield from self._execute_motion_plan(q_traj) @@ -1575,40 +1585,38 @@ def _draw_plan(self, plan): cv2.imshow("SceneGraph", img) cv2.waitKey(1) - def _navigate_if_needed(self, obj, pose_on_obj=None, **kwargs): + def _navigate_if_needed(self, obj, eef_pose=None, **kwargs): """ Yields action to navigate the robot to be in range of the object if it not in the range Args: obj (StatefulObject): Object for the robot to be in range of - pose_on_obj (Iterable): (pos, quat) Pose + eef_pose (Iterable): (pos, quat) Pose Returns: th.tensor or None: Action array for one step for the robot to navigate or None if it is done navigating """ self._motion_generator.update_obstacles() - if pose_on_obj is not None: - if self._target_in_reach_of_robot(pose_on_obj, skip_obstacle_update=True): - # No need to navigate. - return - elif self._target_in_reach_of_robot(obj.get_position_orientation(), skip_obstacle_update=True): + eef_pose = eef_pose if eef_pose is not None else obj.get_position_orientation() + if self._target_in_reach_of_robot(eef_pose, skip_obstacle_update=True): + # No need to navigate. 
return - yield from self._navigate_to_obj(obj, pose_on_obj=pose_on_obj, skip_obstacle_update=True, **kwargs) + yield from self._navigate_to_obj(obj, eef_pose=eef_pose, skip_obstacle_update=True, **kwargs) - def _navigate_to_obj(self, obj, pose_on_obj=None, skip_obstacle_update=False, **kwargs): + def _navigate_to_obj(self, obj, eef_pose=None, skip_obstacle_update=False, **kwargs): """ Yields action to navigate the robot to be in range of the pose Args: obj (StatefulObject or list of StatefulObject): object(s) to be in range of - pose_on_obj (Iterable or list of Iterable): (pos, quat) Pose(s) + eef_pose (Iterable or list of Iterable): (pos, quat) Pose(s) Returns: th.tensor or None: Action array for one step for the robot to navigate in range or None if it is done navigating """ pose = self._sample_pose_near_object( - obj, pose_on_obj=pose_on_obj, skip_obstacle_update=skip_obstacle_update, **kwargs + obj, eef_pose=eef_pose, skip_obstacle_update=skip_obstacle_update, **kwargs ) if pose is None: raise ActionPrimitiveError( @@ -1732,7 +1740,7 @@ def _rotate_in_place(self, end_pose, angle_threshold=m.DEFAULT_ANGLE_THRESHOLD): def _sample_pose_near_object( self, obj, - pose_on_obj=None, + eef_pose=None, sampling_attempts=m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_NEAR_OBJECT, skip_obstacle_update=False, **kwargs, @@ -1742,7 +1750,7 @@ def _sample_pose_near_object( Args: obj (StatefulObject or list of StatefulObject): object(s) to sample a 2d pose near - pose_on_obj (Iterable or list of Iterable): (pos, quat) Pose(s) to sample near. + eef_pose (Iterable or list of Iterable): (pos, quat) Pose(s) to sample near. If provided, must match the length of obj list if obj is a list Returns: @@ -1754,11 +1762,12 @@ def _sample_pose_near_object( yaw_lo, yaw_hi = -math.pi, math.pi avg_arm_workspace_range = th.mean(self.robot.arm_workspace_range[self.arm]) - target_pose = ( - (self._sample_position_on_aabb_side(obj), self._get_reset_eef_pose()[self.arm][1]) - if pose_on_obj is None - else pose_on_obj - ) + target_pose = eef_pose if eef_pose is not None else obj.get_position_orientation() + # target_pose = ( + # (self._sample_position_on_aabb_side(obj), self._get_reset_eef_pose()[self.arm][1]) + # if eef_pose is None + # else eef_pose + # ) obj_rooms = ( obj.in_rooms if obj.in_rooms else [self.robot.scene._seg_map.get_room_instance_by_point(target_pose[0][:2])] @@ -1770,8 +1779,8 @@ def _sample_pose_near_object( self._motion_generator.update_obstacles() while attempt < sampling_attempts: candidate_poses = [] - for _ in range(self._collision_check_batch_size): - while True: + for _ in range(self._curobo_batch_size): + for _ in range(m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_FOR_CORRECT_ROOM): distance = (th.rand(1) * (distance_hi - distance_lo) + distance_lo).item() yaw = th.rand(1) * (yaw_hi - yaw_lo) + yaw_lo candidate_2d_pose = th.cat( @@ -1784,19 +1793,24 @@ def _sample_pose_near_object( # Check room if self.robot.scene._seg_map.get_room_instance_by_point(candidate_2d_pose[:2]) in obj_rooms: - # indented_print("Candidate position is in the wrong room.") break candidate_poses.append(candidate_2d_pose) - result = self._validate_poses(candidate_poses, pose_on_obj=target_pose, skip_obstacle_update=True, **kwargs) + # Normally candidate_poses will have length equal to self._curobo_batch_size + # In case we are unable to find a valid pose in the room, we will have less than self._curobo_batch_size. + # We skip the following steps if the list is empty. 
+ if len(candidate_poses) > 0: + result = self._validate_poses( + candidate_poses, eef_pose=target_pose, skip_obstacle_update=True, **kwargs + ) - # If anything in result is true, return the pose - for i, res in enumerate(result): - if res: - indented_print("Found valid position near object.") - return candidate_poses[i] + # If anything in result is true, return the pose + for i, res in enumerate(result): + if res: + indented_print("Found valid position near object.") + return candidate_poses[i] - attempt += self._collision_check_batch_size + attempt += self._curobo_batch_size return None @staticmethod @@ -1850,7 +1864,7 @@ def _sample_position_on_aabb_side(target_obj): # ) def _sample_pose_with_object_and_predicate( - self, predicate, held_obj, target_obj, near_poses=None, near_poses_threshold=None + self, predicate, held_obj, target_obj, keep_orientation=False, near_poses=None, near_poses_threshold=None ): """ Returns a pose for the held object relative to the target object that satisfies the predicate @@ -1859,6 +1873,9 @@ def _sample_pose_with_object_and_predicate( predicate (object_states.OnTop or object_states.Inside): Relation between held object and the target object held_obj (StatefulObject): Object held by the robot target_obj (StatefulObject): Object to sample a pose relative to + keep_orientation (bool): Whether to keep the current orientation of the held object after placing. + If True, the sampled cuboid will be aligned with the current orientation of the held object. + If False, the sampled cuboid will be aligned with the base link of the held object. near_poses (Iterable of arrays): Poses in the world frame to sample near near_poses_threshold (float): The distance threshold to check if the sampled pose is near the poses in near_poses @@ -1870,16 +1887,28 @@ def _sample_pose_with_object_and_predicate( pred_map = {object_states.OnTop: "onTop", object_states.Inside: "inside"} for _ in range(m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_WITH_OBJECT_AND_PREDICATE): - _, _, bb_extents, bb_center_in_base = held_obj.get_base_aligned_bbox() + if keep_orientation: + # Take the bbox aligned to the world frame because we want to keep the orientation + bb_extents = held_obj.aabb_extent + bb_center = held_obj.aabb_center + # Compute bbox pose in the object base link frame + bb_pos_in_base, bb_orn_in_base = T.relative_pose_transform( + bb_center, th.tensor([0, 0, 0, 1], dtype=th.float32), *held_obj.get_position_orientation() + ) + else: + _, _, bb_extents, bb_pos_in_base = held_obj.get_base_aligned_bbox() + bb_orn_in_base = th.tensor([0, 0, 0, 1], dtype=th.float32) + sampling_results = sample_cuboid_for_predicate(pred_map[predicate], target_obj, bb_extents) if sampling_results[0][0] is None: continue sampled_bb_center = sampling_results[0][0] + th.tensor([0, 0, m.PREDICATE_SAMPLING_Z_OFFSET]) sampled_bb_orn = sampling_results[0][2] - # Get the object pose by subtracting the offset + # Tobj_in_world @ Tbbox_in_obj = Tbbox_in_world + # Tobj_in_world = Tbbox_in_world @ inv(Tbbox_in_obj) sampled_obj_pose = T.pose2mat((sampled_bb_center, sampled_bb_orn)) @ T.pose_inv( - T.pose2mat((bb_center_in_base, th.tensor([0, 0, 0, 1], dtype=th.float32))) + T.pose2mat((bb_pos_in_base, bb_orn_in_base)) ) # Check that the pose is near one of the poses in the near_poses list if provided. 
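
A minimal sketch of the frame algebra used in the hunk above, assuming T.pose2mat and T.pose_inv produce and invert 4x4 homogeneous transforms; the helper name recover_object_pose is illustrative only, not part of this patch:

    import torch as th

    def recover_object_pose(T_bbox_in_world: th.Tensor, T_bbox_in_obj: th.Tensor) -> th.Tensor:
        # Tobj_in_world @ Tbbox_in_obj = Tbbox_in_world
        # =>  Tobj_in_world = Tbbox_in_world @ inv(Tbbox_in_obj)
        return T_bbox_in_world @ th.linalg.inv(T_bbox_in_obj)
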
@@ -1898,13 +1927,13 @@
             {"target object": target_obj.name, "object in hand": held_obj.name, "relation": pred_map[predicate]},
         )
 
-    def _validate_poses(self, candidate_poses, pose_on_obj=None, skip_obstacle_update=False):
+    def _validate_poses(self, candidate_poses, eef_pose=None, skip_obstacle_update=False):
         """
         Determines whether the robot can reach all poses on the objects and is not in collision at the specified 2d poses
 
         Args:
             candidate_poses (list of arrays): Candidate 2d poses (x, y, yaw)
-            pose_on_obj (Iterable of arrays or list of Iterables): Pose(s) on the object(s) in the world frame.
+            eef_pose (Iterable of arrays or list of Iterables): Pose(s) on the object(s) in the world frame.
                 Can be a single pose or list of poses.
             skip_obstacle_update (bool): Whether to skip updating the obstacles in the motion generator
 
@@ -1917,8 +1946,7 @@ def _validate_poses(self, candidate_poses, pose_on_obj=None, skip_obstacle_updat
         current_joint_pos = self.robot.get_joint_positions()
         for pose in candidate_poses:
             joint_pos = current_joint_pos.clone()
-            joint_pos[self.robot.base_idx[:2]] = pose[:2]
-            joint_pos[self.robot.base_idx[3:]] = th.tensor([0.0, 0.0, pose[2]])
+            joint_pos[self.robot.base_control_idx] = pose
             candidate_joint_positions.append(joint_pos)
 
         candidate_joint_positions = th.stack(candidate_joint_positions)
@@ -1927,6 +1955,8 @@ def _validate_poses(self, candidate_poses, pose_on_obj=None, skip_obstacle_updat
         obj_in_hand = self._get_obj_in_hand()
         attached_obj = {self.robot.eef_link_names[self.arm]: obj_in_hand.root_link} if obj_in_hand is not None else None
+        # No need to check for self-collision here because we are only testing new base poses.
+        # This assumes the robot is not currently in self-collision.
invalid_results = self._motion_generator.check_collisions( candidate_joint_positions, self_collision_check=False, @@ -1939,9 +1969,9 @@ def _validate_poses(self, candidate_poses, pose_on_obj=None, skip_obstacle_updat if invalid_results[i].item(): continue - if pose_on_obj is not None: + if eef_pose is not None: pose = self._get_robot_pose_from_2d_pose(candidate_poses[i]) - relative_pose = T.relative_pose_transform(*pose_on_obj, *pose) + relative_pose = T.relative_pose_transform(*eef_pose, *pose) if not self._target_in_reach_of_robot_relative( relative_pose, skip_obstacle_update=skip_obstacle_update ): @@ -1960,8 +1990,16 @@ def _get_robot_pose_from_2d_pose(self, pose_2d): th.tensor: (x,y,z) Position in the world frame th.tensor: (x,y,z,w) Quaternion orientation in the world frame """ - pos = th.tensor([pose_2d[0], pose_2d[1], self.robot.get_position_orientation()[0][2]], dtype=th.float32) - orn = T.euler2quat(th.tensor([0, 0, pose_2d[2]], dtype=th.float32)) + base_joints = self.robot.get_joint_positions()[self.robot.base_idx] + pos = th.tensor([pose_2d[0], pose_2d[1], base_joints[2]], dtype=th.float32) + euler_intrinsic_xyz = th.tensor([base_joints[3], base_joints[4], pose_2d[2]], dtype=th.float32) + mat_x = T.euler2mat(th.tensor([euler_intrinsic_xyz[0], 0, 0], dtype=th.float32)) + mat_y = T.euler2mat(th.tensor([0, euler_intrinsic_xyz[1], 0], dtype=th.float32)) + mat_z = T.euler2mat(th.tensor([0, 0, euler_intrinsic_xyz[2]], dtype=th.float32)) + # intrinsic x-y-z is the same as extrinsic z-y-x + # multiply mat_z first, then mat_y, then mat_x + mat = mat_x @ mat_y @ mat_z + orn = T.mat2quat(mat) return pos, orn def _world_pose_to_robot_pose(self, pose): diff --git a/omnigibson/tasks/grasp_task.py b/omnigibson/tasks/grasp_task.py index a676e65e3..951b98d04 100644 --- a/omnigibson/tasks/grasp_task.py +++ b/omnigibson/tasks/grasp_task.py @@ -114,7 +114,7 @@ def _reset_agent(self, env): # Randomize the robot's 2d pose obj = env.scene.object_registry("name", self.obj_name) grasp_poses = get_grasp_poses_for_object_sticky(obj) - grasp_pose, _ = random.choice(grasp_poses) + grasp_pose = random.choice(grasp_poses) sampled_pose_2d = self._primitive_controller._sample_pose_near_object(obj, pose_on_obj=grasp_pose) robot_pose = self._primitive_controller._get_robot_pose_from_2d_pose(sampled_pose_2d) robot.set_position_orientation(*robot_pose) diff --git a/omnigibson/utils/grasping_planning_utils.py b/omnigibson/utils/grasping_planning_utils.py index 81a0ec3b4..726706841 100644 --- a/omnigibson/utils/grasping_planning_utils.py +++ b/omnigibson/utils/grasping_planning_utils.py @@ -21,12 +21,13 @@ def get_grasp_poses_for_object_sticky(target_obj): """ Obtain a grasp pose for an object from top down, to be used with sticky grasping. + The grasp pose should be in the world frame. Args: target_object (StatefulObject): Object to get a grasp pose for Returns: - List of grasp candidates, where each grasp candidate is a tuple containing the grasp pose and the approach direction. + List of grasp poses. 
""" aabb_min_world, aabb_max_world = target_obj.aabb @@ -38,12 +39,13 @@ def get_grasp_poses_for_object_sticky(target_obj): towards_object_in_world_frame = bbox_center_world - grasp_center_pos towards_object_in_world_frame /= th.norm(towards_object_in_world_frame) + # Identity quaternion for top-down grasping (x-forward, y-right, z-down) grasp_quat = T.euler2quat(th.tensor([0, 0, 0], dtype=th.float32)) grasp_pose = (grasp_center_pos, grasp_quat) - grasp_candidate = [(grasp_pose, towards_object_in_world_frame)] + grasp_poses = [grasp_pose] - return grasp_candidate + return grasp_poses def get_grasp_poses_for_object_sticky_from_arbitrary_direction(target_obj): From 52e91aff05eeada64e781b918844c8f6472334b1 Mon Sep 17 00:00:00 2001 From: Chengshu Li Date: Fri, 17 Jan 2025 22:06:01 -0800 Subject: [PATCH 62/76] fix robot test and curobo test --- .../starter_semantic_action_primitives.py | 39 ++++++++++--------- omnigibson/examples/robots/curobo_example.py | 14 +++---- tests/test_curobo.py | 22 +++++------ 3 files changed, 39 insertions(+), 36 deletions(-) diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index 0e8ab47c4..34b4cf7e1 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -34,6 +34,7 @@ from omnigibson.objects.object_base import BaseObject from omnigibson.robots import R1, BaseRobot, BehaviorRobot, Fetch, Freight, Husky, Locobot, Stretch, Tiago, Turtlebot from omnigibson.robots.manipulation_robot import ManipulationRobot +from omnigibson.robots.holonomic_base_robot import HolonomicBaseRobot from omnigibson.tasks.behavior_task import BehaviorTask from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.utils.geometry_utils import wrap_angle @@ -97,7 +98,7 @@ m.HAND_DIST_THRESHOLD = 0.002 m.DEFAULT_DIST_THRESHOLD = 0.005 -m.DEFAULT_ANGLE_THRESHOLD = 0.02 +m.DEFAULT_ANGLE_THRESHOLD = 0.03 m.LOW_PRECISION_DIST_THRESHOLD = 0.1 m.LOW_PRECISION_ANGLE_THRESHOLD = 0.2 @@ -185,7 +186,6 @@ def __init__( collision_activation_distance=m.DEFAULT_COLLISION_ACTIVATION_DISTANCE, ) ) - self._reset_eef_pose = {arm: self.robot.get_relative_eef_pose(arm) for arm in self.robot.arm_names} self._task_relevant_objects_only = task_relevant_objects_only @@ -196,6 +196,7 @@ def __init__( # Store the current position of the arm as the arm target control_dict = self.robot.get_control_dict() self._arm_targets = {} + self._reset_eef_pose = {} if isinstance(self.robot, ManipulationRobot): for arm_name in self.robot.arm_names: eef = f"eef_{arm_name}" @@ -210,6 +211,8 @@ def __init__( arm_target = cb.to_torch(control_dict["joint_position"])[arm_ctrl.dof_idx] self._arm_targets[arm] = arm_target + self._reset_eef_pose[arm_name] = self.robot.get_relative_eef_pose(arm_name) + self._curobo_batch_size = curobo_batch_size self.debug_visual_marker = debug_visual_marker @@ -1641,10 +1644,9 @@ def _navigate_to_pose_direct(self, pose_2d, low_precision=False): angle_threshold = m.LOW_PRECISION_ANGLE_THRESHOLD if low_precision else m.DEFAULT_ANGLE_THRESHOLD end_pose = self._get_robot_pose_from_2d_pose(pose_2d) - body_target_pose = self._world_pose_to_robot_pose(end_pose) - finished_rotating = False for _ in range(m.MAX_STEPS_FOR_WAYPOINT_NAVIGATION): + body_target_pose = self._world_pose_to_robot_pose(end_pose) if th.norm(body_target_pose[0][:2]) < dist_threshold: break @@ -1655,10 +1657,9 @@ def 
_navigate_to_pose_direct(self, pose_2d, low_precision=False): ) body_intermediate_pose = self._world_pose_to_robot_pose(intermediate_pose) diff_yaw = T.quat2euler(body_intermediate_pose[1])[2].item() - if not finished_rotating and abs(diff_yaw) > m.DEFAULT_ANGLE_THRESHOLD: + if abs(diff_yaw) > m.DEFAULT_ANGLE_THRESHOLD: yield from self._rotate_in_place(intermediate_pose, angle_threshold=m.DEFAULT_ANGLE_THRESHOLD) else: - finished_rotating = True action = self._empty_action() if isinstance(self.robot.controllers["base"], HolonomicBaseJointController): assert ( @@ -1676,8 +1677,6 @@ def _navigate_to_pose_direct(self, pose_2d, low_precision=False): raise ValueError(f"Unsupported base controller: {type(self.robot.controllers['base'])}") yield self._postprocess_action(action) - - body_target_pose = self._world_pose_to_robot_pose(end_pose) else: raise ActionPrimitiveError( ActionPrimitiveError.Reason.EXECUTION_ERROR, @@ -1990,16 +1989,20 @@ def _get_robot_pose_from_2d_pose(self, pose_2d): th.tensor: (x,y,z) Position in the world frame th.tensor: (x,y,z,w) Quaternion orientation in the world frame """ - base_joints = self.robot.get_joint_positions()[self.robot.base_idx] - pos = th.tensor([pose_2d[0], pose_2d[1], base_joints[2]], dtype=th.float32) - euler_intrinsic_xyz = th.tensor([base_joints[3], base_joints[4], pose_2d[2]], dtype=th.float32) - mat_x = T.euler2mat(th.tensor([euler_intrinsic_xyz[0], 0, 0], dtype=th.float32)) - mat_y = T.euler2mat(th.tensor([0, euler_intrinsic_xyz[1], 0], dtype=th.float32)) - mat_z = T.euler2mat(th.tensor([0, 0, euler_intrinsic_xyz[2]], dtype=th.float32)) - # intrinsic x-y-z is the same as extrinsic z-y-x - # multiply mat_z first, then mat_y, then mat_x - mat = mat_x @ mat_y @ mat_z - orn = T.mat2quat(mat) + if isinstance(self.robot, HolonomicBaseRobot): + base_joints = self.robot.get_joint_positions()[self.robot.base_idx] + pos = th.tensor([pose_2d[0], pose_2d[1], base_joints[2]], dtype=th.float32) + euler_intrinsic_xyz = th.tensor([base_joints[3], base_joints[4], pose_2d[2]], dtype=th.float32) + mat_x = T.euler2mat(th.tensor([euler_intrinsic_xyz[0], 0, 0], dtype=th.float32)) + mat_y = T.euler2mat(th.tensor([0, euler_intrinsic_xyz[1], 0], dtype=th.float32)) + mat_z = T.euler2mat(th.tensor([0, 0, euler_intrinsic_xyz[2]], dtype=th.float32)) + # intrinsic x-y-z is the same as extrinsic z-y-x + # multiply mat_z first, then mat_y, then mat_x + mat = mat_x @ mat_y @ mat_z + orn = T.mat2quat(mat) + else: + pos = th.tensor([pose_2d[0], pose_2d[1], 0.0], dtype=th.float32) + orn = T.euler2quat(th.tensor([0.0, 0.0, pose_2d[2]], dtype=th.float32)) return pos, orn def _world_pose_to_robot_pose(self, pose): diff --git a/omnigibson/examples/robots/curobo_example.py b/omnigibson/examples/robots/curobo_example.py index 59f635e6d..9023b2b27 100644 --- a/omnigibson/examples/robots/curobo_example.py +++ b/omnigibson/examples/robots/curobo_example.py @@ -101,21 +101,21 @@ def test_curobo(): "name": "HolonomicBaseJointController", "motor_type": "position", "command_input_limits": None, - "use_impedances": True, + "use_impedances": False, }, "trunk": { "name": "JointController", "motor_type": "position", "command_input_limits": None, "use_delta_commands": False, - "use_impedances": True, + "use_impedances": False, }, "arm_left": { "name": "JointController", "motor_type": "position", "command_input_limits": None, "use_delta_commands": False, - "use_impedances": True, + "use_impedances": False, "pos_kp": 200.0, }, "arm_right": { @@ -123,7 +123,7 @@ def test_curobo(): "motor_type": 
"position", "command_input_limits": None, "use_delta_commands": False, - "use_impedances": True, + "use_impedances": False, "pos_kp": 200.0, }, "gripper_left": { @@ -131,7 +131,7 @@ def test_curobo(): "motor_type": "position", "command_input_limits": None, "use_delta_commands": False, - "use_impedances": True, + "use_impedances": False, "pos_kp": 1500.0, }, "gripper_right": { @@ -139,7 +139,7 @@ def test_curobo(): "motor_type": "position", "command_input_limits": None, "use_delta_commands": False, - "use_impedances": True, + "use_impedances": False, "pos_kp": 1500.0, }, }, @@ -150,7 +150,7 @@ def test_curobo(): "motor_type": "position", "command_input_limits": None, "use_delta_commands": False, - "use_impedances": True, + "use_impedances": False, } # Create env diff --git a/tests/test_curobo.py b/tests/test_curobo.py index b5d7a2219..d63052930 100644 --- a/tests/test_curobo.py +++ b/tests/test_curobo.py @@ -74,14 +74,14 @@ def test_curobo(): "motor_type": "position", "command_input_limits": None, "use_delta_commands": False, - "use_impedances": True, + "use_impedances": False, }, "gripper_0": { "name": "JointController", "motor_type": "position", "command_input_limits": None, "use_delta_commands": False, - "use_impedances": True, + "use_impedances": False, }, }, }, @@ -97,42 +97,42 @@ def test_curobo(): "name": "HolonomicBaseJointController", "motor_type": "position", "command_input_limits": None, - "use_impedances": True, + "use_impedances": False, }, "trunk": { "name": "JointController", "motor_type": "position", "command_input_limits": None, "use_delta_commands": False, - "use_impedances": True, + "use_impedances": False, }, "arm_left": { "name": "JointController", "motor_type": "position", "command_input_limits": None, "use_delta_commands": False, - "use_impedances": True, + "use_impedances": False, }, "arm_right": { "name": "JointController", "motor_type": "position", "command_input_limits": None, "use_delta_commands": False, - "use_impedances": True, + "use_impedances": False, }, "gripper_left": { "name": "JointController", "motor_type": "position", "command_input_limits": None, "use_delta_commands": False, - "use_impedances": True, + "use_impedances": False, }, "gripper_right": { "name": "JointController", "motor_type": "position", "command_input_limits": None, "use_delta_commands": False, - "use_impedances": True, + "use_impedances": False, }, }, }, @@ -183,14 +183,14 @@ def test_curobo(): "motor_type": "position", "command_input_limits": None, "use_delta_commands": False, - "use_impedances": True, + "use_impedances": False, }, "gripper_right": { "name": "JointController", "motor_type": "position", "command_input_limits": None, "use_delta_commands": False, - "use_impedances": True, + "use_impedances": False, }, }, }, @@ -233,7 +233,7 @@ def test_curobo(): batch_size=batch_size, debug=False, use_cuda_graph=True, - collision_activation_distance=0.03, # Use larger activation distance for better reproducibility + collision_activation_distance=0.02, # Use larger activation distance for better reproducibility use_default_embodiment_only=True, ) From 0bea80c1fb906a7402926c7cca94ad6fec7fe1eb Mon Sep 17 00:00:00 2001 From: Chengshu Li Date: Fri, 17 Jan 2025 22:47:23 -0800 Subject: [PATCH 63/76] fix primitive test, factor out sample_grasp_pose --- .../starter_semantic_action_primitives.py | 64 +++++++++++-------- 1 file changed, 39 insertions(+), 25 deletions(-) diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py 
b/omnigibson/action_primitives/starter_semantic_action_primitives.py
index 34b4cf7e1..e265616bf 100644
--- a/omnigibson/action_primitives/starter_semantic_action_primitives.py
+++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py
@@ -488,6 +488,36 @@ def _move_hand_upward(self, steps=5, speed=0.1):
             action[self.robot.controller_action_idx["arm_{}".format(self.arm)][2]] = speed
             yield self._postprocess_action(action)
 
+    def _sample_grasp_pose(self, obj):
+        """
+        Samples an eef grasp pose for the object
+
+        Args:
+            obj (BaseObject): Object to sample grasp pose for
+
+        Returns:
+            2-tuple: (pregrasp_pose, grasp_pose) of the robot eef in the world frame, each a (pos, quat) tuple
+        """
+        indented_print("Sampling grasp pose")
+        grasp_poses = get_grasp_poses_for_object_sticky(obj)
+        grasp_pos, grasp_quat = random.choice(grasp_poses)
+
+        # Identity quaternion for top-down grasping (x-forward, y-right, z-down)
+        approach_dir = T.quat2mat(grasp_quat) @ th.tensor([0.0, 0.0, -1.0])
+
+        pregrasp_offset = self.robot.finger_lengths[self.arm] / 2.0 + m.GRASP_APPROACH_DISTANCE
+
+        pregrasp_pos = grasp_pos - approach_dir * pregrasp_offset
+
+        # The sampled grasp pose is robot-agnostic
+        # Post-multiply by the robot's eef-frame quaternion for top-down grasping (x-forward, y-right, z-down)
+        grasp_quat = T.quat_multiply(grasp_quat, th.tensor([1.0, 0.0, 0.0, 0.0]))
+
+        pregrasp_pose = (pregrasp_pos, grasp_quat)
+        grasp_pose = (grasp_pos, grasp_quat)
+
+        return pregrasp_pose, grasp_pose
+
     def _grasp(self, obj):
         """
         Yields action for the robot to navigate to object if needed, then to grasp it
@@ -530,24 +560,7 @@ def _grasp(self, obj):
             indented_print("Opening hand before grasping")
             yield from self._execute_release()
 
-        # Allow grasping from suboptimal extents if we've tried enough times.
-        indented_print("Sampling grasp pose")
-        grasp_poses = get_grasp_poses_for_object_sticky(obj)
-        grasp_pos, grasp_quat = random.choice(grasp_poses)
-
-        # Identity quaternion for top-down grasping (x-forward, y-right, z-down)
-        approach_dir = T.quat2mat(grasp_quat) @ th.tensor([0.0, 0.0, -1.0])
-
-        pregrasp_offset = self.robot.finger_lengths[self.arm] / 2.0 + m.GRASP_APPROACH_DISTANCE
-
-        pregrasp_pos = grasp_pos - approach_dir * pregrasp_offset
-
-        # The sampled grasp pose is robot-agnostic
-        # We need to multiply by the quaternion of the robot's eef frame of top-down grasping (x-forward, y-right, z-down)
-        grasp_quat = T.quat_multiply(grasp_quat, th.tensor([1.0, 0.0, 0.0, 0.0]))
-
-        pregrasp_pose = (pregrasp_pos, grasp_quat)
-        grasp_pose = (grasp_pos, grasp_quat)
+        pregrasp_pose, grasp_pose = self._sample_grasp_pose(obj)
 
         # If the pre-grasp pose is too far, navigate.
         indented_print("Navigating to grasp pose if needed")
@@ -1600,7 +1613,10 @@ def _navigate_if_needed(self, obj, eef_pose=None, **kwargs):
             th.tensor or None: Action array for one step for the robot to navigate or None if it is done navigating
         """
        self._motion_generator.update_obstacles()
-        eef_pose = eef_pose if eef_pose is not None else obj.get_position_orientation()
+
+        if eef_pose is None:
+            eef_pose, _ = self._sample_grasp_pose(obj)
+
         if self._target_in_reach_of_robot(eef_pose, skip_obstacle_update=True):
             # No need to navigate.
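
A quick sanity sketch for the constant quaternion used in _sample_grasp_pose above, assuming the (x, y, z, w) ordering used throughout omnigibson.utils.transform_utils: th.tensor([1.0, 0.0, 0.0, 0.0]) is a 180-degree rotation about x, which flips the robot-agnostic top-down grasp frame into the robot's eef convention.

    import torch as th
    import omnigibson.utils.transform_utils as T

    # A pi rotation about x is (1, 0, 0, 0) in (x, y, z, w) form, up to sign
    flip_x = T.euler2quat(th.tensor([th.pi, 0.0, 0.0]))
    assert th.allclose(flip_x.abs(), th.tensor([1.0, 0.0, 0.0, 0.0]), atol=1e-6)
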
return @@ -1761,12 +1777,10 @@ def _sample_pose_near_object( yaw_lo, yaw_hi = -math.pi, math.pi avg_arm_workspace_range = th.mean(self.robot.arm_workspace_range[self.arm]) - target_pose = eef_pose if eef_pose is not None else obj.get_position_orientation() - # target_pose = ( - # (self._sample_position_on_aabb_side(obj), self._get_reset_eef_pose()[self.arm][1]) - # if eef_pose is None - # else eef_pose - # ) + if eef_pose is None: + eef_pose, _ = self._sample_grasp_pose(obj) + + target_pose = eef_pose obj_rooms = ( obj.in_rooms if obj.in_rooms else [self.robot.scene._seg_map.get_room_instance_by_point(target_pose[0][:2])] From d02eaf19a08279e1bda61f5c7a20bc02194ac86c Mon Sep 17 00:00:00 2001 From: Chengshu Li Date: Sat, 18 Jan 2025 13:41:00 -0800 Subject: [PATCH 64/76] reduce curobo batch size for primitive tests --- tests/test_primitives.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_primitives.py b/tests/test_primitives.py index ffe638684..aecfa5c42 100644 --- a/tests/test_primitives.py +++ b/tests/test_primitives.py @@ -79,7 +79,7 @@ def primitive_tester(env, objects, primitives, primitives_args): for _ in range(30): og.sim.step() - controller = StarterSemanticActionPrimitives(env, env.robots[0], enable_head_tracking=False) + controller = StarterSemanticActionPrimitives(env, env.robots[0], enable_head_tracking=False, curobo_batch_size=1) try: for primitive, args in zip(primitives, primitives_args): execute_controller(controller.apply_ref(primitive, *args, attempts=1), env) From 24dd194028f9444c54c4bbb635cecb8d51a69cf0 Mon Sep 17 00:00:00 2001 From: Chengshu Li Date: Tue, 21 Jan 2025 15:02:18 -0800 Subject: [PATCH 65/76] fix T.align_vector_sets, minor fixes for curobo test and sampling utils --- omnigibson/utils/sampling_utils.py | 3 ++- omnigibson/utils/transform_utils.py | 31 ++++++++++---------------- omnigibson/utils/transform_utils_np.py | 25 ++------------------- tests/test_curobo.py | 23 ++++--------------- 4 files changed, 20 insertions(+), 62 deletions(-) diff --git a/omnigibson/utils/sampling_utils.py b/omnigibson/utils/sampling_utils.py index 975558556..cc3f9b3b5 100644 --- a/omnigibson/utils/sampling_utils.py +++ b/omnigibson/utils/sampling_utils.py @@ -150,7 +150,8 @@ def get_parallel_rays(source, destination, offset, new_ray_per_horizontal_distan ray_direction = destination - source # Get an orthogonal vector using a random vector. 
- random_vector = th.rand(3) + random_vector = th.randn(3) + random_vector /= th.norm(random_vector) orthogonal_vector_1 = th.linalg.cross(ray_direction, random_vector) orthogonal_vector_1 /= th.norm(orthogonal_vector_1) diff --git a/omnigibson/utils/transform_utils.py b/omnigibson/utils/transform_utils.py index 6db9445bf..129762cbe 100644 --- a/omnigibson/utils/transform_utils.py +++ b/omnigibson/utils/transform_utils.py @@ -1231,6 +1231,7 @@ def vecs2quat(vec0: torch.Tensor, vec1: torch.Tensor, normalized: bool = False) return quat_unnormalized / torch.norm(quat_unnormalized, dim=-1, keepdim=True) +# Ref: https://github.com/scipy/scipy/blob/9974222eb58ec3eafe5d12f25ee960f3170c277a/scipy/spatial/transform/_rotation.pyx#L3249 @torch.compile def align_vector_sets(vec_set1: torch.Tensor, vec_set2: torch.Tensor) -> torch.Tensor: """ @@ -1243,29 +1244,21 @@ def align_vector_sets(vec_set1: torch.Tensor, vec_set2: torch.Tensor) -> torch.T Returns: torch.Tensor: (4,) Normalized quaternion representing the overall rotation """ - # Compute the cross-covariance matrix - H = vec_set2.T @ vec_set1 + B = torch.einsum("ji,jk->ik", vec_set1, vec_set2) + u, s, vh = torch.linalg.svd(B) - # Compute the elements for the quaternion - trace = H.trace() - w = trace + 1 - x = H[1, 2] - H[2, 1] - y = H[2, 0] - H[0, 2] - z = H[0, 1] - H[1, 0] + # Correct improper rotation if necessary (as in Kabsch algorithm) + if torch.linalg.det(u @ vh) < 0: + s[-1] = -s[-1] + u[:, -1] = -u[:, -1] - # Construct the quaternion - quat = torch.stack([x, y, z, w]) + C = u @ vh - # Handle the case where w is close to zero - if quat[3] < 1e-4: - quat[3] = 0 - max_idx = torch.argmax(quat[:3].abs()) + 1 - quat[max_idx] = 1 + # if s[1] + s[2] < 1e-16 * s[0]: + # warnings.warn("Optimal rotation is not uniquely or poorly defined " + # "for the given sets of vectors.") - # Normalize the quaternion - quat = quat / (torch.norm(quat) + 1e-8) # Add epsilon to avoid division by zero - - return quat + return mat2quat(C) @torch.compile diff --git a/omnigibson/utils/transform_utils_np.py b/omnigibson/utils/transform_utils_np.py index b76b2606c..84ca95724 100644 --- a/omnigibson/utils/transform_utils_np.py +++ b/omnigibson/utils/transform_utils_np.py @@ -1134,29 +1134,8 @@ def align_vector_sets(vec_set1, vec_set2): Returns: np.array: (4,) Normalized quaternion representing the overall rotation """ - # Compute the cross-covariance matrix - H = vec_set2.T @ vec_set1 - - # Compute the elements for the quaternion - trace = H.trace() - w = trace + 1 - x = H[1, 2] - H[2, 1] - y = H[2, 0] - H[0, 2] - z = H[0, 1] - H[1, 0] - - # Construct the quaternion - quat = np.stack([x, y, z, w]) - - # Handle the case where w is close to zero - if quat[3] < 1e-4: - quat[3] = 0 - max_idx = np.argmax(quat[:3].abs()) + 1 - quat[max_idx] = 1 - - # Normalize the quaternion - quat = quat / (np.linalg.norm(quat) + 1e-8) # Add epsilon to avoid division by zero - - return quat + rot, _ = R.align_vectors(vec_set1, vec_set2) + return rot.as_quat() def l2_distance(v1, v2): diff --git a/tests/test_curobo.py b/tests/test_curobo.py index d63052930..477d518cb 100644 --- a/tests/test_curobo.py +++ b/tests/test_curobo.py @@ -6,7 +6,6 @@ import torch as th import omnigibson as og -import omnigibson.utils.transform_utils as T from omnigibson.action_primitives.curobo import CuRoboMotionGenerator from omnigibson.macros import gm from omnigibson.robots.holonomic_base_robot import HolonomicBaseRobot @@ -233,7 +232,7 @@ def test_curobo(): batch_size=batch_size, debug=False, 
use_cuda_graph=True, - collision_activation_distance=0.02, # Use larger activation distance for better reproducibility + collision_activation_distance=0.03, # Use larger activation distance for better reproducibility use_default_embodiment_only=True, ) @@ -262,7 +261,6 @@ def test_curobo(): false_positive = 0 false_negative = 0 - target_pos_in_world_frame = defaultdict(list) for i, (q, curobo_has_contact) in enumerate(zip(random_qs, collision_results)): # Set robot to desired qpos robot.set_joint_positions(q) @@ -320,19 +318,10 @@ def test_curobo(): # neither cuRobo nor physx reports contact, valid planning goals elif not curobo_has_contact and not physx_has_contact: for arm_name in robot.arm_names: - # For holonomic base robots, we need to be in the frame of @robot.root_link, not @robot.base_footprint_link - if isinstance(robot, HolonomicBaseRobot): - base_link_pose = robot.root_link.get_position_orientation() - eef_link_pose = robot.eef_links[arm_name].get_position_orientation() - eef_pos, eef_quat = T.relative_pose_transform(*eef_link_pose, *base_link_pose) - else: - eef_pos, eef_quat = robot.get_relative_eef_pose(arm_name) - + eef_pos, eef_quat = robot.get_eef_pose(arm_name) target_pos[robot.eef_link_names[arm_name]].append(eef_pos) target_quat[robot.eef_link_names[arm_name]].append(eef_quat) - target_pos_in_world_frame[robot.eef_link_names[arm_name]].append(robot.get_eef_position(arm_name)) - print( f"Collision checking false positive: {false_positive / n_samples}, false negative: {false_negative / n_samples}." ) @@ -348,14 +337,10 @@ def test_curobo(): for arm_name in robot.arm_names: target_pos[robot.eef_link_names[arm_name]] = th.stack(target_pos[robot.eef_link_names[arm_name]], dim=0) target_quat[robot.eef_link_names[arm_name]] = th.stack(target_quat[robot.eef_link_names[arm_name]], dim=0) - target_pos_in_world_frame[robot.eef_link_names[arm_name]] = th.stack( - target_pos_in_world_frame[robot.eef_link_names[arm_name]], dim=0 - ) # Cast defaultdict to dict target_pos = dict(target_pos) target_quat = dict(target_quat) - target_pos_in_world_frame = dict(target_pos_in_world_frame) print(f"Planning for {len(target_pos[robot.eef_link_names[robot.default_arm]])} eef targets...") @@ -367,7 +352,7 @@ def test_curobo(): successes, traj_paths = cmg.compute_trajectories( target_pos=target_pos, target_quat=target_quat, - is_local=True, + is_local=False, max_attempts=1, timeout=60.0, ik_fail_return=5, @@ -400,7 +385,7 @@ def test_curobo(): # Move the markers to the desired eef positions for marker, arm_name in zip(eef_markers, robot.arm_names): eef_link_name = robot.eef_link_names[arm_name] - marker.set_position_orientation(position=target_pos_in_world_frame[eef_link_name][traj_idx]) + marker.set_position_orientation(position=target_pos[eef_link_name][traj_idx]) q_traj = cmg.path_to_joint_trajectory(traj_path) # joint_positions_set_point = [] From 99d8b3cd336b9197eb91cb108132985809b81c43 Mon Sep 17 00:00:00 2001 From: Chengshu Li Date: Wed, 22 Jan 2025 09:21:19 -0800 Subject: [PATCH 66/76] add gf_quat_to_torch --- omnigibson/utils/numpy_utils.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/omnigibson/utils/numpy_utils.py b/omnigibson/utils/numpy_utils.py index 93478e9da..2b33e08d4 100644 --- a/omnigibson/utils/numpy_utils.py +++ b/omnigibson/utils/numpy_utils.py @@ -1,5 +1,6 @@ import numpy as np import torch as th +import omnigibson.lazy as lazy class NumpyTypes: @@ -9,12 +10,22 @@ class NumpyTypes: UINT32 = np.uint32 -def vtarray_to_torch(vtarray, 
dtype=th.float32, device="cpu"): +def numpy_to_torch(np_array, dtype=th.float32, device="cpu"): if device == "cpu": - return th.from_numpy(np.array(vtarray)).to(dtype) + return th.from_numpy(np_array).to(dtype) else: assert device.startswith("cuda") - return th.tensor(np.array(vtarray), dtype=dtype, device=device) + return th.tensor(np_array, dtype=dtype, device=device) + + +def vtarray_to_torch(vtarray, dtype=th.float32, device="cpu"): + np_array = np.array(vtarray) + return numpy_to_torch(np_array, dtype=dtype, device=device) + + +def gf_quat_to_torch(gf_quat, dtype=th.float32, device="cpu"): + np_array = lazy.omni.isaac.core.utils.rotations.gf_quat_to_np_array(gf_quat)[[1, 2, 3, 0]] + return numpy_to_torch(np_array, dtype=dtype, device=device) def pil_to_tensor(pil_image): From f42dd11d862453c95cc6d1fd513a27597a608db4 Mon Sep 17 00:00:00 2001 From: Chengshu Li Date: Wed, 22 Jan 2025 09:22:22 -0800 Subject: [PATCH 67/76] add local orientation and position for joint prim --- omnigibson/prims/joint_prim.py | 37 ++++++++++++++++++++++++---------- 1 file changed, 26 insertions(+), 11 deletions(-) diff --git a/omnigibson/prims/joint_prim.py b/omnigibson/prims/joint_prim.py index bef1898dd..d88077439 100644 --- a/omnigibson/prims/joint_prim.py +++ b/omnigibson/prims/joint_prim.py @@ -4,13 +4,13 @@ import omnigibson as og import omnigibson.lazy as lazy -import omnigibson.utils.transform_utils as T from omnigibson.controllers.controller_base import ControlType from omnigibson.macros import create_module_macros from omnigibson.prims.prim_base import BasePrim from omnigibson.utils.constants import JointAxis, JointType from omnigibson.utils.python_utils import assert_valid_key from omnigibson.utils.usd_utils import PoseAPI, create_joint +from omnigibson.utils.numpy_utils import gf_quat_to_torch, vtarray_to_torch # Create settings for this module m = create_module_macros(module_path=__file__) @@ -280,21 +280,36 @@ def body1(self, body1): self._body1 = None @property - def local_orientation(self): + def local_orientation_0(self): """ Returns: 4-array: (x,y,z,w) local quaternion orientation of this joint, relative to the parent link """ - # Grab local rotation to parent and child links - quat0 = lazy.omni.isaac.core.utils.rotations.gf_quat_to_np_array(self.get_attribute("physics:localRot0"))[ - [1, 2, 3, 0] - ] - quat1 = lazy.omni.isaac.core.utils.rotations.gf_quat_to_np_array(self.get_attribute("physics:localRot1"))[ - [1, 2, 3, 0] - ] + return gf_quat_to_torch(self.get_attribute("physics:localRot0")) - # Invert the child link relationship, and multiply the two rotations together to get the final rotation - return T.quat_multiply(quaternion1=T.quat_inverse(quat1), quaternion0=quat0) + @property + def local_orientation_1(self): + """ + Returns: + 4-array: (x,y,z,w) local quaternion orientation of this joint, relative to the child link + """ + return gf_quat_to_torch(self.get_attribute("physics:localRot1")) + + @property + def local_position_0(self): + """ + Returns: + 3-array: (x,y,z) local position of this joint, relative to the parent link + """ + return vtarray_to_torch(self.get_attribute("physics:localPos0")) + + @property + def local_position_1(self): + """ + Returns: + 3-array: (x,y,z) local position of this joint, relative to the child link + """ + return vtarray_to_torch(self.get_attribute("physics:localPos1")) @property def joint_name(self): From 311691e835e6e944dea4e454f7204f68fe52fa1c Mon Sep 17 00:00:00 2001 From: Chengshu Li Date: Wed, 22 Jan 2025 09:33:18 -0800 Subject: [PATCH 68/76] fix 
update_locked_joints with arbitrary joint state --- omnigibson/action_primitives/curobo.py | 77 +++++++++++++++++++++----- 1 file changed, 62 insertions(+), 15 deletions(-) diff --git a/omnigibson/action_primitives/curobo.py b/omnigibson/action_primitives/curobo.py index 9cdb82282..5df5a4eb1 100644 --- a/omnigibson/action_primitives/curobo.py +++ b/omnigibson/action_primitives/curobo.py @@ -265,7 +265,13 @@ def update_obstacles(self, ignore_objects=None): self.mg[CuRoboEmbodimentSelection.DEFAULT].update_world(world) def check_collisions( - self, q, self_collision_check=True, skip_obstacle_update=False, attached_obj=None, attached_obj_scale=None + self, + q, + initial_joint_pos=None, + self_collision_check=True, + skip_obstacle_update=False, + attached_obj=None, + attached_obj_scale=None, ): """ Checks collisions between the sphere representation of the robot and the rest of the current scene @@ -273,6 +279,8 @@ def check_collisions( Args: q (th.tensor): (N, D)-shaped tensor, representing N-total different joint configurations to check collisions against the world + initial_joint_pos (None or th.tensor): If specified, the initial joint positions to set the locked joints. + Default is the current joint positions of the robot self_collision_check (bool): Whether to check self-collisions or not skip_obstacle_update (bool): Whether to skip updating the obstacles in the world collision checker attached_obj (None or Dict[str, BaseObject]): If specified, a dictionary where the keys are the end-effector @@ -290,7 +298,8 @@ def check_collisions( if not skip_obstacle_update: self.update_obstacles() - q_pos = self.robot.get_joint_positions().unsqueeze(0) + q_pos = self.robot.get_joint_positions() if initial_joint_pos is None else initial_joint_pos + q_pos = q_pos.unsqueeze(0) cu_joint_state = lazy.curobo.types.state.JointState( position=self._tensor_args.to_device(q_pos), joint_names=self.robot_joint_names, @@ -348,18 +357,47 @@ def update_locked_joints(self, cu_joint_state, emb_sel): # Update the lock joint state position kc.lock_jointstate.position = cu_joint_state.get_ordered_joint_state(kc.lock_jointstate.joint_names).position[0] # Update all the fixed transforms between the parent links and the child links of these joints - for joint_name in kc.lock_jointstate.joint_names: + for i, joint_name in enumerate(kc.lock_jointstate.joint_names): joint = self.robot.joints[joint_name] - parent_link_name, child_link_name = joint.body0.split("/")[-1], joint.body1.split("/")[-1] - parent_link = self.robot.links[parent_link_name] - child_link = self.robot.links[child_link_name] - relative_pose = T.pose2mat( - T.relative_pose_transform( - *child_link.get_position_orientation(), *parent_link.get_position_orientation() - ) - ) + joint_pos = kc.lock_jointstate.position[i] + child_link_name = joint.body1.split("/")[-1] + + # Compute the fixed transform between the parent link and the child link + # Note that we cannot directly query the parent and child link poses from OG + # because the cu_joint_state might not represent the current joint position in OG + + jf_to_cf_pose = joint.local_position_1, joint.local_orientation_1 + # Compute the transform from child frame to joint frame + cf_to_jf_pose = T.invert_pose_transform(*jf_to_cf_pose) + + # Compute the transform from the joint frame to the joint frame moved by the joint position + if joint.joint_type == JointType.JOINT_FIXED: + jf_to_jf_moved_pos = th.zeros(3) + jf_to_jf_moved_quat = th.tensor([0.0, 0.0, 0.0, 1.0]) + elif joint.joint_type == 
JointType.JOINT_PRISMATIC:
+                jf_to_jf_moved_pos = th.zeros(3)
+                jf_to_jf_moved_pos[["X", "Y", "Z"].index(joint.axis)] = joint_pos.cpu()
+                jf_to_jf_moved_quat = th.tensor([0.0, 0.0, 0.0, 1.0])
+            elif joint.joint_type == JointType.JOINT_REVOLUTE:
+                jf_to_jf_moved_pos = th.zeros(3)
+                axis = th.zeros(3)
+                axis[["X", "Y", "Z"].index(joint.axis)] = 1.0
+                jf_to_jf_moved_quat = T.axisangle2quat(axis * joint_pos.cpu())
+            else:
+                raise NotImplementedError(f"Joint type {joint.joint_type} not supported")
+
+            # Compute the transform from the child frame to the joint frame moved by the joint position
+            cf_to_jf_moved_pose = T.pose_transform(jf_to_jf_moved_pos, jf_to_jf_moved_quat, *cf_to_jf_pose)
+
+            # Compute the transform from the joint frame moved by the joint position to the parent frame
+            jf_moved_to_pf_pose = joint.local_position_0, joint.local_orientation_0
+
+            # Compute the transform from the child frame to the parent frame
+            cf_to_pf_pose = T.pose_transform(*jf_moved_to_pf_pose, *cf_to_jf_moved_pose)
+            cf_to_pf_pose = T.pose2mat(cf_to_pf_pose)
+
             link_idx = kc.link_name_to_idx_map[child_link_name]
-            kc.fixed_transforms[link_idx] = relative_pose
+            kc.fixed_transforms[link_idx] = cf_to_pf_pose
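# Illustration only, not part of the diff: the hunk above composes the fixed
# child-to-parent transform as (joint -> parent) composed with (joint moved by q)
# composed with (child -> joint). A minimal standalone sketch of the same chain for
# a revolute joint, reusing the helpers the patch itself calls (T.pose_transform
# composes (position, quaternion) pairs; local_position_* / local_orientation_* are
# the JointPrim properties added in the previous patch):

import torch as th
import omnigibson.utils.transform_utils as T

def child_to_parent_fixed_transform(joint, joint_pos):
    # Child frame -> joint frame: invert the joint pose expressed in the child link
    cf_to_jf = T.invert_pose_transform(joint.local_position_1, joint.local_orientation_1)
    # Joint frame -> joint frame rotated by the locked joint position
    axis = th.zeros(3)
    axis[["X", "Y", "Z"].index(joint.axis)] = 1.0
    cf_to_jf_moved = T.pose_transform(th.zeros(3), T.axisangle2quat(axis * joint_pos), *cf_to_jf)
    # Joint frame -> parent frame: the joint pose expressed in the parent link
    return T.pose_transform(joint.local_position_0, joint.local_orientation_0, *cf_to_jf_moved)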
 
     def solve_ik_batch(
         self, start_state, goal_pose, plan_config, link_poses=None, emb_sel=CuRoboEmbodimentSelection.DEFAULT
@@ -434,6 +472,7 @@ def compute_trajectories(
         self,
         target_pos,
         target_quat,
+        initial_joint_pos=None,
         is_local=False,
         max_attempts=5,
         timeout=2.0,
@@ -462,6 +501,8 @@ def compute_trajectories(
             where each entry is an individual (x,y,z,w) quaternion to reach with the default end-effector link specified
             @self.ee_link[emb_sel]. If a dictionary is given, the keys should be the end-effector links and the values
             should be the corresponding (N, 4) tensors
+            initial_joint_pos (None or th.tensor): If specified, the initial joint positions to start the trajectory.
+                Default is the current joint positions of the robot
             is_local (bool): Whether @target_pos and @target_quat are specified in the robot's local frame or the world
                 global frame
             max_attempts (int): Maximum number of attempts for trying to compute a valid trajectory
@@ -573,9 +614,15 @@ def compute_trajectories(
         )
 
         # Construct initial state
-        q_pos = th.stack([self.robot.get_joint_positions()] * self.batch_size, axis=0)
-        q_vel = th.stack([self.robot.get_joint_velocities()] * self.batch_size, axis=0)
-        q_eff = th.stack([self.robot.get_joint_efforts()] * self.batch_size, axis=0)
+        if initial_joint_pos is None:
+            q_pos = th.stack([self.robot.get_joint_positions()] * self.batch_size, axis=0)
+            q_vel = th.stack([self.robot.get_joint_velocities()] * self.batch_size, axis=0)
+            q_eff = th.stack([self.robot.get_joint_efforts()] * self.batch_size, axis=0)
+        else:
+            q_pos = th.stack([initial_joint_pos] * self.batch_size, axis=0)
+            q_vel = th.zeros_like(q_pos)
+            q_eff = th.zeros_like(q_pos)
+
         cu_joint_state = lazy.curobo.types.state.JointState(
             position=self._tensor_args.to_device(q_pos),
             # TODO: Ideally these should be nonzero, but curobo fails to compute a solution if so

From d79d2c010b44050d818c4b7d57facdc6d5b9c22b Mon Sep 17 00:00:00 2001
From: Chengshu Li
Date: Wed, 22 Jan 2025 10:43:51 -0800
Subject: [PATCH 69/76] check world collision when checking reachability,
 minimize update_obstacles

---
 .../starter_semantic_action_primitives.py    | 110 +++++++-----------
 1 file changed, 45 insertions(+), 65 deletions(-)

diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py
index e265616bf..a3cf058f7 100644
--- a/omnigibson/action_primitives/starter_semantic_action_primitives.py
+++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py
@@ -131,7 +131,8 @@ class StarterSemanticActionPrimitiveSet(IntEnum):
     TOGGLE_OFF = auto(), "Toggle an object off"
 
 
-# TODO: overwrite_head_action might move the head that causes self-collision with curobo-generated motion plans
+# (TODO) execution failure: overwrite_head_action might move the head that causes self-collision with curobo-generated motion plans (at the time of curobo planning, it assumes the head doesn't move).
+# (TODO) planning failure: after placing an object and opening the gripper, the robot might fail to plan during the subsequent reset_robot() because the opened gripper fingers collide with the world.
class StarterSemanticActionPrimitives(BaseActionPrimitiveSet): @@ -705,7 +706,7 @@ def _place_with_predicate(self, obj, predicate): valid_navigation_pose = None for _ in range(m.MAX_ATTEMPTS_FOR_SAMPLING_PLACE_POSE): # Sample one pose at a time - obj_pose = self._sample_pose_with_object_and_predicate(predicate, obj_in_hand, obj, keep_orientation=True) + obj_pose = self._sample_pose_with_object_and_predicate(predicate, obj_in_hand, obj, world_aligned=True) hand_pose = self._get_hand_pose_for_object_pose(obj_pose) # First check if we can directly move the hand there @@ -720,7 +721,7 @@ def _place_with_predicate(self, obj, predicate): ) if valid_navigation_pose is not None: - yield from self._navigate_to_pose(valid_navigation_pose) + yield from self._navigate_to_pose(valid_navigation_pose, skip_obstacle_update=True) yield from self._move_hand(hand_pose) break @@ -755,14 +756,14 @@ def _convert_cartesian_to_joint_space(self, target_pose): Gets joint positions for the arm so eef is at the target pose Args: - target_pose (Iterable of array): Position and orientation arrays in an iterable for pose for the eef + target_pose (Tuple[th.tensor, th.tensor]): target pose to reach for the default end-effector in the world frame Returns: 2-tuple - th.tensor or None: Joint positions to reach target pose or None if impossible to reach target pose - th.tensor: Indices for joints in the robot """ - joint_pos = self._ik_solver_cartesian_to_joint_space(target_pose, frame="world") + joint_pos = self._ik_solver_cartesian_to_joint_space(target_pose) if joint_pos is None: raise ActionPrimitiveError( ActionPrimitiveError.Reason.PLANNING_ERROR, @@ -770,12 +771,12 @@ def _convert_cartesian_to_joint_space(self, target_pose): ) return joint_pos - def _target_in_reach_of_robot(self, target_pose, skip_obstacle_update=False): + def _target_in_reach_of_robot(self, target_pose, initial_joint_pos=None, skip_obstacle_update=False): """ Determines whether the eef for the robot can reach the target pose in the world frame Args: - target_pose (Iterable of array): Position and orientation arrays in an iterable for the pose for the eef + target_pose (Tuple[th.tensor, th.tensor]): target pose to reach for the default end-effector in the world frame skip_obstacle_update (bool): Whether to skip updating obstacles Returns: @@ -784,29 +785,8 @@ def _target_in_reach_of_robot(self, target_pose, skip_obstacle_update=False): return ( self._ik_solver_cartesian_to_joint_space( target_pose, + initial_joint_pos=initial_joint_pos, skip_obstacle_update=skip_obstacle_update, - frame="world", - ) - is not None - ) - - def _target_in_reach_of_robot_relative(self, relative_target_pose, skip_obstacle_update=False): - """ - Determines whether eef for the robot can reach the target pose where the target pose is in the robot frame - - Args: - relative_target_pose (Iterable of array): Position and orientation arrays in an iterable for pose for the eef in the robot frame - skip_obstacle_update (bool): Whether to skip updating obstacles - - Returns: - bool: Whether the default eef can reach the target pose - """ - - return ( - self._ik_solver_cartesian_to_joint_space( - relative_target_pose, - skip_obstacle_update=skip_obstacle_update, - frame="robot", ) is not None ) @@ -815,27 +795,21 @@ def _manipulation_control_idx(self): """The appropriate manipulation control idx for the current settings.""" return th.cat([self.robot.trunk_control_idx, self.robot.arm_control_idx[self.arm]]) - def _ik_solver_cartesian_to_joint_space(self, target_pose, 
skip_obstacle_update=False, frame="robot"): + def _ik_solver_cartesian_to_joint_space(self, target_pose, initial_joint_pos=None, skip_obstacle_update=False): """ - Get joint positions for the arm so eef is at the target pose where the target pose is in the robot frame + Get joint positions for the arm so eef is at the target pose in the world frame Args: - relative_target_pose (Iterable of array): Position and orientation arrays in an iterable for pose in the robot frame + target_pose (Tuple[th.tensor, th.tensor]): target pose to reach for the default end-effector in the world frame + initial_joint_pos (None or th.tensor): If specified, the initial joint positions to set the locked joints. + Default is the current joint positions of the robot skip_obstacle_update (bool): Whether to skip updating obstacles - frame (str): Frame to use for the target pose Returns: 2-tuple - th.tensor or None: Joint positions to reach target pose or None if impossible to reach the target pose - th.tensor: Indices for joints in the robot """ - if frame == "robot": - target_pose = self._robot_pose_to_world_pose(target_pose) - elif frame == "world": - pass - else: - raise ValueError(f"Unsupported frame: {frame}") - target_pos = {self.robot.eef_link_names[self.arm]: target_pose[0]} target_quat = {self.robot.eef_link_names[self.arm]: target_pose[1]} @@ -847,6 +821,7 @@ def _ik_solver_cartesian_to_joint_space(self, target_pose, skip_obstacle_update= successes, joint_states = self._motion_generator.compute_trajectories( target_pos=target_pos, target_quat=target_quat, + initial_joint_pos=initial_joint_pos, is_local=False, max_attempts=math.ceil(m.MAX_PLANNING_ATTEMPTS / self._motion_generator.batch_size), timeout=60.0, @@ -860,9 +835,10 @@ def _ik_solver_cartesian_to_joint_space(self, target_pose, skip_obstacle_update= motion_constraint=None, skip_obstacle_update=skip_obstacle_update, ik_only=True, - ik_world_collision_check=False, + ik_world_collision_check=True, emb_sel=CuRoboEmbodimentSelection.ARM, ) + # Grab the first successful joint state if found success_idx = th.where(successes)[0].cpu() if len(success_idx) == 0: @@ -887,7 +863,7 @@ def _move_hand( Yields action for the robot to move hand so the eef is in the target pose using the planner Args: - target_pose (Iterable of array): Position and orientation arrays in an iterable for pose + target_pose (Tuple[th.tensor, th.tensor]): target pose to reach for the default end-effector in the world frame stop_on_contact (bool): Whether to stop executing motion plan if contact is detected motion_constraint (MotionConstraint): Motion constraint for the motion low_precision (bool): Whether to use low precision for the motion @@ -939,7 +915,7 @@ def _plan_joint_motion( target_quat, embodiment_selection=CuRoboEmbodimentSelection.DEFAULT, motion_constraint=None, - skip_obstalce_update=False, + skip_obstacle_update=False, ignore_objects=None, ): # If an object is grasped, we need to pass it to the motion planner @@ -952,7 +928,7 @@ def _plan_joint_motion( k: th.stack([v for _ in range(self._motion_generator.batch_size)]) for k, v in target_quat.items() } - if not skip_obstalce_update: + if not skip_obstacle_update: self._motion_generator.update_obstacles(ignore_objects=ignore_objects) successes, traj_paths = self._motion_generator.compute_trajectories( @@ -998,7 +974,7 @@ def _execute_motion_plan( self, q_traj, stop_on_contact=False, ignore_failure=False, low_precision=False, ignore_physics=False ): for i, joint_pos in enumerate(q_traj): - indented_print(f"Executing motion 
plan step {i + 1}/{len(q_traj)}") + # indented_print(f"Executing motion plan step {i + 1}/{len(q_traj)}") if ignore_physics: self.robot.set_joint_positions(joint_pos) og.sim.step() @@ -1222,7 +1198,7 @@ def _move_hand_linearly_cartesian( space from its current pose Args: - target_pose (Iterable of array): Position and orientation arrays in an iterable for pose + target_pose (Tuple[th.tensor, th.tensor]): target pose to reach for the default end-effector in the world frame stop_on_contact (boolean): Determines whether to stop move once an object is hit ignore_failure (boolean): Determines whether to throw error for not reaching final joint positions @@ -1556,12 +1532,13 @@ def _get_reset_eef_pose(self, frame="robot"): else: raise ValueError(f"Unsupported frame: {frame}") - def _navigate_to_pose(self, pose_2d): + def _navigate_to_pose(self, pose_2d, skip_obstacle_update=False): """ Yields the action to navigate robot to the specified 2d pose Args: pose_2d (Iterable): (x, y, yaw) 2d pose + skip_obstacle_update (bool): Determines whether to skip updating the obstacles in the scene Returns: th.tensor or None: Action array for one step for the robot to navigate or None if it is done navigating @@ -1574,7 +1551,12 @@ def _navigate_to_pose(self, pose_2d): # print("base motion planning") # breakpoint() - q_traj = self._plan_joint_motion(target_pos, target_quat, CuRoboEmbodimentSelection.BASE) + q_traj = self._plan_joint_motion( + target_pos, + target_quat, + embodiment_selection=CuRoboEmbodimentSelection.BASE, + skip_obstacle_update=skip_obstacle_update, + ) yield from self._execute_motion_plan(q_traj) def _draw_plan(self, plan): @@ -1607,7 +1589,7 @@ def _navigate_if_needed(self, obj, eef_pose=None, **kwargs): Args: obj (StatefulObject): Object for the robot to be in range of - eef_pose (Iterable): (pos, quat) Pose + eef_pose (Tuple[th.tensor, th.tensor]): target pose to reach for the default end-effector in the world frame Returns: th.tensor or None: Action array for one step for the robot to navigate or None if it is done navigating @@ -1629,7 +1611,7 @@ def _navigate_to_obj(self, obj, eef_pose=None, skip_obstacle_update=False, **kwa Args: obj (StatefulObject or list of StatefulObject): object(s) to be in range of - eef_pose (Iterable or list of Iterable): (pos, quat) Pose(s) + eef_pose (Tuple[th.tensor, th.tensor]): target pose to reach for the default end-effector in the world frame Returns: th.tensor or None: Action array for one step for the robot to navigate in range or None if it is done navigating @@ -1643,7 +1625,7 @@ def _navigate_to_obj(self, obj, eef_pose=None, skip_obstacle_update=False, **kwa "Could not find a valid pose near the object", {"object": obj.name}, ) - yield from self._navigate_to_pose(pose) + yield from self._navigate_to_pose(pose, skip_obstacle_update=skip_obstacle_update) def _navigate_to_pose_direct(self, pose_2d, low_precision=False): """ @@ -1765,8 +1747,7 @@ def _sample_pose_near_object( Args: obj (StatefulObject or list of StatefulObject): object(s) to sample a 2d pose near - eef_pose (Iterable or list of Iterable): (pos, quat) Pose(s) to sample near. 
- If provided, must match the length of obj list if obj is a list + eef_pose (Tuple[th.tensor, th.tensor]): target pose to reach for the default end-effector in the world frame Returns: 2-tuple: @@ -1877,7 +1858,7 @@ def _sample_position_on_aabb_side(target_obj): # ) def _sample_pose_with_object_and_predicate( - self, predicate, held_obj, target_obj, keep_orientation=False, near_poses=None, near_poses_threshold=None + self, predicate, held_obj, target_obj, world_aligned=False, near_poses=None, near_poses_threshold=None ): """ Returns a pose for the held object relative to the target object that satisfies the predicate @@ -1886,9 +1867,8 @@ def _sample_pose_with_object_and_predicate( predicate (object_states.OnTop or object_states.Inside): Relation between held object and the target object held_obj (StatefulObject): Object held by the robot target_obj (StatefulObject): Object to sample a pose relative to - keep_orientation (bool): Whether to keep the current orientation of the held object after placing. - If True, the sampled cuboid will be aligned with the current orientation of the held object. - If False, the sampled cuboid will be aligned with the base link of the held object. + world_aligned (bool): Whether to align the current world-aligned bbox to the sampled cuboid on the target_obj + If True, align the current world-aligned bbox; if False, align the base-link aligned bbox near_poses (Iterable of arrays): Poses in the world frame to sample near near_poses_threshold (float): The distance threshold to check if the sampled pose is near the poses in near_poses @@ -1900,8 +1880,8 @@ def _sample_pose_with_object_and_predicate( pred_map = {object_states.OnTop: "onTop", object_states.Inside: "inside"} for _ in range(m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_WITH_OBJECT_AND_PREDICATE): - if keep_orientation: - # Take the bbox aligned to the world frame because we want to keep the orientation + if world_aligned: + # Take the bbox in the world frame because we want to keep the x-y component of the current orientation bb_extents = held_obj.aabb_extent bb_center = held_obj.aabb_center # Compute bbox pose in the object base link frame @@ -1946,8 +1926,7 @@ def _validate_poses(self, candidate_poses, eef_pose=None, skip_obstacle_update=F Args: candidate_poses (list of arrays): Candidate 2d poses (x, y, yaw) - eef_pose (Iterable of arrays or list of Iterables): Pose(s) on the object(s) in the world frame. - Can be a single pose or list of poses. 
+ eef_pose (Tuple[th.tensor, th.tensor]): target pose to reach for the default end-effector in the world frame skip_obstacle_update (bool): Whether to skip updating the obstacles in the motion generator Returns: @@ -1983,10 +1962,11 @@ def _validate_poses(self, candidate_poses, eef_pose=None, skip_obstacle_update=F continue if eef_pose is not None: - pose = self._get_robot_pose_from_2d_pose(candidate_poses[i]) - relative_pose = T.relative_pose_transform(*eef_pose, *pose) - if not self._target_in_reach_of_robot_relative( - relative_pose, skip_obstacle_update=skip_obstacle_update + # Use the candidate joint position as the initial joint position to update the lock joints in curobo + # This effectively moves the robot base in curobo when testing for arm reachability to the target eef_pose + candidate_joint_position = candidate_joint_positions[i] + if not self._target_in_reach_of_robot( + eef_pose, initial_joint_pos=candidate_joint_position, skip_obstacle_update=skip_obstacle_update ): invalid_results[i] = True From 13435ec7e95b7474b92e3b23d70a0e5e7b0b2e9e Mon Sep 17 00:00:00 2001 From: Chengshu Li Date: Thu, 23 Jan 2025 11:43:56 -0800 Subject: [PATCH 70/76] clean up intrinsic euler conversion, fix set pose and velocity bugs in holonomic base robot --- .../starter_semantic_action_primitives.py | 7 +- omnigibson/robots/holonomic_base_robot.py | 48 +++++----- omnigibson/robots/r1.py | 1 - omnigibson/robots/tiago.py | 1 - omnigibson/utils/transform_utils.py | 88 +++++++++++++++++++ omnigibson/utils/transform_utils_np.py | 70 +++++++++++++-- 6 files changed, 180 insertions(+), 35 deletions(-) diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index a3cf058f7..47eaa73b5 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -1987,12 +1987,7 @@ def _get_robot_pose_from_2d_pose(self, pose_2d): base_joints = self.robot.get_joint_positions()[self.robot.base_idx] pos = th.tensor([pose_2d[0], pose_2d[1], base_joints[2]], dtype=th.float32) euler_intrinsic_xyz = th.tensor([base_joints[3], base_joints[4], pose_2d[2]], dtype=th.float32) - mat_x = T.euler2mat(th.tensor([euler_intrinsic_xyz[0], 0, 0], dtype=th.float32)) - mat_y = T.euler2mat(th.tensor([0, euler_intrinsic_xyz[1], 0], dtype=th.float32)) - mat_z = T.euler2mat(th.tensor([0, 0, euler_intrinsic_xyz[2]], dtype=th.float32)) - # intrinsic x-y-z is the same as extrinsic z-y-x - # multiply mat_z first, then mat_y, then mat_x - mat = mat_x @ mat_y @ mat_z + mat = T.euler_intrinsic2mat(euler_intrinsic_xyz) orn = T.mat2quat(mat) else: pos = th.tensor([pose_2d[0], pose_2d[1], 0.0], dtype=th.float32) diff --git a/omnigibson/robots/holonomic_base_robot.py b/omnigibson/robots/holonomic_base_robot.py index eba9baf1b..3cff80ef8 100644 --- a/omnigibson/robots/holonomic_base_robot.py +++ b/omnigibson/robots/holonomic_base_robot.py @@ -10,7 +10,6 @@ from omnigibson.controllers import JointController, HolonomicBaseJointController from omnigibson.robots.locomotion_robot import LocomotionRobot from omnigibson.robots.manipulation_robot import ManipulationRobot -from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.utils.geometry_utils import wrap_angle from omnigibson.utils.python_utils import classproperty import omnigibson.utils.transform_utils as T @@ -45,7 +44,6 @@ def __init__( relative_prim_path=None, scale=None, visible=True, - 
fixed_base=False, visual_only=False, self_collisions=True, load_config=None, @@ -71,7 +69,6 @@ def __init__( for this object. A single number corresponds to uniform scaling along the x,y,z axes, whereas a 3-array specifies per-axis scaling. visible (bool): whether to render this object or not in the stage - fixed_base (bool): whether to fix the base of this object or not visual_only (bool): Whether this object should be visual only (and not collide with any other objects) self_collisions (bool): Whether to enable self collisions for this object load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for @@ -117,7 +114,7 @@ def __init__( relative_prim_path=relative_prim_path, scale=scale, visible=visible, - fixed_base=fixed_base, + fixed_base=True, visual_only=visual_only, self_collisions=self_collisions, load_config=load_config, @@ -296,19 +293,15 @@ def set_position_orientation(self, position=None, orientation=None, frame: Liter # ("base_footprint_x") frame. Assign it to the 6 1DoF joints that control the base. # Note that the 6 1DoF joints are originated from the root_link ("base_footprint_x") frame. joint_pos, joint_orn = self.root_link.get_position_orientation() - joint_pos, joint_orn = cb.from_torch(joint_pos), cb.from_torch(joint_orn) - inv_joint_pos, inv_joint_orn = cb.T.mat2pose(cb.T.pose_inv(cb.T.pose2mat((joint_pos, joint_orn)))) - - relative_pos, relative_orn = cb.T.pose_transform( - inv_joint_pos, inv_joint_orn, cb.from_torch(position), cb.from_torch(orientation) - ) - relative_rpy = cb.T.quat2euler(relative_orn) + inv_joint_pos, inv_joint_orn = T.invert_pose_transform(joint_pos, joint_orn) + relative_pos, relative_orn = T.pose_transform(inv_joint_pos, inv_joint_orn, position, orientation) + intrinsic_eulers = T.mat2euler_intrinsic(T.quat2mat(relative_orn)) self.joints["base_footprint_x_joint"].set_pos(relative_pos[0], drive=False) self.joints["base_footprint_y_joint"].set_pos(relative_pos[1], drive=False) self.joints["base_footprint_z_joint"].set_pos(relative_pos[2], drive=False) - self.joints["base_footprint_rx_joint"].set_pos(relative_rpy[0], drive=False) - self.joints["base_footprint_ry_joint"].set_pos(relative_rpy[1], drive=False) - self.joints["base_footprint_rz_joint"].set_pos(relative_rpy[2], drive=False) + self.joints["base_footprint_rx_joint"].set_pos(intrinsic_eulers[0], drive=False) + self.joints["base_footprint_ry_joint"].set_pos(intrinsic_eulers[1], drive=False) + self.joints["base_footprint_rz_joint"].set_pos(intrinsic_eulers[2], drive=False) # Else, set the pose of the robot frame, and then move the joint frame of the world_base_joint to match it else: @@ -325,8 +318,8 @@ def set_linear_velocity(self, velocity: th.Tensor): # Transform the desired linear velocity from the world frame to the root_link ("base_footprint_x") frame # Note that this will also set the target to be the desired linear velocity (i.e. the robot will try to maintain # such velocity), which is different from the default behavior of set_linear_velocity for all other objects. 
-        orn = cb.from_torch(self.root_link.get_position_orientation()[1])
-        velocity_in_root_link = cb.T.quat2mat(orn).T @ cb.from_torch(velocity)
+        orn = self.root_link.get_position_orientation()[1]
+        velocity_in_root_link = T.quat2mat(orn).T @ velocity
         self.joints["base_footprint_x_joint"].set_vel(velocity_in_root_link[0], drive=False)
         self.joints["base_footprint_y_joint"].set_vel(velocity_in_root_link[1], drive=False)
         self.joints["base_footprint_z_joint"].set_vel(velocity_in_root_link[2], drive=False)
@@ -336,12 +329,23 @@ def get_linear_velocity(self) -> th.Tensor:
         return self.base_footprint_link.get_linear_velocity()
 
     def set_angular_velocity(self, velocity: th.Tensor) -> None:
-        # See comments of self.set_linear_velocity
-        orn = cb.from_torch(self.root_link.get_position_orientation()[1])
-        velocity_in_root_link = cb.T.quat2mat(orn).T @ cb.from_torch(velocity)
-        self.joints["base_footprint_rx_joint"].set_vel(velocity_in_root_link[0], drive=False)
-        self.joints["base_footprint_ry_joint"].set_vel(velocity_in_root_link[1], drive=False)
-        self.joints["base_footprint_rz_joint"].set_vel(velocity_in_root_link[2], drive=False)
+        # 1e-3 is empirically tuned to be a good value for the time step
+        delta_t = 1e-3 / (velocity.norm() + 1e-6)
+        delta_mat = T.delta_rotation_matrix(velocity, delta_t)
+        base_link_orn = self.get_position_orientation()[1]
+        rot_mat = T.quat2mat(base_link_orn)
+        desired_mat = delta_mat @ rot_mat
+        root_link_orn = self.root_link.get_position_orientation()[1]
+        desired_mat_in_root_link = T.quat2mat(root_link_orn).T @ desired_mat
+        desired_intrinsic_eulers = T.mat2euler_intrinsic(desired_mat_in_root_link)
+
+        cur_joint_pos = self.get_joint_positions()[self.base_idx[3:]]
+        delta_intrinsic_eulers = desired_intrinsic_eulers - cur_joint_pos
+        velocity_intrinsic = delta_intrinsic_eulers / delta_t
+
+        self.joints["base_footprint_rx_joint"].set_vel(velocity_intrinsic[0], drive=False)
+        self.joints["base_footprint_ry_joint"].set_vel(velocity_intrinsic[1], drive=False)
+        self.joints["base_footprint_rz_joint"].set_vel(velocity_intrinsic[2], drive=False)
 
     def get_angular_velocity(self) -> th.Tensor:
         # Note that the link we are interested in is self.base_footprint_link, not self.root_link
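# Illustration only, not part of the diff: set_angular_velocity above integrates the
# commanded world-frame angular velocity over a small time step and finite-differences
# the result against the current rx/ry/rz joint positions:
#
#     R_delta  = I + sin(theta) * K + (1 - cos(theta)) * K @ K,   theta = |omega| * delta_t
#     R_target = R_delta @ R_base
#     e_target = mat2euler_intrinsic(R_root^T @ R_target)
#     q_dot    = (e_target - q_[rx,ry,rz]) / delta_t
#
# A rough sanity check with the transform helpers added later in this patch; for a
# pure yaw velocity the recovered euler rate should be approximately (0, 0, 0.5):

import torch as th
import omnigibson.utils.transform_utils as T

omega = th.tensor([0.0, 0.0, 0.5])  # rad/s about the world z-axis
delta_t = 1e-3 / (omega.norm() + 1e-6)
euler_rate = T.mat2euler_intrinsic(T.delta_rotation_matrix(omega, delta_t)) / delta_t
print(euler_rate)  # ~tensor([0.0000, 0.0000, 0.5000])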
diff --git a/omnigibson/robots/r1.py b/omnigibson/robots/r1.py
index 37990888a..b9ca8259e 100644
--- a/omnigibson/robots/r1.py
+++ b/omnigibson/robots/r1.py
@@ -96,7 +96,6 @@ def __init__(
             name=name,
             scale=scale,
             visible=visible,
-            fixed_base=True,
             visual_only=visual_only,
             self_collisions=self_collisions,
             load_config=load_config,
diff --git a/omnigibson/robots/tiago.py b/omnigibson/robots/tiago.py
index ce506cf42..53a72ad07 100644
--- a/omnigibson/robots/tiago.py
+++ b/omnigibson/robots/tiago.py
@@ -111,7 +111,6 @@ def __init__(
             name=name,
             scale=scale,
             visible=visible,
-            fixed_base=True,
             visual_only=visual_only,
             self_collisions=self_collisions,
             load_config=load_config,
diff --git a/omnigibson/utils/transform_utils.py b/omnigibson/utils/transform_utils.py
index 129762cbe..dd167e4fe 100644
--- a/omnigibson/utils/transform_utils.py
+++ b/omnigibson/utils/transform_utils.py
@@ -1408,3 +1408,91 @@ def orientation_error(desired, current):
     error = 0.5 * (torch.linalg.cross(rc1, rd1) + torch.linalg.cross(rc2, rd2) + torch.linalg.cross(rc3, rd3))
 
     return error.reshape(desired.shape[:-2] + (3,))
+
+
+@torch.compile
+def delta_rotation_matrix(omega, delta_t):
+    """
+    Compute the delta rotation matrix given angular velocity and time elapsed.
+
+    Args:
+        omega (torch.tensor): Angular velocity vector [omega_x, omega_y, omega_z].
+        delta_t (float): Time elapsed.
+
+    Returns:
+        torch.tensor: 3x3 delta rotation matrix.
+    """
+    # Magnitude of angular velocity (angular speed)
+    omega_magnitude = torch.linalg.norm(omega)
+
+    # If angular speed is zero, return identity matrix
+    if omega_magnitude == 0:
+        return torch.eye(3)
+
+    # Rotation angle
+    theta = omega_magnitude * delta_t
+
+    # Normalized axis of rotation
+    axis = omega / omega_magnitude
+
+    # Skew-symmetric matrix K
+    u_x, u_y, u_z = axis
+    K = torch.tensor([[0, -u_z, u_y], [u_z, 0, -u_x], [-u_y, u_x, 0]])
+
+    # Rodrigues' rotation formula
+    R = torch.eye(3) + torch.sin(theta) * K + (1 - torch.cos(theta)) * (K @ K)
+
+    return R
+
+
+@torch.compile
+def mat2euler_intrinsic(rmat):
+    """
+    Converts given rotation matrix to intrinsic euler angles in radian.
+
+    Args:
+        rmat (torch.tensor): 3x3 rotation matrix
+
+    Returns:
+        torch.tensor: (r,p,y) converted intrinsic euler angles in radian vec3 float
+    """
+    # Check for gimbal lock (pitch = +-90 degrees)
+    if abs(rmat[0, 2]) < 1:
+        # General case
+        pitch = torch.arcsin(rmat[0, 2])
+        roll = torch.arctan2(-rmat[1, 2], rmat[2, 2])
+        yaw = torch.arctan2(-rmat[0, 1], rmat[0, 0])
+    else:
+        # Gimbal lock case
+        pitch = math.pi / 2 if rmat[0, 2] > 0 else -math.pi / 2
+        roll = torch.arctan2(rmat[1, 0], rmat[1, 1])
+        yaw = 0  # Can set yaw to 0 in gimbal lock
+
+    return torch.tensor([roll, pitch, yaw], dtype=torch.float32)
+
+
+@torch.compile
+def euler_intrinsic2mat(euler):
+    """
+    Converts intrinsic euler angles into rotation matrix form
+
+    Args:
+        euler (torch.tensor): (r,p,y) intrinsic euler angles in radian vec3 float
+
+    Returns:
+        torch.tensor: 3x3 rotation matrix
+    """
+    roll, pitch, yaw = euler
+    # Rotation matrix around X-axis
+    Rx = torch.tensor([[1, 0, 0], [0, torch.cos(roll), -torch.sin(roll)], [0, torch.sin(roll), torch.cos(roll)]])
+
+    # Rotation matrix around Y-axis
+    Ry = torch.tensor([[torch.cos(pitch), 0, torch.sin(pitch)], [0, 1, 0], [-torch.sin(pitch), 0, torch.cos(pitch)]])
+
+    # Rotation matrix around Z-axis
+    Rz = torch.tensor([[torch.cos(yaw), -torch.sin(yaw), 0], [torch.sin(yaw), torch.cos(yaw), 0], [0, 0, 1]])
+
+    # Combine the rotation matrices
+    # Intrinsic x-y-z is the same as extrinsic z-y-x
+    # Multiply Rz first, then Ry, then Rx
+    return Rx @ Ry @ Rz
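# Illustration only, not part of the diff: intrinsic x-y-z rotations are equivalent
# to extrinsic z-y-x rotations, which is why euler_intrinsic2mat multiplies
# Rx @ Ry @ Rz. A quick round-trip check of the two helpers above (valid away from
# the gimbal-lock case |pitch| = pi/2):

import torch as th
import omnigibson.utils.transform_utils as T

angles = th.tensor([0.1, -0.2, 0.3])  # (roll, pitch, yaw) in radians
assert th.allclose(T.mat2euler_intrinsic(T.euler_intrinsic2mat(angles)), angles, atol=1e-5)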
diff --git a/omnigibson/utils/transform_utils_np.py b/omnigibson/utils/transform_utils_np.py
index 84ca95724..12e4a9a40 100644
--- a/omnigibson/utils/transform_utils_np.py
+++ b/omnigibson/utils/transform_utils_np.py
@@ -526,7 +526,7 @@ def vec2quat(vec, up=(0, 0, 1.0)):
 
 def euler2quat(euler):
     """
-    Converts euler angles into quaternion form
+    Converts extrinsic euler angles into quaternion form
 
     Args:
         euler (np.array): (r,p,y) angles
@@ -542,7 +542,7 @@ def euler2quat(euler):
 
 def quat2euler(quat):
     """
-    Converts euler angles into quaternion form
+    Converts quaternion into extrinsic euler angles
 
     Args:
         quat (np.array): (x,y,z,w) float quaternion angles
@@ -558,7 +558,7 @@ def quat2euler(quat):
 
 def euler2mat(euler):
     """
-    Converts euler angles into rotation matrix form
+    Converts extrinsic euler angles into rotation matrix form
 
     Args:
         euler (np.array): (r,p,y) angles
@@ -578,13 +578,13 @@ def euler2mat(euler):
 
 def mat2euler(rmat):
     """
-    Converts given rotation matrix to euler angles in radian.
+    Converts given rotation matrix to extrinsic euler angles in radian.
 
     Args:
         rmat (np.array): 3x3 rotation matrix
 
     Returns:
-        np.array: (r,p,y) converted euler angles in radian vec3 float
+        np.array: (r,p,y) converted extrinsic euler angles in radian vec3 float
     """
     M = np.array(rmat, dtype=np.float32, copy=False)[:3, :3]
     return R.from_matrix(M).as_euler("xyz")
@@ -1276,3 +1276,63 @@ def orientation_error(desired, current):
     error = error.reshape(*input_shape, 3)
 
     return error
+
+
+def delta_rotation_matrix(omega, delta_t):
+    """
+    Compute the delta rotation matrix given angular velocity and time elapsed.
+
+    Args:
+        omega (np.array): Angular velocity vector [omega_x, omega_y, omega_z].
+        delta_t (float): Time elapsed.
+
+    Returns:
+        np.array: 3x3 Delta rotation matrix.
+    """
+    # Magnitude of angular velocity (angular speed)
+    omega_magnitude = np.linalg.norm(omega)
+
+    # If angular speed is zero, return identity matrix
+    if omega_magnitude == 0:
+        return np.eye(3)
+
+    # Rotation angle
+    theta = omega_magnitude * delta_t
+
+    # Normalized axis of rotation
+    axis = omega / omega_magnitude
+
+    # Skew-symmetric matrix K
+    u_x, u_y, u_z = axis
+    K = np.array([[0, -u_z, u_y], [u_z, 0, -u_x], [-u_y, u_x, 0]])
+
+    # Rodrigues' rotation formula; use a local name that does not shadow the scipy Rotation alias R
+    delta_rot = np.eye(3) + np.sin(theta) * K + (1 - np.cos(theta)) * (K @ K)
+
+    return delta_rot
+
+
+def mat2euler_intrinsic(rmat):
+    """
+    Converts given rotation matrix to intrinsic euler angles in radian.
+
+    Args:
+        rmat (np.array): 3x3 rotation matrix
+
+    Returns:
+        np.array: (r,p,y) converted intrinsic euler angles in radian vec3 float
+    """
+    return R.from_matrix(rmat).as_euler("XYZ")
+
+
+def euler_intrinsic2mat(euler):
+    """
+    Converts intrinsic euler angles into rotation matrix form
+
+    Args:
+        euler (np.array): (r,p,y) intrinsic euler angles in radian
+
+    Returns:
+        np.array: 3x3 rotation matrix
+    """
+    return R.from_euler("XYZ", euler).as_matrix()

From 4c97748e2c524939f4a6fbaf43a2bec68066a736 Mon Sep 17 00:00:00 2001
From: Chengshu Li
Date: Thu, 23 Jan 2025 12:34:40 -0800
Subject: [PATCH 71/76] during placing, plan with open gripper to prepare for
 the subsequent execute_release

---
 .../starter_semantic_action_primitives.py    | 65 +++++++++++++------
 1 file changed, 46 insertions(+), 19 deletions(-)

diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py
index 47eaa73b5..83b0f3086 100644
--- a/omnigibson/action_primitives/starter_semantic_action_primitives.py
+++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py
@@ -132,7 +132,6 @@ class StarterSemanticActionPrimitiveSet(IntEnum):
 
 
 # (TODO) execution failure: overwrite_head_action might move the head that causes self-collision with curobo-generated motion plans (at the time of curobo planning, it assumes the head doesn't move).
-# (TODO) planning failure: after placing an object and opening the gripper, the robot might fail to plan during the subsequent reset_robot() because the opened gripper fingers collide with the world.
 
 class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
@@ -710,14 +709,19 @@ def _place_with_predicate(self, obj, predicate):
         hand_pose = self._get_hand_pose_for_object_pose(obj_pose)
 
         # First check if we can directly move the hand there
-        target_in_reach = self._target_in_reach_of_robot(hand_pose, skip_obstacle_update=True)
+        # We want to plan with the fingers at their upper (open) limits to avoid collisions
+        # because later we will open-loop open the gripper with _execute_release after placing.
+        initial_joint_pos = self._get_joint_position_with_fingers_at_limit("upper")
+        target_in_reach = self._target_in_reach_of_robot(
+            hand_pose, initial_joint_pos=initial_joint_pos, skip_obstacle_update=True
+        )
         if target_in_reach:
             yield from self._move_hand(hand_pose)
             break
 
         # If not, try to find a valid navigation pose for this hand pose
         valid_navigation_pose = self._sample_pose_near_object(
-            obj, eef_pose=hand_pose, sampling_attempts=10, skip_obstacle_update=True
+            obj, eef_pose=hand_pose, plan_with_open_gripper=True, sampling_attempts=10, skip_obstacle_update=True
         )
 
         if valid_navigation_pose is not None:
@@ -934,6 +938,7 @@ def _plan_joint_motion(
         successes, traj_paths = self._motion_generator.compute_trajectories(
             target_pos=target_pos,
             target_quat=target_quat,
+            initial_joint_pos=None,
             is_local=False,
             max_attempts=math.ceil(m.MAX_PLANNING_ATTEMPTS / self._motion_generator.batch_size),
             timeout=60.0,
@@ -1301,15 +1306,15 @@ def _move_hand_linearly_cartesian(
             "Your hand was obstructed from moving to the desired world position",
         )
 
-    def _move_fingers_to_limit(self, limit_type):
+    def _get_joint_position_with_fingers_at_limit(self, limit_type):
         """
-        Helper function to move the robot's fingers to their limit positions.
+        Helper function to get the joint positions when the robot's fingers are at their limit positions.
 
         Args:
             limit_type (str): Either 'lower' for grasping or 'upper' for releasing.
 
-        Yields:
-            th.tensor or None: Action array for one step for the robot to move fingers or None if done.
+        Returns:
+            th.tensor: Joint positions for the robot with fingers at their limit positions.
         """
         target_joint_positions = self.robot.get_joint_positions()
         gripper_ctrl_idx = self.robot.gripper_control_idx[self.arm]
@@ -1318,10 +1323,23 @@ def _move_fingers_to_limit(self, limit_type):
         else:
             finger_joint_limits = self.robot.joint_upper_limits[gripper_ctrl_idx]
         target_joint_positions[gripper_ctrl_idx] = finger_joint_limits
+        return target_joint_positions
+
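# Illustration only, not part of the diff: the new getter swaps just the gripper
# entries of the current joint vector, so planning code can ask "would this target be
# reachable with the fingers open?" without actually opening them. A rough usage
# sketch mirroring the _place_with_predicate call site earlier in this patch:
#
#     q_open = self._get_joint_position_with_fingers_at_limit("upper")
#     reachable = self._target_in_reach_of_robot(
#         hand_pose, initial_joint_pos=q_open, skip_obstacle_update=True
#     )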
+    def _move_fingers_to_limit(self, limit_type):
+        """
+        Helper function to move the robot's fingers to their limit positions.
+
+        Args:
+            limit_type (str): Either 'lower' for grasping or 'upper' for releasing.
+
+        Yields:
+            th.tensor or None: Action array for one step for the robot to move fingers or None if done.
+        """
+        target_joint_positions = self._get_joint_position_with_fingers_at_limit(limit_type)
         action = self.robot.q_to_action(target_joint_positions)
         for _ in range(m.MAX_STEPS_FOR_GRASP_OR_RELEASE):
-            finger_joint_positions = self.robot.get_joint_positions()[gripper_ctrl_idx]
-            if th.allclose(finger_joint_positions, finger_joint_limits, atol=0.005):
+            current_joint_positions = self.robot.get_joint_positions()
+            if th.allclose(current_joint_positions, target_joint_positions, atol=0.005):
                 break
             elif limit_type == "lower" and self._get_obj_in_hand() is not None:
                 # If we are grasping an object, we should stop when object is detected in hand
@@ -1583,7 +1601,7 @@ def _draw_plan(self, plan):
         cv2.imshow("SceneGraph", img)
         cv2.waitKey(1)
 
-    def _navigate_if_needed(self, obj, eef_pose=None, **kwargs):
+    def _navigate_if_needed(self, obj, eef_pose=None):
         """
         Yields action to navigate the robot to be in range of the object if it not in the range
 
@@ -1603,9 +1621,9 @@ def _navigate_if_needed(self, obj, eef_pose=None):
             # No need to navigate.
             return
 
-        yield from self._navigate_to_obj(obj, eef_pose=eef_pose, skip_obstacle_update=True, **kwargs)
+        yield from self._navigate_to_obj(obj, eef_pose=eef_pose, skip_obstacle_update=True)
 
-    def _navigate_to_obj(self, obj, eef_pose=None, skip_obstacle_update=False, **kwargs):
+    def _navigate_to_obj(self, obj, eef_pose=None, skip_obstacle_update=False):
         """
         Yields action to navigate the robot to be in range of the pose
 
@@ -1616,9 +1634,7 @@ def _navigate_to_obj(self, obj, eef_pose=None, skip_obstacle_update=False):
         Returns:
             th.tensor or None: Action array for one step for the robot to navigate in range or None if it is done navigating
         """
-        pose = self._sample_pose_near_object(
-            obj, eef_pose=eef_pose, skip_obstacle_update=skip_obstacle_update, **kwargs
-        )
+        pose = self._sample_pose_near_object(obj, eef_pose=eef_pose, skip_obstacle_update=skip_obstacle_update)
         if pose is None:
             raise ActionPrimitiveError(
                 ActionPrimitiveError.Reason.PLANNING_ERROR,
@@ -1738,9 +1754,9 @@ def _sample_pose_near_object(
         self,
         obj,
         eef_pose=None,
+        plan_with_open_gripper=False,
         sampling_attempts=m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_NEAR_OBJECT,
         skip_obstacle_update=False,
     ):
         """
         Returns a 2d pose for the robot within in the range of the object and where the robot is not in collision with anything
 
         Args:
             obj (StatefulObject or list of StatefulObject): object(s) to sample a 2d pose near
             eef_pose (Tuple[th.tensor, th.tensor]): target pose to reach for the default end-effector in the world frame
+            plan_with_open_gripper (bool): Determines whether to plan with the gripper open even though it might be closed now.
+ This is useful for the placing primitive because we will open-loop open the gripper after placing skip_obstacle_update (bool): Whether to skip updating the obstacles in the motion generator Returns: @@ -1935,7 +1959,10 @@ def _validate_poses(self, candidate_poses, eef_pose=None, skip_obstacle_update=F """ # First check collisions for all candidate poses candidate_joint_positions = [] - current_joint_pos = self.robot.get_joint_positions() + if plan_with_open_gripper: + current_joint_pos = self._get_joint_position_with_fingers_at_limit("upper") + else: + current_joint_pos = self.robot.get_joint_positions() for pose in candidate_poses: joint_pos = current_joint_pos.clone() joint_pos[self.robot.base_control_idx] = pose From 10e7f39c59004fd31567e2c51bcde5a0be71942c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cem=20G=C3=B6kmen?= <1408354+cgokmen@users.noreply.github.com> Date: Thu, 23 Jan 2025 12:50:13 -0800 Subject: [PATCH 72/76] Use upload artifacts v4 --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index e125c307e..cdda12e08 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -60,7 +60,7 @@ jobs: continue-on-error: true - name: Deploy artifact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: ${{ github.run_id }}-tests-${{ matrix.test_file }} path: ${{ matrix.test_file }}.xml From 16adfbe961185eb5d03898875a9003e841fd0ad0 Mon Sep 17 00:00:00 2001 From: Chengshu Li Date: Thu, 23 Jan 2025 16:10:55 -0800 Subject: [PATCH 73/76] fix curobo commit hash, slightly improve primitive test --- docker/prod.Dockerfile | 2 +- setup.py | 2 +- tests/test_primitives.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docker/prod.Dockerfile b/docker/prod.Dockerfile index ca3e23653..ee83c8425 100644 --- a/docker/prod.Dockerfile +++ b/docker/prod.Dockerfile @@ -44,7 +44,7 @@ RUN wget --no-verbose -O /cuda-keyring.deb https://developer.download.nvidia.com DEBIAN_FRONTEND=noninteractive apt-get install -y cuda-toolkit-11-8 && \ TORCH_CUDA_ARCH_LIST='7.5;8.0;8.6+PTX' PATH=/usr/local/cuda-11.8/bin:$PATH LD_LIBRARY_PATH=/usr/local/cuda-11.8/lib64:$LD_LIBRARY_PATH \ micromamba run -n omnigibson pip install \ - git+https://github.com/StanfordVL/curobo@6a4eb2ca8677829b0f57451ad107e0a3186525e9#egg=nvidia_curobo \ + git+https://github.com/StanfordVL/curobo@cbaf7d32436160956dad190a9465360fad6aba73#egg=nvidia_curobo \ --no-build-isolation > /dev/null && \ apt-get remove -y cuda-toolkit-11-8 && apt-get autoremove -y && apt-get autoclean -y && rm -rf /var/lib/apt/lists/* diff --git a/setup.py b/setup.py index 0731b1f68..5bd3e7aa0 100644 --- a/setup.py +++ b/setup.py @@ -68,7 +68,7 @@ "telemoma~=0.1.2", ], "primitives": [ - "nvidia-curobo @ git+https://github.com/StanfordVL/curobo@dc52be668a8d0b426b8639de3c8d6443e58cc348", + "nvidia-curobo @ git+https://github.com/StanfordVL/curobo@cbaf7d32436160956dad190a9465360fad6aba73", "ompl @ https://storage.googleapis.com/gibson_scenes/ompl-1.6.0-cp310-cp310-manylinux_2_28_x86_64.whl", ], }, diff --git a/tests/test_primitives.py b/tests/test_primitives.py index aecfa5c42..11e733f6c 100644 --- a/tests/test_primitives.py +++ b/tests/test_primitives.py @@ -131,7 +131,7 @@ def test_place(self, robot): objects = [] obj_1 = { "object": DatasetObject(name="table", category="breakfast_table", model="rjgmmy", scale=[0.3, 0.3, 0.3]), - "position": [-0.7, 0.5, 0.2], + "position": [-0.7, 0.5, 0.09], "orientation": [0, 
0, 0, 1],
         }
         obj_2 = {

From bda9ea1cd97f418734da880368f8fc316393e796 Mon Sep 17 00:00:00 2001
From: Chengshu Li
Date: Thu, 23 Jan 2025 17:20:47 -0800
Subject: [PATCH 74/76] uncomment th.jit.script

---
 omnigibson/utils/python_utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/omnigibson/utils/python_utils.py b/omnigibson/utils/python_utils.py
index 0dfb8f668..b5aac7e9e 100644
--- a/omnigibson/utils/python_utils.py
+++ b/omnigibson/utils/python_utils.py
@@ -810,7 +810,7 @@ def h5py_group_to_torch(group):
     return state
 
 
-# @th.jit.script
+@th.jit.script
 def multi_dim_linspace(start: th.Tensor, stop: th.Tensor, num: int, endpoint: bool = True) -> th.Tensor:
     """
     Generate a tensor with evenly spaced values along multiple dimensions.
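# Illustration only, not part of the patch: th.jit.script can compile
# multi_dim_linspace because the signature above is fully type-annotated. A
# hypothetical call, interpolating elementwise between two joint configurations
# (the output is assumed to stack the interpolants along a leading dimension):

import torch as th
from omnigibson.utils.python_utils import multi_dim_linspace

start = th.tensor([0.0, 0.0, 0.0])
stop = th.tensor([1.0, 2.0, 3.0])
waypoints = multi_dim_linspace(start, stop, num=5)  # expected shape: (5, 3)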
From a5ce569092548a7e78cf7acfb2b0506cf47a37c1 Mon Sep 17 00:00:00 2001
From: Chengshu Li
Date: Fri, 24 Jan 2025 11:00:39 -0800
Subject: [PATCH 75/76] add a few more TODO items for primitives

---
 .../starter_semantic_action_primitives.py    | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py
index 83b0f3086..13da95d76 100644
--- a/omnigibson/action_primitives/starter_semantic_action_primitives.py
+++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py
@@ -132,6 +132,7 @@ class StarterSemanticActionPrimitiveSet(IntEnum):
 
 
 # (TODO) execution failure: overwrite_head_action might move the head that causes self-collision with curobo-generated motion plans (at the time of curobo planning, it assumes the head doesn't move).
+# (TODO) R1 improvement: create a new CuRoboEmbodimentSelection type that only allows for arm movement, and not torso movement.
 
 class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
@@ -604,9 +605,6 @@ def _grasp(self, obj):
             {"target object": obj.name},
         )
 
-        # indented_print("Moving hand back")
-        # yield from self._reset_hand()
-
         indented_print("Done with grasp")
 
         if self._get_obj_in_hand() != obj:
@@ -752,9 +750,6 @@ def _place_with_predicate(self, obj, predicate):
             {"dropped object": obj_in_hand.name, "target object": obj.name},
         )
 
-        # indented_print("Moving hand back")
-        # yield from self._reset_hand()
-
     def _convert_cartesian_to_joint_space(self, target_pose):
@@ -970,6 +965,7 @@ def _plan_joint_motion(
             traj_path, get_full_js=True, emb_sel=embodiment_selection
         ).cpu()
 
+        # (TODO) investigate why this is necessary to prevent jerky motion during execution
         # Smooth out the trajectory
         q_traj = th.stack(self._add_linearly_interpolated_waypoints(plan=q_traj, max_inter_dist=0.01))

From d5a8c54b9b411f6e2c2011fe32a169f4ef682151 Mon Sep 17 00:00:00 2001
From: Chengshu Li
Date: Fri, 24 Jan 2025 11:09:27 -0800
Subject: [PATCH 76/76] add one more TODO

---
 .../action_primitives/starter_semantic_action_primitives.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py
index 13da95d76..7cb5a235b 100644
--- a/omnigibson/action_primitives/starter_semantic_action_primitives.py
+++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py
@@ -1573,6 +1573,7 @@ def _navigate_to_pose(self, pose_2d, skip_obstacle_update=False):
         )
         yield from self._execute_motion_plan(q_traj)
 
+    # (TODO) add a function to draw curobo-generated plans.
     def _draw_plan(self, plan):
         SEARCHED = []
         trav_map = self.robot.scene._trav_map