diff --git a/.github/workflows/new-pycram-ci.yml b/.github/workflows/new-pycram-ci.yml index 7ad8b1486..789d297d9 100644 --- a/.github/workflows/new-pycram-ci.yml +++ b/.github/workflows/new-pycram-ci.yml @@ -60,6 +60,7 @@ jobs: pip3 install --upgrade pip --root-user-action=ignore cd /opt/ros/overlay_ws/src/pycram pip3 install -r requirements.txt + sudo apt-get install -y libpq-dev pip3 install -r requirements-resolver.txt - name: Install pytest & pyjpt diff --git a/.github/workflows/notebook-test-ci.yml b/.github/workflows/notebook-test-ci.yml new file mode 100644 index 000000000..853904a5c --- /dev/null +++ b/.github/workflows/notebook-test-ci.yml @@ -0,0 +1,83 @@ +name: Test Jupyter Notebook Examples + +on: + push: + branches: + - dev + - master + pull_request: + branches: + - master + - dev + +# ---------------------------------------------------------------------------------------------------------------------- + +defaults: + run: + shell: bash + working-directory: . + +jobs: + test-and-build: + name: Test Jupyter Notebooks + runs-on: ubuntu-20.04 + container: + image: "pycram/pycram:dev" + + steps: + + - name: Checkout PyCRAM + uses: actions/checkout@v3 + with: + path: "ros/src/pycram" + repository: ${{ github.repository }} + ref: ${{ github.ref }} + submodules: "recursive" + + # For debugging + - name: Setup tmate session + uses: mxschmitt/action-tmate@v3 + if: ${{ github.event_name == 'workflow_dispatch' && inputs.debug_enabled }} + + - name: Update PyCRAM source files + run: | + rm -rf /opt/ros/overlay_ws/src/pycram/* + cd /opt/ros/overlay_ws/src/pycram + rm -rf .git .github .gitignore .gitmodules .readthedocs.yaml + cp -r /__w/${{ github.event.repository.name }}/${{ github.event.repository.name }}/ros/src/pycram /opt/ros/overlay_ws/src + + # ---------------------------------------------------------------------------------------------------------------- + + - name: Install python dependencies + run: | + pip3 install --upgrade pip --root-user-action=ignore 
+ cd /opt/ros/overlay_ws/src/pycram + pip3 install -r requirements.txt + sudo apt-get install -y libpq-dev + pip3 install -r requirements-resolver.txt + + # ---------------------------------------------------------------------------------------------------------------- + + - name: Install Jupytext & Jupyter notebook + run: | + pip3 install jupytext treon + + # ---------------------------------------------------------------------------------------------------------------- + - name: Convert Notebooks + run: | + cd /opt/ros/overlay_ws/src/pycram/examples + rm -rf tmp + mkdir tmp + jupytext --to notebook *.md + mv *.ipynb tmp && cd tmp + + - name: Source and start ROS + run: | + source /opt/ros/overlay_ws/devel/setup.bash + roslaunch pycram ik_and_description.launch & + + - name: Run tests + run: | + source /opt/ros/overlay_ws/devel/setup.bash + roscd pycram/examples/tmp + treon --thread 1 -v --exclude=migrate_neems.ipynb \ No newline at end of file diff --git a/examples/action_designator.md b/examples/action_designator.md index 4988e4f8b..e0a4babf9 100644 --- a/examples/action_designator.md +++ b/examples/action_designator.md @@ -5,14 +5,13 @@ jupyter: extension: .md format_name: markdown format_version: '1.3' - jupytext_version: 1.16.2 + jupytext_version: 1.16.3 kernelspec: - display_name: Python 3 + display_name: Python 3 (ipykernel) language: python name: python3 --- - # Action Designator @@ -29,7 +28,7 @@ Designator Description will then pick one of the poses and return a performable picked pose. - + ## Navigate Action @@ -41,9 +40,12 @@ First, we need a BulletWorld with a robot. 
from pycram.worlds.bullet_world import BulletWorld from pycram.world_concepts.world_object import Object from pycram.datastructures.enums import ObjectType, WorldMode +from pycram.datastructures.pose import Pose world = BulletWorld(WorldMode.GUI) -pr2 = Object("pr2", ObjectType.ROBOT, "pr2.urdf") +pr2 = Object("pr2", ObjectType.ROBOT, "pr2.urdf", pose=Pose([1, 2, 0])) +apartment = Object("apartment", ObjectType.ENVIRONMENT, "apartment.urdf") +milk = Object("milk", ObjectType.MILK, "milk.stl", pose=Pose([2.3, 2, 1.1])) ``` To move the robot we need to create a description and resolve it to an actual Designator. The description of navigation @@ -53,7 +55,7 @@ only needs a list of possible poses. from pycram.designators.action_designator import NavigateAction from pycram.datastructures.pose import Pose -pose = Pose([1, 0, 0], [0, 0, 0, 1]) +pose = Pose([1.3, 2, 0], [0, 0, 0, 1]) # This is the Designator Description navigate_description = NavigateAction(target_locations=[pose]) @@ -141,16 +143,11 @@ the example on object designators for more details. To start we need an environment in which we can pick up and place things as well as an object to pick up. 
```python -kitchen = Object("kitchen", ObjectType.ENVIRONMENT, "kitchen.urdf") -milk = Object("milk", ObjectType.MILK, "milk.stl", pose=Pose([1.3, 1, 0.9])) - world.reset_world() ``` ```python -from pycram.designators.action_designator import PickUpAction, PlaceAction, ParkArmsAction, MoveTorsoAction, - -NavigateAction +from pycram.designators.action_designator import PickUpAction, PlaceAction, ParkArmsAction, MoveTorsoAction,NavigateAction from pycram.designators.object_designator import BelieveObject from pycram.process_module import simulated_robot from pycram.datastructures.enums import Arms, Grasp @@ -164,37 +161,33 @@ with simulated_robot: MoveTorsoAction([0.3]).resolve().perform() - NavigateAction([Pose([0.78, 1, 0.0], - [0.0, 0.0, 0.014701099828940344, 0.9998919329926708])]).resolve().perform() + NavigateAction([Pose([1.8, 2, 0.0], + [0.0, 0.0, 0., 1])]).resolve().perform() PickUpAction(object_designator_description=milk_desig, arms=[arm], grasps=[Grasp.RIGHT]).resolve().perform() - NavigateAction([Pose([-1.90, 0.78, 0.0], - [0.0, 0.0, 0.16439898301071468, 0.9863939245479175])]).resolve().perform() - PlaceAction(object_designator_description=milk_desig, - target_locations=[Pose([-1.20, 1.0192, 0.9624], - # [0.0, 0.0, 0.6339889056055381, 0.7733421413379024])], + target_locations=[Pose([2.4, 1.8, 1], [0, 0, 0, 1])], arms=[arm]).resolve().perform() ``` -```python -world.reset_world() -``` - ## Look At Look at lets the robot look at a specific point, for example if it should look at an object for detecting. 
+```python +world.reset_world() +``` + ```python from pycram.designators.action_designator import LookAtAction from pycram.process_module import simulated_robot from pycram.datastructures.pose import Pose -target_location = Pose([1, 0, 0.5], [0, 0, 0, 1]) +target_location = Pose([3, 2, 0.5], [0, 0, 0, 1]) with simulated_robot: LookAtAction(targets=[target_location]).resolve().perform() ``` @@ -206,7 +199,7 @@ up/place example, if you didn't execute that example you can spawn the milk with designator will return a resolved instance of an ObjectDesignatorDescription. ```python -milk = Object("milk", ObjectType.MILK, "milk.stl", pose=Pose([1.3, 1, 0.9])) +world.reset_world() ``` ```python @@ -221,7 +214,7 @@ milk_desig = BelieveObject(names=["milk"]) with simulated_robot: ParkArmsAction([Arms.BOTH]).resolve().perform() - NavigateAction([Pose([0, 1, 0], [0, 0, 0, 1])]).resolve().perform() + NavigateAction([Pose([1.7, 2, 0], [0, 0, 0, 1])]).resolve().perform() LookAtAction(targets=[milk_desig.resolve().pose]).resolve().perform() @@ -237,8 +230,7 @@ Place plan used in the Pick-up and Place example. Since we need an Object which don't need to do this if you already have spawned it in a previous example. ```python -kitchen = Object("kitchen", ObjectType.ENVIRONMENT, "kitchen.urdf") -milk = Object("milk", ObjectType.MILK, "milk.stl", pose=Pose([1.3, 1, 0.9])) +world.reset_world() ``` ```python @@ -252,8 +244,8 @@ milk_desig = BelieveObject(names=["milk"]) description = TransportAction(milk_desig, [Arms.LEFT], - [Pose([-1.35, 0.78, 0.95], - [0.0, 0.0, 0.16439898301071468, 0.9863939245479175])]) + [Pose([2.4, 1.8, 1], + [0, 0, 0, 1])]) with simulated_robot: MoveTorsoAction([0.2]).resolve().perform() description.resolve().perform() @@ -268,11 +260,7 @@ For the moment this designator works only in the apartment environment, therefor apartment. 
```python -kitchen.remove() -``` - -```python -apartment = Object("apartment", ObjectType.ENVIRONMENT, "apartment.urdf") +world.reset_world() ``` ```python @@ -301,15 +289,6 @@ describing the handle to be grasped. This action designator only works in the apartment environment for the moment, therefore we remove the kitchen and spawn the apartment. Additionally, we open the drawer such that we can close it with the action designator. -```python -kitchen.remove() -``` - -```python -apartment = Object("apartment", ObjectType.ENVIRONMENT, "apartment.urdf") -apartment.set_joint_state("cabinet10_drawer_top_joint", 0.4) -``` - ```python from pycram.designators.action_designator import * from pycram.designators.object_designator import * diff --git a/examples/cram_plan_tutorial.md b/examples/cram_plan_tutorial.md index 7ca106a34..688db502b 100644 --- a/examples/cram_plan_tutorial.md +++ b/examples/cram_plan_tutorial.md @@ -1,4 +1,4 @@ ---- +--- jupyter: jupytext: text_representation: @@ -16,28 +16,29 @@ jupyter: In this tutorial we will walk through the capabilities of task trees in pycram. -First we have to import the necessary functionality from pycram. +Next we will create a bullet world with a PR2 in a kitchen containing milk and cereal. 
```python -from pycram.bullet_world import BulletWorld -from pycram.robot_descriptions import robot_description -import pycram.task -from pycram.resolver.plans import Arms +from pycram.worlds.bullet_world import BulletWorld +from pycram.robot_description import RobotDescription +import pycram.tasktree +from pycram.datastructures.enums import Arms, ObjectType from pycram.designators.action_designator import * from pycram.designators.location_designator import * from pycram.process_module import simulated_robot from pycram.designators.object_designator import * +from pycram.datastructures.pose import Pose +from pycram.world_concepts.world_object import Object import anytree import pycram.failures -``` +import numpy as np -Next we will create a bullet world with a PR2 in a kitchen containing milk and cereal. +np.random.seed(4) -```python world = BulletWorld() -robot = Object(robot_description.name, "robot", robot_description.name + ".urdf") +robot = Object("pr2", ObjectType.ROBOT, "pr2.urdf") robot_desig = ObjectDesignatorDescription(names=['pr2']).resolve() -apartment = Object("apartment", "environment", "apartment.urdf", position=[-1.5, -2.5, 0]) +apartment = Object("apartment", "environment", "apartment.urdf", pose=Pose([-1.5, -2.5, 0])) apartment_desig = ObjectDesignatorDescription(names=['apartment']).resolve() table_top = apartment.get_link_position("cooktop") # milk = Object("milk", "milk", "milk.stl", position=[table_top[0]-0.15, table_top[1], table_top[2]]) @@ -53,7 +54,7 @@ import numpy as np def get_n_random_positions(pose_list, n=4, dist=0.5, random=True): - positions = [pos[0] for pos in pose_list[:1000]] + positions = [pose.position_as_list() for pose in pose_list[:1000]] all_indices = list(range(len(positions))) print(len(all_indices)) pos_idx = np.random.choice(all_indices) if random else all_indices[0] @@ -98,22 +99,22 @@ def get_n_random_positions(pose_list, n=4, dist=0.5, random=True): ```python from pycram.costmaps import SemanticCostmap -from 
pycram.pose_generator_and_validator import pose_generator +from pycram.pose_generator_and_validator import PoseGenerator scm = SemanticCostmap(apartment, "island_countertop") -poses_list = list(pose_generator(scm, number_of_samples=-1)) -poses_list.sort(reverse=True, key=lambda x: np.linalg.norm(x[0])) +poses_list = list(PoseGenerator(scm, number_of_samples=-1)) +poses_list.sort(reverse=True, key=lambda x: np.linalg.norm(x.position_as_list())) object_poses = get_n_random_positions(poses_list) -object_names = ["bowl", "milk", "breakfast_cereal", "spoon"] +object_names = ["bowl", "breakfast_cereal", "spoon"] objects = {} object_desig = {} for obj_name, obj_pose in zip(object_names, object_poses): print(obj_name) print(obj_pose) objects[obj_name] = Object(obj_name, obj_name, obj_name + ".stl", - position=[obj_pose[0][0], obj_pose[0][1], table_top[2]]) - objects[obj_name].move_base_to_origin_pos() - objects[obj_name].original_pose = objects[obj_name].get_position_and_orientation() + pose=Pose([obj_pose.position.x, obj_pose.position.y, table_top.z])) + objects[obj_name].move_base_to_origin_pose() + objects[obj_name].original_pose = objects[obj_name].pose object_desig[obj_name] = ObjectDesignatorDescription(names=[obj_name], types=[obj_name]).resolve() print(object_poses) ``` @@ -121,16 +122,16 @@ print(object_poses) If You want to visualize all apartment frames ```python -import pybullet as p +import pycram_bullet as p for link_name in apartment.links.keys(): world.add_vis_axis(apartment.get_link_pose(link_name)) - p.addUserDebugText(link_name, apartment.get_link_position(link_name)) + #p.addUserDebugText(link_name, apartment.get_link_position(link_name), physicsClientId=world.id) ``` ```python world.remove_vis_axis() -p.removeAllUserDebugItems() +#p.removeAllUserDebugItems() ``` Finally, we create a plan where the robot parks his arms, walks to the kitchen counter and picks the thingy. Then we @@ -138,11 +139,12 @@ execute the plan. 
```python from pycram.external_interfaces.ik import IKError +from pycram.datastructures.enums import Grasp -@pycram.task.with_tree +@pycram.tasktree.with_tree def plan(obj, obj_desig, torso=0.2, place="countertop"): - world.reset_bullet_world() + world.reset_world() with simulated_robot: ParkArmsActionPerformable(Arms.BOTH).perform() @@ -154,11 +156,16 @@ def plan(obj, obj_desig, torso=0.2, place="countertop"): ParkArmsActionPerformable(Arms.BOTH).perform() good_torsos.append(torso) picked_up_arm = pose.reachable_arms[0] - PickUpActionPerformable(object_designator=obj_desig, arm=pose.reachable_arms[0], grasp="front").perform() + PickUpActionPerformable(object_designator=obj_desig, arm=pose.reachable_arms[0], grasp=Grasp.FRONT).perform() ParkArmsActionPerformable(Arms.BOTH).perform() scm = SemanticCostmapLocation(place, apartment_desig, obj_desig) - pose_island = scm.resolve() + scm = iter(scm) + pose_island = None + for i in range(np.random.randint(5, 15)): + pose_island = next(scm) + print(pose_island) + place_location = CostmapLocation(target=pose_island.pose, reachable_for=robot_desig, reachable_arm=picked_up_arm) @@ -180,7 +187,7 @@ for obj_name in object_names: try: plan(objects[obj_name], object_desig[obj_name], torso=torso, place="island_countertop") done = True - objects[obj_name].original_pose = objects[obj_name].get_position_and_orientation() + objects[obj_name].original_pose = objects[obj_name].pose except (StopIteration, IKError) as e: print(type(e)) print(e) @@ -195,7 +202,7 @@ Now we get the task tree from its module and render it. Rendering can be done wi anytree package. We will use ascii rendering here for ease of displaying. ```python -tt = pycram.task.task_tree +tt = pycram.tasktree.task_tree print(anytree.RenderTree(tt, style=anytree.render.AsciiStyle())) ``` @@ -205,8 +212,8 @@ tree. Hence, a NoOperation node is the root of any tree. If we re-execute the pl tree even though they are not connected. 
```python -world.reset_bullet_world() -plan() +world.reset_world() +plan(objects["bowl"], object_desig["bowl"], torso=0.25) print(anytree.RenderTree(tt, style=anytree.render.AsciiStyle())) ``` @@ -216,9 +223,9 @@ reset objects are available. At the end of a with block the old state is restore called ``simulation()``. ```python -with pycram.task.SimulatedTaskTree() as stt: - print(anytree.RenderTree(pycram.task.task_tree, style=anytree.render.AsciiStyle())) -print(anytree.RenderTree(pycram.task.task_tree, style=anytree.render.AsciiStyle())) +with pycram.tasktree.SimulatedTaskTree() as stt: + print(anytree.RenderTree(pycram.tasktree.task_tree, style=anytree.render.AsciiStyle())) +print(anytree.RenderTree(pycram.tasktree.task_tree, style=anytree.render.AsciiStyle())) ``` Task tree can be manipulated with ordinary anytree manipulation. If we for example want to discard the second plan, we @@ -233,42 +240,44 @@ We can now re-execute this (modified) plan by executing the leaf in pre-ordering functionality. This will not append the re-execution to the task tree. ```python -world.reset_bullet_world() +world.reset_world() with simulated_robot: - [node.code.execute() for node in tt.root.leaves] -print(anytree.RenderTree(pycram.task.task_tree, style=anytree.render.AsciiStyle())) + [node.action.perform() for node in tt.root.leaves] +print(anytree.RenderTree(pycram.tasktree.task_tree, style=anytree.render.AsciiStyle())) ``` Nodes in the task tree contain additional information about the status and time of a task. 
```python -print(pycram.task.task_tree.children[0]) +print(pycram.tasktree.task_tree.children[0]) ``` The task tree can also be reset to an empty one by invoking: ```python -pycram.task.reset_tree() -print(anytree.RenderTree(pycram.task.task_tree, style=anytree.render.AsciiStyle())) +pycram.tasktree.task_tree.reset_tree() +print(anytree.RenderTree(pycram.tasktree.task_tree, style=anytree.render.AsciiStyle())) ``` -If a plan fails using the PlanFailure exception, the plan will not stop. Instead, the error will be logged and saved in -the task tree as a failed subtask. First let's create a simple failing plan and execute it. +If a plan fails using the PlanFailure exception, the plan will stop and raise the respective error. +Additionally, the error will be logged in the node of the TaskTree. First let's create a simple failing plan and execute it. ```python -@pycram.task.with_tree +@pycram.tasktree.with_tree def failing_plan(): - raise pycram.plan_failures.PlanFailure("Oopsie!") - + raise pycram.failures.PlanFailure("Oopsie!") -failing_plan() +try: + failing_plan() +except pycram.failures.PlanFailure as e: + print(e) ``` We can now investigate the nodes of the tree, and we will see that the tree indeed contains a failed task. 
```python -print(anytree.RenderTree(pycram.task.task_tree, style=anytree.render.AsciiStyle())) -print(pycram.task.task_tree.children[0]) +print(anytree.RenderTree(pycram.tasktree.task_tree, style=anytree.render.AsciiStyle())) +print(pycram.tasktree.task_tree.children[0]) ``` ```python diff --git a/examples/improving_actions.md b/examples/improving_actions.md index 4cb46a28e..e3e4cecf0 100644 --- a/examples/improving_actions.md +++ b/examples/improving_actions.md @@ -51,7 +51,7 @@ from pycram.world_concepts.world_object import Object from pycram.robot_descriptions import robot_description from pycram.datastructures.enums import ObjectType, WorldMode from pycram.datastructures.pose import Pose -from pycram.ros.viz_marker_publisher import VizMarkerPublisher +from pycram.ros_utils.viz_marker_publisher import VizMarkerPublisher from pycram.process_module import ProcessModule, simulated_robot from pycram.designators.specialized_designators.probabilistic.probabilistic_action import MoveAndPickUp, Arms, Grasp from pycram.tasktree import task_tree, TaskTree @@ -64,11 +64,11 @@ random.seed(69) Next, we connect to a database where we can store and load robot experiences. ```python -pycrorm_uri = os.getenv('PYCRORM_URI') +pycrorm_uri = "robot_enjoyer:I_love_robots_123@neem-2.informatik.uni-bremen.de:3306/pycram_ci" pycrorm_uri = "mysql+pymysql://" + pycrorm_uri -engine = sqlalchemy.create_engine('sqlite:///:memory:') +engine = sqlalchemy.create_engine(pycrorm_uri) session = sqlalchemy.orm.sessionmaker(bind=engine)() -pycram.orm.base.Base.metadata.create_all(engine) +# pycram.orm.base.Base.metadata.create_all(engine) ``` Now we construct an empty world with just a floating milk, where we can learn about PickUp actions. @@ -99,15 +99,16 @@ Next, we will perform pick up tasks using the default policy and observe the suc The robot will now experiment with the behaviour specified by the default policy and observe his success rate in doing so. 
After finishing the experiments, we insert the results into the database. +If you want to generate some data locally, you can uncomment the following code. ```python -pycram.orm.base.ProcessMetaData().description = "Experimenting with Pick Up Actions" -fpa.sample_amount = 100 -with simulated_robot: - fpa.batch_rollout() -task_tree.root.insert(session) -session.commit() -task_tree.reset_tree() +# pycram.orm.base.ProcessMetaData().description = "Experimenting with Pick Up Actions" +# fpa.sample_amount = 100 +# with simulated_robot: +# fpa.batch_rollout() +# task_tree.root.insert(session) +# session.commit() +# task_tree.reset_tree() ``` Let's query the data that is needed to learn a pick up action and have a look at it. @@ -122,10 +123,13 @@ samples We can now learn a probabilistic model from the data. We will use the JPT algorithm to learn a model from the data. ```python -variables = infer_variables_from_dataframe(samples, scale_continuous_types=False) +variables = infer_variables_from_dataframe(samples, scale_continuous_types=False, + min_samples_per_quantile=2, + min_likelihood_improvement = 0.) model = JPT(variables, min_samples_leaf=25) model.fit(samples) model = model.probabilistic_circuit +print(model) ``` ```python @@ -147,12 +151,12 @@ Let's make a monte carlo estimate on the success probability of the new model. ```python fpa.policy = model -fpa.sample_amount = 500 +fpa.sample_amount = 5 with simulated_robot: fpa.batch_rollout() ``` -We can see, that our new and improved model has a success probability of 60% as opposed to the 30% from the standard +We can see, that our new and improved model has a better success probability as opposed to the 30% from the standard policy. 
Next, we put the learned model to the test in a complex environment, where the milk is placed in a difficult to access @@ -206,6 +210,6 @@ with simulated_robot: ``` ```python -# world.exit() -# viz_marker_publisher._stop_publishing() +world.exit() +viz_marker_publisher._stop_publishing() ``` diff --git a/examples/intro.md b/examples/intro.md index c9d3cb5cc..12480fdea 100644 --- a/examples/intro.md +++ b/examples/intro.md @@ -7,7 +7,7 @@ jupyter: format_version: '1.3' jupytext_version: 1.16.3 kernelspec: - display_name: Python 3 + display_name: Python 3 (ipykernel) language: python name: python3 --- @@ -34,6 +34,10 @@ from pycram.datastructures.enums import ObjectType, WorldMode from pycram.datastructures.pose import Pose world = BulletWorld(mode=WorldMode.GUI) + +milk = Object("Milk", ObjectType.MILK, "milk.stl") +pr2 = Object("pr2", ObjectType.ROBOT, "pr2.urdf") +cereal = Object("cereal", ObjectType.BREAKFAST_CEREAL, "breakfast_cereal.stl", pose=Pose([1.4, 1, 0.95])) ``` The BulletWorld allows to render images from arbitrary positions. In the following example we render images with the @@ -72,12 +76,6 @@ Optional: * Color * Ignore Cached Files -If there is only a filename and no path, PyCRAM will check in the resource directory if there is a matching file. - -```python -milk = Object("Milk", ObjectType.MILK, "milk.stl") -``` - Objects provide methods to change the position and rotation, change the color, attach other objects, set the state of joints if the objects has any or get the position and orientation of a link. @@ -88,12 +86,6 @@ methods related to these will not work. milk.set_position(Pose([1, 0, 0])) ``` -To remove an Object from the BulletWorld just call the 'remove' method on the Object. - -```python -milk.remove() -``` - Since everything inside the BulletWorld is an Object, even a complex environment Object like the kitchen can be spawned in the same way as the milk. @@ -187,35 +179,33 @@ Allows for geometric reasoning in the BulletWorld. 
At the moment the following t * {meth}`~pycram.world_reasoning.blocking` * {meth}`~pycram.world_reasoning.supporting` -To show the geometric reasoning we first spawn a robot as well as the milk Object again. - -```python -import pycram.world_reasoning as btr - -milk = Object("Milk", ObjectType.MILK, "milk.stl", pose=Pose([1, 0, 1])) -pr2 = Object("pr2", ObjectType.ROBOT, "pr2.urdf") -``` - We start with testing for visibility ```python +from pycram.world_reasoning import visible + milk.set_position(Pose([1, 0, 1])) -visible = btr.visible(milk, pr2.get_link_pose("wide_stereo_optical_frame")) +visible = visible(milk, pr2.get_link_pose("wide_stereo_optical_frame")) print(f"Milk visible: {visible}") ``` ```python +from pycram.world_reasoning import contact +from pycram.datastructures.world import World + milk.set_position(Pose([1, 0, 0.05])) -plane = BulletWorld.current_bullet_world.objects[0] -contact = btr.contact(milk, plane) +plane = World.current_world.objects[0] +contact = contact(milk, plane) print(f"Milk is in contact with the floor: {contact}") ``` ```python +from pycram.world_reasoning import reachable + milk.set_position(Pose([0.6, -0.5, 0.7])) -reachable = btr.reachable(milk, pr2, "r_gripper_tool_frame") +reachable = reachable(milk, pr2, "r_gripper_tool_frame") print(f"Milk is reachable for the PR2: {reachable}") ``` @@ -334,13 +324,6 @@ with simulated_robot: To get familiar with the PyCRAM Framework we will write a simple pick and place plan. This plan will let the robot grasp a cereal box from the kitchen counter and place it on the kitchen island. This is a simple pick and place plan. 
-```python -from pycram.designators.object_designator import * - -cereal = Object("cereal", ObjectType.BREAKFAST_CEREAL, "breakfast_cereal.stl", pose=Pose([1.4, 1, 0.95])) - -``` - ```python from pycram.datastructures.enums import Grasp @@ -350,7 +333,7 @@ robot_desig = ObjectDesignatorDescription(names=["pr2"]).resolve() with simulated_robot: ParkArmsAction([Arms.BOTH]).resolve().perform() - MoveTorsoAction([0.3]).resolve().perform() + MoveTorsoAction([0.33]).resolve().perform() pickup_pose = CostmapLocation(target=cereal_desig.resolve(), reachable_for=robot_desig).resolve() pickup_arm = pickup_pose.reachable_arms[0] diff --git a/examples/language.md b/examples/language.md index 197c879bb..0b0862617 100644 --- a/examples/language.md +++ b/examples/language.md @@ -7,7 +7,7 @@ jupyter: format_version: '1.3' jupytext_version: 1.16.3 kernelspec: - display_name: Python 3 + display_name: Python 3 (ipykernel) language: python name: python3 --- @@ -84,7 +84,7 @@ pr2 = Object("pr2", ObjectType.ROBOT, "pr2.urdf") ```python from pycram.process_module import simulated_robot -world.reset_bullet_world() +world.reset_world() with simulated_robot: plan.perform() @@ -104,7 +104,7 @@ from pycram.datastructures.pose import Pose from pycram.datastructures.enums import Arms from pycram.process_module import simulated_robot -world.reset_bullet_world() +world.reset_world() navigate = NavigateAction([Pose([1, 1, 0])]) park = ParkArmsAction([Arms.BOTH]) @@ -166,7 +166,7 @@ from pycram.datastructures.pose import Pose from pycram.datastructures.enums import Arms from pycram.process_module import simulated_robot -world.reset_bullet_world() +world.reset_world() navigate = NavigateAction([Pose([1, 1, 0])]) park = ParkArmsAction([Arms.BOTH]) @@ -330,4 +330,4 @@ If you are finished with this example you can close the world with the cell belo ```python world.exit() -``` \ No newline at end of file +``` diff --git a/examples/location_designator.md b/examples/location_designator.md index 
1fa54ef03..7d1fcd5c2 100644 --- a/examples/location_designator.md +++ b/examples/location_designator.md @@ -7,7 +7,7 @@ jupyter: format_version: '1.3' jupytext_version: 1.16.3 kernelspec: - display_name: Python 3 + display_name: Python 3 (ipykernel) language: python name: python3 --- @@ -43,8 +43,9 @@ from pycram.world_concepts.world_object import Object from pycram.datastructures.enums import ObjectType, WorldMode from pycram.datastructures.pose import Pose -world = BulletWorld() -kitchen = Object("kitchen", ObjectType.ENVIRONMENT, "kitchen.urdf") +world = BulletWorld(WorldMode.GUI) +apartment = Object("apartment", ObjectType.ENVIRONMENT, "apartment.urdf") +pr2 = Object("pr2", ObjectType.ROBOT, "pr2.urdf") ``` Next up we will create the location designator description, the {meth}`~pycram.designators.location_designator.CostmapLocation` that we will be using needs a @@ -57,7 +58,7 @@ which we will be extending later. ```python from pycram.designators.location_designator import CostmapLocation -target = kitchen.get_pose() +target = apartment.get_pose() location_description = CostmapLocation(target) @@ -76,8 +77,7 @@ Since a robot is needed we will use the PR2 and use a milk as a target point for PR2 will be set to 0.2 since otherwise the arms of the robot will be too low to reach on the countertop. ```python -pr2 = Object("pr2", ObjectType.ROBOT, "pr2.urdf") -pr2.set_joint_state("torso_lift_joint", 0.2) +pr2.set_joint_position("torso_lift_joint", 0.2) milk = Object("milk", ObjectType.MILK, "milk.stl", pose=Pose([1.3, 1, 0.9])) ``` @@ -106,11 +106,6 @@ designator but this time we use the ```visible_for``` parameter. For this example we need the milk as well as the PR2, so if you did not spawn them during the previous location designator you can spawn them with the following cell. 
-```python -pr2 = Object("pr2", ObjectType.ROBOT, "pr2.urdf") -milk = Object("milk", ObjectType.MILK, "milk.stl", pose=Pose([1.3, 1, 0.9])) -``` - ```python from pycram.designators.location_designator import CostmapLocation from pycram.designators.object_designator import BelieveObject @@ -136,19 +131,14 @@ the island is a part of the kitchen and the link name of the island surface. For this example we need the kitchen as well as the milk. If you spawned them in one of the previous examples you don't need to execute the following cell. -```python -kitchen = Object("kitchen", ObjectType.ENVIRONMENT, "kitchen.urdf") -milk = Object("milk", ObjectType.MILK, "milk.stl") -``` - ```python from pycram.designators.location_designator import SemanticCostmapLocation from pycram.designators.object_designator import BelieveObject -kitchen_desig = BelieveObject(names=["kitchen"]).resolve() +kitchen_desig = BelieveObject(names=["apartment"]).resolve() milk_desig = BelieveObject(names=["milk"]).resolve() -location_description = SemanticCostmapLocation(urdf_link_name="kitchen_island_surface", +location_description = SemanticCostmapLocation(urdf_link_name="island_countertop", part_of=kitchen_desig, for_object=milk_desig) @@ -163,10 +153,6 @@ for the location described in the description. This can be useful if the first p We will see this at the example of a location designator for visibility. For this example we need the milk, if you already have a milk spawned in you world you can ignore the following cell. -```python -milk = Object("milk", ObjectType.MILK, "milk.stl") -``` - ```python from pycram.designators.location_designator import CostmapLocation from pycram.designators.object_designator import BelieveObject @@ -188,26 +174,14 @@ designator which describes the handle of the drawer. At the moment this location designator only works in the apartment environment, so please remove the kitchen if you spawned it in a previous example. 
Furthermore, we need a robot, so we also spawn the PR2 if it isn't spawned already. -```python -kitchen.remove() -``` - -```python -apartment = Object("apartment", ObjectType.ENVIRONMENT, "apartment.urdf") -``` - -```python -pr2 = Object("pr2", ObjectType.ROBOT, "pr2.urdf") -pr2.set_joint_state("torso_lift_joint", 0.25) -``` - ```python from pycram.designators.object_designator import * from pycram.designators.location_designator import * +from pycram.datastructures.enums import ObjectType apartment_desig = BelieveObject(names=["apartment"]) handle_desig = ObjectPart(names=["handle_cab10_t"], part_of=apartment_desig.resolve()) -robot_desig = BelieveObject(names=["pr2"]) +robot_desig = BelieveObject(types=[ObjectType.ROBOT]) access_location = AccessingLocation(handle_desig.resolve(), robot_desig.resolve()).resolve() print(access_location.pose) @@ -223,12 +197,15 @@ robot to reach a target pose. work. ```python -from pycram.designators.specialized_designators.location.giskard_location import GiskardLocation - -robot_desig = BelieveObject(names=["pr2"]).resolve() +import rosnode +if "/giskard" in rosnode.get_node_names(): -loc = GiskardLocation(target=Pose([1, 1, 1]), reachable_for=robot_desig).resolve() -print(loc.pose) + from pycram.designators.specialized_designators.location.giskard_location import GiskardLocation + + robot_desig = BelieveObject(names=["pr2"]).resolve() + + loc = GiskardLocation(target=Pose([1, 1, 1]), reachable_for=robot_desig).resolve() + print(loc.pose) ``` If you are finished with this example you can close the world with the following cell: diff --git a/examples/minimal_task_tree.md b/examples/minimal_task_tree.md index ff93d7680..124746378 100644 --- a/examples/minimal_task_tree.md +++ b/examples/minimal_task_tree.md @@ -7,7 +7,7 @@ jupyter: format_version: '1.3' jupytext_version: 1.16.3 kernelspec: - display_name: Python 3 + display_name: Python 3 (ipykernel) language: python name: python3 --- @@ -89,7 +89,7 @@ 
print(anytree.RenderTree(tt.root)) As we see every task in the plan got recorded correctly. It is noticeable that the tree begins with a NoOperation node. This is done because several, not connected, plans that get executed after each other should still appear in the task tree. Hence, a NoOperation node is the root of any tree. If we re-execute the plan we would see them appear in the same tree even though they are not connected. ```python -world.reset_current_world() +world.reset_world() plan() print(anytree.RenderTree(tt.root)) ``` @@ -113,21 +113,21 @@ We can now re-execute this (modified) plan by executing the leaf in pre-ordering ```python world.reset_world() with simulated_robot: - [node.code.execute() for node in tt.root.leaves] -print(anytree.RenderTree(tt.root, style=anytree.render.AsciiStyle())) + [node.action.perform() for node in tt.root.leaves] +print(anytree.RenderTree(pycram.tasktree.task_tree, style=anytree.render.AsciiStyle())) ``` Nodes in the task tree contain additional information about the status and time of a task. ```python -print(pycram.tasktree.task_tree.root.children[0]) +print(pycram.tasktree.task_tree.children[0]) ``` The task tree can also be reset to an empty one by invoking ```python pycram.tasktree.task_tree.reset_tree() -print(anytree.RenderTree(pycram.tasktree.task_tree.root, style=anytree.render.AsciiStyle())) +print(anytree.RenderTree(pycram.tasktree.task_tree, style=anytree.render.AsciiStyle())) ``` If a plan fails using the PlanFailure exception, the plan will not stop. Instead, the error will be logged and saved in the task tree as a failed subtask. First let's create a simple failing plan and execute it. @@ -135,19 +135,19 @@ If a plan fails using the PlanFailure exception, the plan will not stop. 
Instead ```python @pycram.tasktree.with_tree def failing_plan(): - raise pycram.plan_failures.PlanFailure("Oopsie!") + raise pycram.failures.PlanFailure("Oopsie!") try: failing_plan() -except pycram.plan_failures.PlanFailure as e: +except pycram.failures.PlanFailure as e: print(e) ``` We can now investigate the nodes of the tree, and we will see that the tree indeed contains a failed task. ```python -print(anytree.RenderTree(pycram.tasktree.task_tree.root, style=anytree.render.AsciiStyle())) -print(pycram.tasktree.task_tree.root.children[0]) +print(anytree.RenderTree(pycram.tasktree.task_tree, style=anytree.render.AsciiStyle())) +print(pycram.tasktree.task_tree.children[0]) ``` ```python diff --git a/examples/motion_designator.md b/examples/motion_designator.md index a1eb4d8a1..ba5746dbd 100644 --- a/examples/motion_designator.md +++ b/examples/motion_designator.md @@ -26,9 +26,11 @@ BulletWorld as well as a PR2 robot. from pycram.worlds.bullet_world import BulletWorld from pycram.world_concepts.world_object import Object from pycram.datastructures.enums import ObjectType, WorldMode +from pycram.datastructures.pose import Pose world = BulletWorld(WorldMode.GUI) pr2 = Object("pr2", ObjectType.ROBOT, "pr2.urdf") +milk = Object("milk", ObjectType.MILK, "milk.stl", pose=Pose([1.5, 0, 1])) ``` ## Move @@ -106,10 +108,6 @@ view (FOV) this motion designator will return an object designator describing th Since we need an object that we can detect, we will spawn a milk for this. -```python -milk = Object("milk", ObjectType.MILK, "milk.stl", pose=Pose([1.5, 0, 1])) -``` - ```python from pycram.designators.motion_designator import DetectingMotion, LookingMotion from pycram.process_module import simulated_robot @@ -147,10 +145,6 @@ As long as the object is somewhere in the belief state (BulletWorld) a resolved Since we want to detect something we will spawn an object that we can detect. If you already spawned the milk from the previous example, you can skip this step.
-```python -milk = Object("milk", ObjectType.MILK, "milk.stl", pose=Pose([-1, 0, 1])) -``` - ```python from pycram.designators.motion_designator import WorldStateDetectingMotion from pycram.process_module import simulated_robot diff --git a/examples/object_designator.md b/examples/object_designator.md index db4d8645b..dbc8568a8 100644 --- a/examples/object_designator.md +++ b/examples/object_designator.md @@ -87,10 +87,6 @@ a link of the kitchen which is a BulletWorld Object. For this example we just need the kitchen, if you didn't spawn it in the previous example you can spawn it with the following cell. -```python -kitchen = Object("kitchen", ObjectType.ENVIRONMENT, "kitchen.urdf") -``` - ```python from pycram.designators.object_designator import ObjectPart, BelieveObject @@ -108,13 +104,6 @@ are describing. We will see this at the example of an object designator describi For this we need some objects, so if you didn't already spawn them you can use the next cell for this. -```python -kitchen = Object("kitchen", ObjectType.ENVIRONMENT, "kitchen.urdf") -milk = Object("milk", ObjectType.MILK, "milk.stl", pose=Pose([1.3, 1, 0.9])) -cereal = Object("froot_loops", ObjectType.BREAKFAST_CEREAL, "breakfast_cereal.stl", pose=Pose([1.3, 0.9, 0.95])) -spoon = Object("spoon", ObjectType.SPOON, "spoon.stl", pose=Pose([1.3, 1.1, 0.87])) -``` - ```python from pycram.designators.object_designator import BelieveObject diff --git a/examples/orm_querying_examples.md b/examples/orm_querying_examples.md index b24c7f740..975bc1efe 100644 --- a/examples/orm_querying_examples.md +++ b/examples/orm_querying_examples.md @@ -30,8 +30,7 @@ import tqdm import pycram.orm.base from pycram.worlds.bullet_world import BulletWorld from pycram.world_concepts.world_object import Object as BulletWorldObject -from pycram.designators.action_designator import MoveTorsoAction, PickUpAction, NavigateAction, ParkArmsAction, - ParkArmsActionPerformable, MoveTorsoActionPerformable +from
pycram.designators.action_designator import MoveTorsoAction, PickUpAction, NavigateAction, ParkArmsAction, ParkArmsActionPerformable, MoveTorsoActionPerformable from pycram.designators.object_designator import ObjectDesignatorDescription from pycram.failures import PlanFailure from pycram.process_module import ProcessModule @@ -140,7 +139,6 @@ class GraspingExplorer: # try to execute a grasping plan with simulated_robot: - ParkArmsActionPerformable(Arms.BOTH).perform() # navigate to sampled position NavigateAction([Pose(position, orientation)]).resolve().perform() @@ -188,7 +186,7 @@ session = sqlalchemy.orm.Session(bind=engine) pycram.orm.base.Base.metadata.create_all(bind=engine) session.commit() -explorer = GraspingExplorer(samples_per_scenario=30) +explorer = GraspingExplorer(samples_per_scenario=10) explorer.perform(session) ``` @@ -248,7 +246,7 @@ query = (select(TaskTreeNode.status, Object.obj_type, print(query) df = pd.read_sql(query, session.get_bind()) -df["status"] = df["status"].apply(lambda x: str(x.name)) +df["status"] = df["status"].apply(lambda x: str(x)) print(df) ``` diff --git a/examples/robot_description.md b/examples/robot_description.md index 9782b2cf7..0de1b831f 100644 --- a/examples/robot_description.md +++ b/examples/robot_description.md @@ -7,7 +7,7 @@ jupyter: format_version: '1.3' jupytext_version: 1.16.3 kernelspec: - display_name: Python 3 + display_name: Python 3 (ipykernel) language: python name: python3 --- @@ -49,7 +49,7 @@ import rospkg rospack = rospkg.RosPack() filename = rospack.get_path('pycram') + '/resources/robots/' + "pr2" + '.urdf' -pr2_description = RobotDescription("pr2", "base_link", "torso_lift_link", "torso_lift_joint", filename) +pr2_description = RobotDescription("pr2_example", "base_link", "torso_lift_link", "torso_lift_joint", filename) ``` ## Kinematic Chain Description diff --git a/requirements-resolver.txt b/requirements-resolver.txt index efcd1c22b..fb6e71116 100644 --- a/requirements-resolver.txt +++ 
b/requirements-resolver.txt @@ -1,2 +1,5 @@ -r requirements.txt - +pandas>=2.0.3 +psycopg2>=2.9.10 +pyjpt>=0.1.31 +pymysql>=1.0.3 diff --git a/src/pycram/datastructures/world.py b/src/pycram/datastructures/world.py index 9097b7e85..a3d96e1c8 100644 --- a/src/pycram/datastructures/world.py +++ b/src/pycram/datastructures/world.py @@ -1540,8 +1540,6 @@ class UseProspectionWorld: with UseProspectionWorld(): NavigateAction.Action([[1, 0, 0], [0, 0, 0, 1]]).perform() """ - - def __init__(self): self.prev_world: Optional[World] = None # The previous world is saved to restore it after the with block is exited. diff --git a/src/pycram/designators/location_designator.py b/src/pycram/designators/location_designator.py index 5bd50bcfb..22b266123 100644 --- a/src/pycram/designators/location_designator.py +++ b/src/pycram/designators/location_designator.py @@ -184,7 +184,8 @@ def __iter__(self): res = True arms = None if self.visible_for: - res = res and visibility_validator(maybe_pose, test_robot, target_pose, + visible_prospection_object = World.current_world.get_prospection_object_for_object(self.target.world_object) + res = res and visibility_validator(maybe_pose, test_robot, visible_prospection_object, World.current_world) if self.reachable_for: hand_links = [] @@ -218,7 +219,7 @@ def __init__(self, handle_desig: ObjectPart.Object, robot_desig: ObjectDesignato drawer will be opened. Calculating the pose while the drawer is open could lead to problems. 
:param handle_desig: ObjectPart designator for handle of the drawer - :param robot: Object designator for the robot which should open the drawer + :param robot_desig: Object designator for the robot which should open the drawer :param resolver: An alternative specialized_designators to create the location """ super().__init__(resolver) @@ -244,7 +245,7 @@ def __iter__(self) -> Location: # ground_pose = [[self.handle.part_pose[0][0], self.handle.part_pose[0][1], 0], self.handle.part_pose[1]] ground_pose = Pose(self.handle.part_pose.position_as_list()) ground_pose.position.z = 0 - occupancy = OccupancyCostmap(distance_to_obstacle=0.4, from_ros=False, size=200, resolution=0.02, + occupancy = OccupancyCostmap(distance_to_obstacle=0.25, from_ros=False, size=200, resolution=0.02, origin=ground_pose) gaussian = GaussianCostmap(200, 15, 0.02, ground_pose) diff --git a/src/pycram/language.py b/src/pycram/language.py index 1d8180e30..8ed8e17a2 100644 --- a/src/pycram/language.py +++ b/src/pycram/language.py @@ -211,14 +211,16 @@ def perform(self): :return: """ + results = [] for i in range(self.repeat): for child in self.children: if self.interrupted: return try: - child.resolve().perform() + results.append(child.resolve().perform()) except PlanFailure as e: self.root.exceptions[self] = e + return State.SUCCEEDED, results def __init__(self, parent: NodeMixin = None, children: Iterable[NodeMixin] = None, repeat: int = 1): """ diff --git a/src/pycram/tasktree.py b/src/pycram/tasktree.py index 06440829e..697833c4f 100644 --- a/src/pycram/tasktree.py +++ b/src/pycram/tasktree.py @@ -91,7 +91,7 @@ def __str__(self): "Status: %s \n " \ "start_time: %s \n " \ "end_time: %s \n " \ - "" % (str(self.action), self.start_time, self.status, self.end_time) + "" % (str(self.action), self.status, self.start_time, self.end_time) def __repr__(self): return str(self.action.__class__.__name__) @@ -208,6 +208,12 @@ def __init__(self): """ self.root = TaskTreeNode() self.current_node = self.root 
+ self.name = "TaskTree" + self.insert = self.root.insert + + @property + def children(self): + return self.root.children def __len__(self): """ diff --git a/src/pycram/worlds/bullet_world.py b/src/pycram/worlds/bullet_world.py index 90851e466..4b1ddff97 100755 --- a/src/pycram/worlds/bullet_world.py +++ b/src/pycram/worlds/bullet_world.py @@ -1,6 +1,7 @@ # used for delayed evaluation of typing until python 3.11 becomes mainstream from __future__ import annotations +import os import threading import time @@ -16,6 +17,7 @@ from ..datastructures.world import World from ..object_descriptors.generic import ObjectDescription as GenericObjectDescription from ..object_descriptors.urdf import ObjectDescription +from ..ros.logging import loginfo from ..validation.goal_validator import (validate_multiple_joint_positions, validate_joint_position, validate_object_pose, validate_multiple_object_poses) from ..world_concepts.constraints import Constraint @@ -448,6 +450,11 @@ def __init__(self, world: World, mode: WorldMode): self.world = world self.mode: WorldMode = mode + # Checks if there is a display connected to the system. If not, the simulation will be run in direct mode. + if not "DISPLAY" in os.environ: + loginfo("No display detected. Running the simulation in direct mode.") + self.mode = WorldMode.DIRECT + def run(self): """ Initializes the new simulation and checks in an endless loop