From 757c5052f9f566c78b67355d4562851dd1dc35a6 Mon Sep 17 00:00:00 2001 From: Naomi Pentrel <5212232+npentrel@users.noreply.github.com> Date: Thu, 14 Sep 2023 19:21:15 -0400 Subject: [PATCH 1/7] Fix python formatting everywhere --- .flake8 | 4 + docs/components/arm/_index.md | 11 +- docs/components/base/_index.md | 28 +- docs/components/board/_index.md | 38 ++- docs/components/camera/_index.md | 8 +- docs/components/gantry/_index.md | 12 +- docs/components/input-controller/_index.md | 34 ++- docs/components/motor/_index.md | 5 +- docs/components/movement-sensor/_index.md | 34 ++- docs/program/_index.md | 2 + docs/program/apis/cloud.md | 36 ++- docs/program/apis/robot.md | 3 +- docs/program/apis/sessions.md | 6 +- docs/program/run.md | 19 +- docs/services/frame-system/_index.md | 6 +- docs/services/motion/_index.md | 34 ++- docs/services/motion/constraints.md | 11 +- docs/services/navigation/_index.md | 5 +- docs/services/slam/_index.md | 6 +- docs/services/vision/_index.md | 3 +- docs/services/vision/classification.md | 3 +- .../tutorials/configure/build-a-mock-robot.md | 13 +- docs/tutorials/custom/custom-base-dog.md | 23 +- docs/tutorials/get-started/confetti-bot.md | 7 +- docs/tutorials/get-started/lazy-susan.md | 4 +- docs/tutorials/get-started/try-viam-sdk.md | 3 + docs/tutorials/projects/claw-game.md | 98 +++++-- docs/tutorials/projects/guardian.md | 20 +- .../projects/make-a-plant-watering-robot.md | 52 ++-- .../tutorials/projects/pet-treat-dispenser.md | 21 +- .../tutorials/projects/send-security-photo.md | 23 +- docs/tutorials/projects/tipsy.md | 116 +++++---- .../accessing-and-moving-robot-arm.md | 4 +- .../services/color-detection-scuttle.md | 241 +++++++++--------- docs/tutorials/services/constrain-motion.md | 167 ++++++++---- .../services/plan-motion-with-arm-gripper.md | 100 ++++++-- .../services/webcam-line-follower-robot.md | 15 +- 37 files changed, 761 insertions(+), 454 deletions(-) create mode 100644 .flake8 diff --git a/.flake8 b/.flake8 new file 
mode 100644 index 0000000000..94e74f5178 --- /dev/null +++ b/.flake8 @@ -0,0 +1,4 @@ +[flake8] +extend-ignore = F704, F821, F401, F841 +exclude = .git,build,dist +max-complexity = 20 \ No newline at end of file diff --git a/docs/components/arm/_index.md b/docs/components/arm/_index.md index a4e3661fe9..72cae037e3 100644 --- a/docs/components/arm/_index.md +++ b/docs/components/arm/_index.md @@ -257,14 +257,16 @@ For more information, see the [Python SDK Docs](https://python.viam.dev/_modules ```python {class="line-numbers linkable-line-numbers"} my_arm = Arm.from_robot(robot=robot, name="my_arm") -# Declare a list of values with your desired rotational value for each joint on the arm. +# Declare a list of values with your desired rotational value for each joint on +# the arm. degrees = [0.0, 45.0, 0.0, 0.0, 0.0] # Declare a new JointPositions with these values. -jointPos = arm.move_to_joint_positions(JointPositions(values=[0.0, 45.0, 0.0, 0.0, 0.0])) +jointPos = arm.move_to_joint_positions( + JointPositions(values=[0.0, 45.0, 0.0, 0.0, 0.0])) # Move each joint of the arm to the position these values specify. -await my_arm.move_to_joint_positions(positions= jointPos) +await my_arm.move_to_joint_positions(positions=jointPos) ``` {{% /tab %}} @@ -425,14 +427,13 @@ For more information, see the [Python SDK Docs](https://python.viam.dev/autoapi/ my_arm = Arm.from_robot(robot=robot, name="my_arm") # Get the kinematics information associated with the arm. -kinematics := await my_arm.get_kinematics() +kinematics = await my_arm.get_kinematics() # Get the format of the kinematics file. k_file = kinematics[0] # Get the byte contents of the file. 
k_bytes = kinematics[1] - ``` {{% /tab %}} diff --git a/docs/components/base/_index.md b/docs/components/base/_index.md index e1e6cd69bd..2be2089c00 100644 --- a/docs/components/base/_index.md +++ b/docs/components/base/_index.md @@ -225,19 +225,27 @@ my_base = Base.from_robot(robot=robot, name="my_base") # Make your wheeled base move forward. Set linear power to 75%. print("move forward") -await my_base.set_power(linear=Vector3(x=0,y=-.75,z=0), angular=Vector3(x=0,y=0,z=0)) +await my_base.set_power( + linear=Vector3(x=0, y=-.75, z=0), + angular=Vector3(x=0, y=0, z=0)) # Make your wheeled base move backward. Set linear power to -100%. print("move backward") -await my_base.set_power(linear=Vector3(x=0,y=-1.0,z=0), angular=Vector3(x=0,y=0,z=0)) +await my_base.set_power( + linear=Vector3(x=0, y=-1.0, z=0), + angular=Vector3(x=0, y=0, z=0)) # Make your wheeled base spin left. Set angular power to 100%. print("spin left") -await my_base.set_power(linear=Vector3(x=0,y=0,z=0), angular=Vector3(x=0,y=0,z=1)) +await my_base.set_power( + linear=Vector3(x=0, y=0, z=0), + angular=Vector3(x=0, y=0, z=1)) # Make your wheeled base spin right. Set angular power to -75%. print("spin right") -await my_base.set_power(linear=Vector3(x=0,y=0,z=0), angular=Vector3(x=0,y=0,z=-.75)) +await my_base.set_power( + linear=Vector3(x=0, y=0, z=0), + angular=Vector3(x=0, y=0, z=-.75)) ``` {{% /tab %}} @@ -309,8 +317,10 @@ For more information, see the [Python SDK Docs](https://python.viam.dev/autoapi/ ```python {class="line-numbers linkable-line-numbers"} my_base = Base.from_robot(robot=robot, name="my_base") -# Set the linear velocity to 50 mm/sec and the angular velocity to 15 degree/sec. -await my_base.set_velocity(linear=Vector3(x=0,y=50,z=0), angular=Vector3(x=0,y=0,z=15)) +# Set the linear velocity to 50 mm/sec and the angular velocity to +# 15 degree/sec. 
+await my_base.set_velocity( + linear=Vector3(x=0, y=50, z=0), angular=Vector3(x=0, y=0, z=15)) ``` {{% /tab %}} @@ -469,10 +479,10 @@ For more information, see the [Python SDK Docs](https://python.viam.dev/autoapi/ ```python {class="line-numbers linkable-line-numbers"} my_base = Base.from_robot(robot=robot, name="my_base") -# Get the width and turning radius of the base +# Get the width and turning radius of the base properties = await my_base.get_properties() -# Get the width +# Get the width print(f"Width of base in meters: {properties.width}") # Get the turning radius @@ -497,7 +507,7 @@ For more information, see the [Go SDK Docs](https://pkg.go.dev/go.viam.com/rdk/c ```go {class="line-numbers linkable-line-numbers"} myBase, err := base.FromRobot(robot, "my_base") -// Get the width and turning radius of the base +// Get the width and turning radius of the base properties, err := myBase.Properties(context.Background(), nil) // Get the width diff --git a/docs/components/board/_index.md b/docs/components/board/_index.md index b7bdfecf42..5b96f7d6d4 100644 --- a/docs/components/board/_index.md +++ b/docs/components/board/_index.md @@ -448,10 +448,11 @@ Get an [`DigitalInterrupt`](#digital_interrupts) by `name.` For more information, see the [Python SDK Docs](https://python.viam.dev/autoapi/viam/components/board/index.html#viam.components.board.Board.digital_interrupt_by_name). ```python -my_board = Board.from_robot(robot=robot, name=) +my_board = Board.from_robot(robot=robot, name="my_board") # Get the DigitalInterrupt "my_example_digital_interrupt". -interrupt = await my_board.digital_interrupt_by_name(name="my_example_digital_interrupt") +interrupt = await my_board.digital_interrupt_by_name( + name="my_example_digital_interrupt") ``` {{% /tab %}} @@ -1047,7 +1048,8 @@ my_board = Board.from_robot(robot=robot, name="my_board") # Get the GPIOPin with pin number 15. 
pin = await my_board.GPIO_pin_by_name(name="15") -# Set the duty cycle to .6, meaning that this pin will be in the high state for 60% of the duration of the PWM interval period. +# Set the duty cycle to .6, meaning that this pin will be in the high state for +# 60% of the duration of the PWM interval period. await pin.set_pwm(cycle=.6) ``` @@ -1225,9 +1227,11 @@ pin = await my_board.GPIO_pin_by_name(name="15") duty_cycle = await pin.get_pwm() # Get the AnalogReader "my_example_analog_reader". -reader = await my_board.analog_reader_by_name(name="my_example_analog_reader") +reader = await my_board.analog_reader_by_name( + name="my_example_analog_reader") -# Get the value of the digital signal "my_example_analog_reader" has most recently measured. +# Get the value of the digital signal "my_example_analog_reader" has most +# recently measured. reading = reader.read() ``` @@ -1288,9 +1292,11 @@ For more information, see the [Python SDK Docs](https://python.viam.dev/autoapi/ my_board = Board.from_robot(robot=robot, name="my_board") # Get the DigitalInterrupt "my_example_digital_interrupt". -interrupt = await my_board.digital_interrupt_by_name(name="my_example_digital_interrupt") +interrupt = await my_board.digital_interrupt_by_name( + name="my_example_digital_interrupt") -# Get the amount of times this DigitalInterrupt has been interrupted with a tick. +# Get the amount of times this DigitalInterrupt has been interrupted with a +# tick. count = await interrupt.value() ``` @@ -1341,9 +1347,11 @@ For more information, see the [Python SDK Docs](https://python.viam.dev/autoapi/ my_board = Board.from_robot(robot=robot, name="my_board") # Get the DigitalInterrupt "my_example_digital_interrupt". 
-interrupt = await my_board.digital_interrupt_by_name(name="my_example_digital_interrupt") +interrupt = await my_board.digital_interrupt_by_name( + name="my_example_digital_interrupt") -# Get the rolling average of the pulse width across each time the DigitalInterrupt is interrupted with a tick. +# Get the rolling average of the pulse width across each time the +# DigitalInterrupt is interrupted with a tick. rolling_avg = await interrupt.value() ``` @@ -1407,7 +1415,8 @@ For more information, see the [Python SDK Docs](https://python.viam.dev/autoapi/ my_board = Board.from_robot(robot=robot, name="my_board") # Get the DigitalInterrupt "my_example_digital_interrupt". -interrupt = await my_board.digital_interrupt_by_name(name="my_example_digital_interrupt") +interrupt = await my_board.digital_interrupt_by_name( + name="my_example_digital_interrupt") # Record an interrupt and notify any interested callbacks. await interrupt.tick(high=true, nanos=12345) @@ -1431,7 +1440,8 @@ For more information, see the [Python SDK Docs](https://python.viam.dev/autoapi/ my_board = Board.from_robot(robot=robot, name="my_board") # Get the DigitalInterrupt "my_example_digital_interrupt". -interrupt = await my_board.digital_interrupt_by_name(name="my_example_digital_interrupt") +interrupt = await my_board.digital_interrupt_by_name( + name="my_example_digital_interrupt") # Record an interrupt and notify any interested callbacks. await interrupt.tick(high=true, nanos=12345) @@ -1524,7 +1534,8 @@ pin = await my_board.GPIO_pin_by_name(name="15") callback_queue = Queue(maxsize=10) # Get the DigitalInterrupt "my_example_digital_interrupt". -interrupt = await my_board.digital_interrupt_by_name(name="my_example_digital_interrupt") +interrupt = await my_board.digital_interrupt_by_name( + name="my_example_digital_interrupt") # Add a queue to the interrupt. 
interrupt.add_callback(callback_queue) @@ -1618,7 +1629,8 @@ For more information, see the [Python SDK Docs](https://python.viam.dev/autoapi/ my_board = Board.from_robot(robot=robot, name="my_board") # Get the DigitalInterrupt "my_example_digital_interrupt". -interrupt = await my_board.digital_interrupt_by_name(name="my_example_digital_interrupt") +interrupt = await my_board.digital_interrupt_by_name( + name="my_example_digital_interrupt") ``` ``` --> diff --git a/docs/components/camera/_index.md b/docs/components/camera/_index.md index 049de515a1..6b74da94d5 100644 --- a/docs/components/camera/_index.md +++ b/docs/components/camera/_index.md @@ -133,7 +133,7 @@ frame = await my_cam.get_image() # Convert "frame" to a standard 2D image representation. # Remove the 1st 3x8 bytes and reshape the raw bytes to List[List[Int]]. -standard_frame frame.bytes_to_depth_array() +standard_frame = frame.bytes_to_depth_array() ``` {{% alert title="Tip" color="tip" %}} @@ -253,7 +253,7 @@ To deserialize the returned information into a numpy array, use the Open3D libra import numpy as np import open3d as o3d -my_camera= Camera.from_robot(robot=robot, name="my_camera") +my_camera = Camera.from_robot(robot=robot, name="my_camera") data, _ = await my_camera.get_point_cloud() @@ -306,7 +306,7 @@ Get the camera intrinsic parameters and camera distortion, as well as whether th - [(Properties)](https://python.viam.dev/autoapi/viam/components/camera/index.html#viam.components.camera.Camera.Properties): The properties of the camera. ```python {class="line-numbers linkable-line-numbers"} -my_camera= Camera.from_robot(robot=robot, name="my_camera") +my_camera = Camera.from_robot(robot=robot, name="my_camera") properties = await my_camera.get_properties() ``` @@ -356,7 +356,7 @@ If you are implementing your own camera and adding features that have no native - [(Dict[str, Any])](https://docs.python.org/3/library/stdtypes.html#typesmapping): Result of the executed command. 
```python {class="line-numbers linkable-line-numbers"} -my_camera= Camera.from_robot(robot, "my_camera") +my_camera = Camera.from_robot(robot, "my_camera") command = {"cmd": "test", "data1": 500} result = my_camera.do(command) diff --git a/docs/components/gantry/_index.md b/docs/components/gantry/_index.md index 3a11356b7c..c3cb8de3ab 100644 --- a/docs/components/gantry/_index.md +++ b/docs/components/gantry/_index.md @@ -156,13 +156,15 @@ For more information, see the [Python SDK Docs](https://python.viam.dev/_modules ```python my_gantry = Gantry.from_robot(robot=robot, name="my_gantry") -# Create a list of positions for the axes of the gantry to move to. Assume in this example that the gantry is multi-axis, with 3 axes. +# Create a list of positions for the axes of the gantry to move to. Assume in +# this example that the gantry is multi-axis, with 3 axes. examplePositions = [1, 2, 3] exampleSpeeds = [3, 9, 12] # Move the axes of the gantry to the positions specified. -await my_gantry.move_to_position(positions=examplePositions, speeds=exampleSpeeds) +await my_gantry.move_to_position( + positions=examplePositions, speeds=exampleSpeeds) ``` {{% /tab %}} @@ -315,7 +317,8 @@ For more information, see the [Python SDK Docs](https://python.viam.dev/_modules ```python my_gantry = Gantry.from_robot(robot=robot, name="my_gantry") -# Stop all motion of the gantry. It is assumed that the gantry stops immediately. +# Stop all motion of the gantry. It is assumed that the gantry stops +# immediately. await my_gantry.stop() ``` @@ -363,7 +366,8 @@ For more information, see the [Python SDK Docs](https://python.viam.dev/_modules ```python my_gantry = Gantry.from_robot(robot=robot, name="my_gantry") -# Stop all motion of the gantry. It is assumed that the gantry stops immediately. +# Stop all motion of the gantry. It is assumed that the gantry stops +# immediately. await my_gantry.stop() # Print if the gantry is currently moving. 
diff --git a/docs/components/input-controller/_index.md b/docs/components/input-controller/_index.md index 3131dd13d4..5e320b19d1 100644 --- a/docs/components/input-controller/_index.md +++ b/docs/components/input-controller/_index.md @@ -112,31 +112,38 @@ Doing so registers the same callback to both `ButtonPress` and `ButtonRelease`, For more information, see the [Python SDK Docs](https://python.viam.dev/autoapi/viam/components/input/index.html#viam.components.input.Controller.register_control_callback). ```python {class="line-numbers linkable-line-numbers"} -# Define a function to handle pressing the Start Menu Button "BUTTON_START" on your controller, printing out the start time. +# Define a function to handle pressing the Start Menu Button "BUTTON_START" on +# your controller, printing out the start time. def print_start_time(event): print(f"Start Menu Button was pressed at this time:\n{event.time}") + # Define a function that handles the controller. async def handle_controller(controller): - # Get the list of Controls on the controller. controls = await controller.get_controls() - # If the "BUTTON_START" Control is found, register the function print_start_time to fire when "BUTTON_START" has the event "ButtonPress" occur. + # If the "BUTTON_START" Control is found, register the function + # print_start_time to fire when "BUTTON_START" has the event "ButtonPress" + # occur. if Control.BUTTON_START in controls: - controller.register_control_callback(Control.BUTTON_START, [EventType.BUTTON_PRESS], print_start_time) + controller.register_control_callback( + Control.BUTTON_START, [EventType.BUTTON_PRESS], print_start_time) else: - print("Oops! Couldn't find the start button control! Is your controller connected?") + print("Oops! Couldn't find the start button control! Is your " + "controller connected?") exit() while True: await asyncio.sleep(1.0) + async def main(): # ... < INSERT CONNECTION CODE FROM ROBOT'S CODE SAMPLE TAB > # Get your controller from the robot. 
- my_controller = Controller.from_robot(robot=myRobotWithController, name="my_controller") + my_controller = Controller.from_robot( + robot=myRobotWithController, name="my_controller") # Run the handleController function. await handleController(my_controller) @@ -236,7 +243,8 @@ For more information, see the [Python SDK Docs](https://python.viam.dev/_modules ```python {class="line-numbers linkable-line-numbers"} # Get the controller from the robot. -my_controller = Controller.from_robot(robot=myRobotWithController, name="my_controller") +my_controller = Controller.from_robot( + robot=myRobotWithController, name="my_controller") # Get the most recent Event for each Control. recent_events = await my_controller.get_events() @@ -294,7 +302,8 @@ For more information, see the [Python SDK Docs](https://python.viam.dev/_modules ```python {class="line-numbers linkable-line-numbers"} # Get the controller from the robot. -my_controller = Controller.from_robot(robot=myRobotWithController, name="my_controller") +my_controller = Controller.from_robot( + robot=myRobotWithController, name="my_controller") # Get the list of Controls provided by the controller. controls = await my_controller.get_controls() @@ -357,9 +366,11 @@ For more information, see the [Python SDK Docs](https://python.viam.dev/_modules ```python {class="line-numbers linkable-line-numbers"} # Define a "Button is Pressed" event for the control BUTTON_START. -button_is_pressed_event = Event(time(), EventType.BUTTON_PRESS, Control.BUTTON_START, 1.0) +button_is_pressed_event = Event( + time(), EventType.BUTTON_PRESS, Control.BUTTON_START, 1.0) -# Trigger the event on your controller. Set this trigger to timeout if it has not completed in 7 seconds. +# Trigger the event on your controller. Set this trigger to timeout if it has +# not completed in 7 seconds. 
await myController.trigger_event(event=my_event, timeout=7.0) ``` @@ -414,7 +425,8 @@ If you are implementing your own input controller and add features that have no ```python {class="line-numbers linkable-line-numbers"} # Get the controller from the robot. -my_controller = Controller.from_robot(robot=myRobotWithController, name="my_controller") +my_controller = Controller.from_robot( + robot=myRobotWithController, name="my_controller") command = {"cmd": "test", "data1": 500} result = my_controller.do(command) diff --git a/docs/components/motor/_index.md b/docs/components/motor/_index.md index 495333b7a0..07d6693747 100644 --- a/docs/components/motor/_index.md +++ b/docs/components/motor/_index.md @@ -113,7 +113,7 @@ For more information, see the [Python SDK Docs](https://python.viam.dev/autoapi/ my_motor = Motor.from_robot(robot=robot, name="my_motor") # Set the power to 40% forwards. -await my_motor.set_power(power = 0.4) +await my_motor.set_power(power=0.4) ``` {{% /tab %}} @@ -370,7 +370,8 @@ For more information, see the [Python SDK Docs](https://python.viam.dev/autoapi/ ```python my_motor = Motor.from_robot(robot=robot, name="my_motor") -# Report a dictionary mapping optional properties to whether it is supported by this motor. +# Report a dictionary mapping optional properties to whether it is supported by +# this motor. properties = await my_motor.get_properties() # Print out the properties. diff --git a/docs/components/movement-sensor/_index.md b/docs/components/movement-sensor/_index.md index 30afb17e20..4e7fb83bf4 100644 --- a/docs/components/movement-sensor/_index.md +++ b/docs/components/movement-sensor/_index.md @@ -106,7 +106,9 @@ Supported by GPS models. For more information, see the [Python SDK Docs](https://python.viam.dev/autoapi/viam/components/movement_sensor/index.html#viam.components.movement_sensor.MovementSensor.get_position). 
```python -my_movement_sensor = MovementSensor.from_robot(robot=robot, name="my_movement_sensor") +my_movement_sensor = MovementSensor.from_robot( + robot=robot, + name="my_movement_sensor") # Get the current position of the movement sensor. position = await my_movement_sensor.get_position() @@ -129,7 +131,8 @@ position = await my_movement_sensor.get_position() For more information, see the [Go SDK docs](https://pkg.go.dev/go.viam.com/rdk/components/movementsensor#MovementSensor). ```go -myMovementSensor, err := movementsensor.FromRobot(robot, "my_movement_sensor") +myMovementSensor, err := movementsensor.FromRobot( + robot, "my_movement_sensor") // Get the current position of the movement sensor. position, err := myMovementSensor.Position(context.Background(), nil) @@ -158,7 +161,8 @@ Supported by GPS models. For more information, see the [Python SDK Docs](https://python.viam.dev/autoapi/viam/components/movement_sensor/index.html#viam.components.movement_sensor.MovementSensor.get_linear_velocity). ```python -my_movement_sensor = MovementSensor.from_robot(robot=robot, name="my_movement_sensor") +my_movement_sensor = MovementSensor.from_robot( + robot=robot, name="my_movement_sensor") # Get the current linear velocity of the movement sensor. lin_vel = await my_movement_sensor.get_linear_velocity() @@ -209,7 +213,8 @@ Supported by IMU models and by `gyro-mpu6050`. For more information, see the [Python SDK Docs](https://python.viam.dev/autoapi/viam/components/movement_sensor/index.html#viam.components.movement_sensor.MovementSensor.get_angular_velocity). ```python -my_movement_sensor = MovementSensor.from_robot(robot=robot, name="my_movement_sensor") +my_movement_sensor = MovementSensor.from_robot( + robot=robot, name="my_movement_sensor") # Get the current angular velocity of the movement sensor. ang_vel = await my_movement_sensor.get_angular_velocity() @@ -266,7 +271,8 @@ Supported by IMU models, `accel-adxl345`, and `gyro-mpu6050`. 
For more information, see the [Python SDK Docs](https://python.viam.dev/autoapi/viam/components/movement_sensor/index.html#viam.components.movement_sensor.MovementSensor.get_linear_acceleration). ```python -my_movement_sensor = MovementSensor.from_robot(robot=robot, name="my_movement_sensor") +my_movement_sensor = MovementSensor.from_robot( + robot=robot, name="my_movement_sensor") # Get the current linear acceleration of the movement sensor. lin_accel = await my_movement_sensor.get_linear_acceleration() @@ -323,7 +329,8 @@ Supported by GPS models and `imu-vectornav`. For more information, see the [Python SDK Docs](https://python.viam.dev/autoapi/viam/components/movement_sensor/index.html#viam.components.movement_sensor.MovementSensor.get_compass_heading). ```python -my_movement_sensor = MovementSensor.from_robot(robot=robot, name="my_movement_sensor") +my_movement_sensor = MovementSensor.from_robot( + robot=robot, name="my_movement_sensor") # Get the current compass heading of the movement sensor. heading = await my_movement_sensor.get_compass_heading() @@ -374,7 +381,8 @@ Supported by IMU models. For more information, see the [Python SDK Docs](https://python.viam.dev/autoapi/viam/components/movement_sensor/index.html#viam.components.movement_sensor.MovementSensor.get_orientation). ```python -my_movement_sensor = MovementSensor.from_robot(robot=robot, name="my_movement_sensor") +my_movement_sensor = MovementSensor.from_robot( + robot=robot, name="my_movement_sensor") # Get the current orientation vector of the movement sensor. orientation = await my_movement_sensor.get_orientation() @@ -432,7 +440,8 @@ Get the supported properties of this sensor. For more information, see the [Python SDK Docs](https://python.viam.dev/autoapi/viam/components/movement_sensor/index.html#viam.components.movement_sensor.MovementSensor.get_properties). 
```python -my_movement_sensor = MovementSensor.from_robot(robot=robot, name="my_movement_sensor") +my_movement_sensor = MovementSensor.from_robot( + robot=robot, name="my_movement_sensor") # Get the supported properties of the movement sensor. properties = await my_movement_sensor.get_properties() @@ -484,7 +493,8 @@ Supported by GPS models. For more information, see the [Python SDK Docs](https://python.viam.dev/autoapi/viam/components/movement_sensor/index.html#viam.components.movement_sensor.MovementSensor.get_accuracy). ```python -my_movement_sensor = MovementSensor.from_robot(robot=robot, name="my_movement_sensor") +my_movement_sensor = MovementSensor.from_robot( + robot=robot, name="my_movement_sensor") # Get the accuracy of the movement sensor. accuracy = await my_movement_sensor.get_accuracy() @@ -537,7 +547,8 @@ Contents depend on sensor model and can be of any type. For more information, see the [Python SDK Docs](https://python.viam.dev/autoapi/viam/components/movement_sensor/index.html#viam.components.movement_sensor.MovementSensor.get_readings). ```python -my_movement_sensor = MovementSensor.from_robot(robot=robot, name="my_movement_sensor") +my_movement_sensor = MovementSensor.from_robot( + robot=robot, name="my_movement_sensor") # Get the latest readings from the movement sensor. readings = await my_movement_sensor.get_readings() @@ -586,7 +597,8 @@ If you are implementing your own movement sensor and add features that have no b - [(Dict[str, Any])](https://docs.python.org/3/library/stdtypes.html#typesmapping): Result of the executed command. 
```python {class="line-numbers linkable-line-numbers"} -my_movement_sensor = MovementSensor.from_robot(robot=robot, name="my_movement_sensor") +my_movement_sensor = MovementSensor.from_robot( + robot=robot, name="my_movement_sensor") reset_dict = { "command": "reset", diff --git a/docs/program/_index.md b/docs/program/_index.md index 3fb1f90fe1..216280f45e 100644 --- a/docs/program/_index.md +++ b/docs/program/_index.md @@ -94,6 +94,7 @@ import asyncio from viam.robot.client import RobotClient from viam.rpc.dial import Credentials, DialOptions + async def connect(): creds = Credentials( type='robot-location-secret', @@ -104,6 +105,7 @@ async def connect(): ) return await RobotClient.at_address('ADDRESS FROM THE VIAM APP', opts) + async def main(): robot = await connect() diff --git a/docs/program/apis/cloud.md b/docs/program/apis/cloud.md index 9c4d0990ed..3be32f7c48 100644 --- a/docs/program/apis/cloud.md +++ b/docs/program/apis/cloud.md @@ -34,24 +34,28 @@ import asyncio from viam.rpc.dial import DialOptions, Credentials from viam.app.viam_client import ViamClient + async def connect() -> ViamClient: dial_options = DialOptions( - auth_entity='mrroboto.this_is_just_an_example.viam.cloud', # The URL of a robot in the location. + # The URL of a robot in the location. 
+ auth_entity='mrroboto.this_is_just_an_example.viam.cloud', credentials=Credentials( type='robot-location-secret', - payload='YOUR LOCATION SECRET' # The location secret + payload='YOUR LOCATION SECRET' # The location secret ) ) return await ViamClient.create_from_dial_options(dial_options) + async def main(): - # Make a ViamClient - viam_client = await connect() - # Instantiate an AppClient called "cloud" to run the cloud app API methods on - cloud = viam_client.app_client + # Make a ViamClient + viam_client = await connect() + # Instantiate an AppClient called "cloud" to run the cloud app API methods + # on + cloud = viam_client.app_client - viam_client.close() + viam_client.close() if __name__ == '__main__': asyncio.run(main()) @@ -136,7 +140,8 @@ Namespaces can only contain lowercase alphanumeric and dash characters. - [(bool)](https://docs.python.org/3/library/stdtypes.html#bltin-boolean-values): `True` if the provided namespace is available. ```python {class="line-numbers linkable-line-numbers"} -available = await cloud.get_organization_namespace_availability(public_namespace="my-cool-organization") +available = await cloud.get_organization_namespace_availability( + public_namespace="my-cool-organization") ``` For more information, see the [Python SDK Docs](https://python.viam.dev/autoapi/viam/app/app_client/index.html#viam.app.app_client.AppClient.get_organization_namespace_availability). 
@@ -168,7 +173,8 @@ If an invitation has only one authorization and you want to remove it, delete th ```python {class="line-numbers linkable-line-numbers"} from viam.proto.app import Authorization -authorization_to_add=Authorization( + +authorization_to_add = Authorization( authorization_type="some type of auth", authorization_id="identifier", resource_type="abc", @@ -211,7 +217,8 @@ Defaults to creating a location in your organization at root level if you do not - [(viam.proto.app.Location)](https://python.viam.dev/autoapi/viam/proto/app/index.html#viam.proto.app.Location): The newly created location. ```python {class="line-numbers linkable-line-numbers"} -my_new_location = await cloud.create_location(name="Robotville", parent_location_id="111ab12345") +my_new_location = await cloud.create_location(name="Robotville", + parent_location_id="111ab12345") ``` For more information, see the [Python SDK Docs](https://python.viam.dev/autoapi/viam/app/app_client/index.html#viam.app.app_client.AppClient.create_location). @@ -281,13 +288,15 @@ my_updated_location = await cloud.update_location( parent_location_id="xyz34xxxxx", ) -# The following line changes the name of the location without changing its parent location +# The following line changes the name of the location without changing its +# parent location my_updated_location = await cloud.update_location( location_id="abc12abcde", name="Land Before Robots" ) -# The following line moves the location back up to be a top level location without changing its name +# The following line moves the location back up to be a top level location +# without changing its name my_updated_location = await cloud.update_location( location_id="abc12abcde", name="", @@ -432,7 +441,8 @@ Delete a location secret. 
- [(string)](https://docs.python.org/3/library/stdtypes.html#text-sequence-type-str): ```python {class="line-numbers linkable-line-numbers"} -await cloud.delete_location_secret(secret_id="abcd123-456-7890ab-cxyz98-989898xyzxyz") +await cloud.delete_location_secret( + secret_id="abcd123-456-7890ab-cxyz98-989898xyzxyz") ``` For more information, see the [Python SDK Docs](https://python.viam.dev/autoapi/viam/app/app_client/index.html#viam.app.app_client.AppClient.delete_location_secret). diff --git a/docs/program/apis/robot.md b/docs/program/apis/robot.md index 193d873266..79e03dd459 100644 --- a/docs/program/apis/robot.md +++ b/docs/program/apis/robot.md @@ -307,7 +307,8 @@ Cancel all current and outstanding operations for the robot and stop all actuato For more information, see the [Python SDK Docs](https://python.viam.dev/autoapi/viam/robot/client/index.html#viam.robot.client.RobotClient.stop_all). ```python {class="line-numbers linkable-line-numbers"} -# Cancel all current and outstanding operations for the robot and stop all actuators and movement. +# Cancel all current and outstanding operations for the robot and stop all +# actuators and movement. await robot.stop_all() ``` diff --git a/docs/program/apis/sessions.md b/docs/program/apis/sessions.md index 8fc3e73e24..09fe922849 100644 --- a/docs/program/apis/sessions.md +++ b/docs/program/apis/sessions.md @@ -54,10 +54,10 @@ For example, if the heartbeat window is 5 seconds, clients will each send a hear You can adjust the heartbeat window in the configuration of your robot by adding the following to your robot's Raw JSON configuration: ``` json - ... // components {...}, services {...}, + ... // components {...}, services {...}, "network": { "sessions": { - "heartbeat_window": "30s" // Changes heartbeat window to 30 seconds + "heartbeat_window": "30s" // Changes heartbeat window to 30 seconds } }, ... 
@@ -88,7 +88,7 @@ If you want to disable it to keep any additional clients from authenticating to ```python {class="line-numbers linkable-line-numbers"} async def main(): - opts = RobotClient.Options(disable_sessions=True, ...) + opts = RobotClient.Options(disable_sessions=True) await RobotClient.at_address("my-robot-address", opts) robot = await connect() ``` diff --git a/docs/program/run.md b/docs/program/run.md index fe2fb60ea4..8d2f045d55 100644 --- a/docs/program/run.md +++ b/docs/program/run.md @@ -87,16 +87,17 @@ Your SDK code should now look like: ```python {class="line-numbers linkable-line-numbers"} async def connect(): - creds = Credentials(type='robot-location-secret', payload=YOUR_LOCATION_SECRET) - opts = RobotClient.Options( - refresh_interval=0, - dial_options=DialOptions( - credentials=creds, - disable_webrtc=True, - auth_entity="" + creds = Credentials(type='robot-location-secret', + payload=YOUR_LOCATION_SECRET) + opts = RobotClient.Options( + refresh_interval=0, + dial_options=DialOptions( + credentials=creds, + disable_webrtc=True, + auth_entity="" + ) ) - ) - return await RobotClient.at_address('localhost:8080', opts) + return await RobotClient.at_address('localhost:8080', opts) ``` Your localhost can now make a secure connection to `viam-server` locally. 
diff --git a/docs/services/frame-system/_index.md b/docs/services/frame-system/_index.md index 3d77779f50..c03ab3a199 100644 --- a/docs/services/frame-system/_index.md +++ b/docs/services/frame-system/_index.md @@ -292,10 +292,12 @@ For more information, see the [Python SDK Docs](https://python.viam.dev/autoapi/ first_pose = Pose(x=0.0, y=0.0, z=0.0, o_x=0.0, o_y=0.0, o_z=1.0, theta=0.0) first_pif = PoseInFrame(reference_frame="world", pose=first_pose) transformed_pif = await robot.transform_pose(first_pif, "myArm") -print("Position: (x:", transformed_pif.pose.x, ", y:", transformed_pif.pose.y, ", z:", transformed_pif.pose.z, ")") +print("Position: (x:", transformed_pif.pose.x, + ", y:", transformed_pif.pose.y, + ", z:", transformed_pif.pose.z, ")") print("Orientation: (o_x:", transformed_pif.pose.o_x, ", o_y:", transformed_pif.pose.o_y, - ", o_z:",transformed_pif.pose.o_z, + ", o_z:", transformed_pif.pose.o_z, ", theta:", transformed_pif.pose.theta, ")") ``` diff --git a/docs/services/motion/_index.md b/docs/services/motion/_index.md index aef77e50a3..bb77f14efc 100644 --- a/docs/services/motion/_index.md +++ b/docs/services/motion/_index.md @@ -125,7 +125,12 @@ my_frame = "my_gripper_offset" goal_pose = Pose(x=0, y=0, z=300, o_x=0, o_y=0, o_z=1, theta=0) # Move the gripper -moved = await motion.move(component_name=gripper_name, destination=PoseInFrame(reference_frame="myFrame", pose=goal_pose), world_state=worldState, constraints={}, extra={}) +moved = await motion.move(component_name=gripper_name, + destination=PoseInFrame(reference_frame="myFrame", + pose=goal_pose), + world_state=worldState, + constraints={}, + extra={}) ``` {{% /tab %}} @@ -240,7 +245,8 @@ robot = await connect() motion = MotionClient.from_robot(robot=robot, name="builtin") gripperName = Gripper.get_resource_name("my_gripper") -gripperPoseInWorld = await robot.get_pose(component_name=gripperName, destination_frame="world") +gripperPoseInWorld = await robot.get_pose(component_name=gripperName, 
+ destination_frame="world") ``` For a more complicated example, take the same scenario and get the pose of the same gripper with respect to an object situated at a location (100, 200, 0) relative to the "world" frame: @@ -256,7 +262,8 @@ robot = await connect() motion = MotionClient.from_robot(robot=robot, name="builtin") objectPose = Pose(x=100, y=200, z=0, o_x=0, o_y=0, o_z=1, theta=0) objectPoseInFrame = PoseInFrame(reference_frame="world", pose=objectPose) -objectTransform = Transform(reference_frame="object", pose_in_observer_frame=objectPoseInFrame) +objectTransform = Transform(reference_frame="object", + pose_in_observer_frame=objectPoseInFrame) gripperName = Gripper.get_resource_name("my_gripper") gripperPoseInObjectFrame = await motion.get_pose( component_name=gripperName, @@ -357,11 +364,15 @@ motion = MotionClient.from_robot(robot=robot, name="builtin") my_base_resource_name = Base.get_resource_name("my_base") my_slam_resource_name = SLAMClient.get_resource_name("my_slam_service") -# Define a destination pose with respect to the origin of the map from the SLAM service "my_slam_service" +# Define a destination pose with respect to the origin of the map from the SLAM +# service "my_slam_service" my_pose = Pose(y=10) -# Move the base component to the destination pose of Y=10, a location of (0, 10, 0) in respect to the origin of the map -success = await motion.move_on_map(component_name=my_base_resource_name, destination=my_pose, slam_service_name=my_slam_resource_name) +# Move the base component to the destination pose of Y=10, a location of +# (0, 10, 0) in respect to the origin of the map +success = await motion.move_on_map(component_name=my_base_resource_name, + destination=my_pose, + slam_service_name=my_slam_resource_name) ``` {{% /tab %}} @@ -460,12 +471,17 @@ motion = MotionClient.from_robot(robot=robot, name="builtin") # Get the ResourceNames of the base and movement sensor my_base_resource_name = Base.get_resource_name("my_base") 
-mvmnt_sensor_resource_name = MovementSensor.get_resource_name("my_movement_sensor") +mvmnt_sensor_resource_name = MovementSensor.get_resource_name( + "my_movement_sensor") # Define a destination GeoPoint at the GPS coordinates [0, 0] my_destination = movement_sensor.GeoPoint(latitude=0, longitude=0) -# Move the base component to the designated geographic location, as reported by the movement sensor -success = await motion.move_on_globe(component_name=my_base_resource_name, destination=my_destination, movement_sensor_name=mvmnt_sensor_resource_name) +# Move the base component to the designated geographic location, as reported by +# the movement sensor +success = await motion.move_on_globe( + component_name=my_base_resource_name, + destination=my_destination, + movement_sensor_name=mvmnt_sensor_resource_name) ``` {{% /tab %}} diff --git a/docs/services/motion/constraints.md b/docs/services/motion/constraints.md index 5014329d8a..95bd49251f 100644 --- a/docs/services/motion/constraints.md +++ b/docs/services/motion/constraints.md @@ -39,9 +39,10 @@ moved = await motion.move( pose=goal_pose), world_state=worldState, constraints={ - Constraints(linear_constraint = [LinearConstraint(line_tolerance_mm=0.2)]) - }, - extra={}) + Constraints( + linear_constraint=[LinearConstraint(line_tolerance_mm=0.2)]) + }, + extra={}) ``` You can find more information in the [Python SDK Docs](https://python.viam.dev/autoapi/viam/gen/service/motion/v1/motion_pb2/index.html#viam.gen.service.motion.v1.motion_pb2.Constraints). 
@@ -84,14 +85,14 @@ If set to zero, a movement with identical starting and ending orientations will {{% tab name="Python" %}} ```python {class="line-numbers linkable-line-numbers"} -## Move a gripper with an orientation constraint +# Move a gripper with an orientation constraint moved = await motion.move( component_name=my_gripper, destination=PoseInFrame( reference_frame="my_frame", pose=goal_pose), world_state=worldState, - constraints = Constraints(orientation_constraint = [OrientationConstraint()]) + constraints=Constraints(orientation_constraint=[OrientationConstraint()]), extra={}) ``` diff --git a/docs/services/navigation/_index.md b/docs/services/navigation/_index.md index 7449bb1011..484c4adec2 100644 --- a/docs/services/navigation/_index.md +++ b/docs/services/navigation/_index.md @@ -283,7 +283,8 @@ For more information, see the [Python SDK Docs](https://python.viam.dev/autoapi/ ```python my_nav = NavigationClient.from_robot(robot=robot, name="my_nav_service") -# Set the Mode the service is operating in to MODE_WAYPOINT and begin navigation +# Set the Mode the service is operating in to MODE_WAYPOINT and begin +# navigation await my_nav.set_mode(Mode.ValueType.MODE_WAYPOINT) ``` @@ -666,7 +667,7 @@ The following {{< glossary_tooltip term_id="model" text="models" >}} of [movemen An example of a `Linear Velocity` reading: ``` go -// linearVelocity is an r3.Vector with X, Y, and Z magnitudes +// linearVelocity is an r3.Vector with X, Y, and Z magnitudes linearVelocity, err := imu.LinearVelocity(context.Background, nil) ``` diff --git a/docs/services/slam/_index.md b/docs/services/slam/_index.md index d063df35c6..426fb78d44 100644 --- a/docs/services/slam/_index.md +++ b/docs/services/slam/_index.md @@ -90,7 +90,8 @@ For more information, see the [Python SDK Docs](https://python.viam.dev/autoapi/ ```python {class="line-numbers linkable-line-numbers"} slam_svc = SLAMClient.from_robot(robot=robot, name="my_slam_service") -# Get the current position of the 
specified source component in the SLAM map as a Pose. +# Get the current position of the specified source component in the SLAM map as +# a Pose. pose = await slam.get_position() ``` @@ -186,7 +187,8 @@ For more information, see the [Python SDK Docs](https://python.viam.dev/autoapi/ ```python {class="line-numbers linkable-line-numbers"} slam = SLAMClient.from_robot(robot=robot, name="my_slam_service") -# Get the internal state of the SLAM algorithm required to continue mapping/localization. +# Get the internal state of the SLAM algorithm required to continue +# mapping/localization. internal_state = await slam.get_internal_state() ``` diff --git a/docs/services/vision/_index.md b/docs/services/vision/_index.md index 708c81c5af..a6a9d6f79c 100644 --- a/docs/services/vision/_index.md +++ b/docs/services/vision/_index.md @@ -302,7 +302,8 @@ cam1 = Camera.from_robot(robot, "cam1") # Grab the classifier you configured on your robot my_classifier = VisionClient.from_robot(robot, "my_classifier") -# Get the 2 classifications with the highest confidence scores from the next image from the camera +# Get the 2 classifications with the highest confidence scores from the next +# image from the camera classifications = await my_classifier.get_classifications_from_camera(cam1, 2) ``` diff --git a/docs/services/vision/classification.md b/docs/services/vision/classification.md index 4a5c259f40..95d68deb72 100644 --- a/docs/services/vision/classification.md +++ b/docs/services/vision/classification.md @@ -139,7 +139,8 @@ cam1 = Camera.from_robot(robot, "cam1") # Grab Viam's vision service for the classifier my_classifier = VisionClient.from_robot(robot, "my_classifier") -# Get the top 2 classifications with the highest confidence scores from the camera output +# Get the top 2 classifications with the highest confidence scores from the +# camera output classifications = await my_classifier.get_classifications_from_camera(img, 2) # If you need to store the image, get the image first 
diff --git a/docs/tutorials/configure/build-a-mock-robot.md b/docs/tutorials/configure/build-a-mock-robot.md index 0161a03c96..6018237e83 100644 --- a/docs/tutorials/configure/build-a-mock-robot.md +++ b/docs/tutorials/configure/build-a-mock-robot.md @@ -253,14 +253,15 @@ Now that your mock arm has been initialized, you can write some code to control {{% tab name="Python" %}} ```python {class="line-numbers linkable-line-numbers"} -# Gets a random position for each servo on the arm that is within the safe range of motion of the arm. -# Returns a new array of safe joint positions. +# Gets a random position for each servo on the arm that is within the safe +# range of motion of the arm. Returns a new array of safe joint positions. def getRandoms(): return [random.randint(-90, 90), - random.randint(-120, -45), - random.randint(-45, 45), - random.randint(-45, 45), - random.randint(-45, 45)] + random.randint(-120, -45), + random.randint(-45, 45), + random.randint(-45, 45), + random.randint(-45, 45)] + # Moves the arm to a new random position every second async def randomMovement(arm: ArmClient): diff --git a/docs/tutorials/custom/custom-base-dog.md b/docs/tutorials/custom/custom-base-dog.md index 1883a1ff09..a1c85b3379 100644 --- a/docs/tutorials/custom/custom-base-dog.md +++ b/docs/tutorials/custom/custom-base-dog.md @@ -143,8 +143,8 @@ Paste the following code snippet into the file you created: ```python {class="line-numbers linkable-line-numbers"} # dog_test.py is for testing the connection - -import socket, time +import socket +import time s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect(("PASTE DOG IP ADDRESS HERE", 5001)) @@ -265,7 +265,9 @@ class robotdog(Base, Reconfigurable): # Constructor @classmethod - def new(cls, config: ComponentConfig, dependencies: Mapping[ResourceName, ResourceBase]) -> Self: + def new(cls, + config: ComponentConfig, + dependencies: Mapping[ResourceName, ResourceBase]) -> Self: my_class = cls(config.name) 
my_class.reconfigure(config, dependencies) return my_class @@ -279,7 +281,8 @@ class robotdog(Base, Reconfigurable): raise ValueError("No IP address provided") port = config.attributes.fields["port"].number_value - # Per the Freenove code, 5001 is for sending/receiving instructions. Port 8001 is used for video. + # Per the Freenove code, 5001 is for sending/receiving instructions. + # Port 8001 is used for video. if port == "": port = 5001 return @@ -292,7 +295,9 @@ class robotdog(Base, Reconfigurable): LOGGER.error(e) # Handles attribute reconfiguration - def reconfigure(self, config: ComponentConfig, dependencies: Mapping[ResourceName, ResourceBase]): + def reconfigure(self, + config: ComponentConfig, + dependencies: Mapping[ResourceName, ResourceBase]): # Here we initialize the resource instance ip_address = config.attributes.fields["ip_address"].string_value port = config.attributes.fields["port"].number_value @@ -314,11 +319,11 @@ It defines each method by specifying which corresponding commands to send to the For example, the `stop` method sends a command (`CMD_MOVE_STOP#8`) to the robot dog server to stop the dog from moving: ```python - async def stop(self, extra: Optional[Dict[str, Any]] = None, **kwargs): - self.is_stopped = True +async def stop(self, extra: Optional[Dict[str, Any]] = None, **kwargs): + self.is_stopped = True - command = "CMD_MOVE_STOP#8\n" - self.send_data(command) + command = "CMD_MOVE_STOP#8\n" + self.send_data(command) ``` Copy and paste that code into your robotdog.py file. 
diff --git a/docs/tutorials/get-started/confetti-bot.md b/docs/tutorials/get-started/confetti-bot.md index 31ee54ae1f..93c9670030 100644 --- a/docs/tutorials/get-started/confetti-bot.md +++ b/docs/tutorials/get-started/confetti-bot.md @@ -279,7 +279,8 @@ In your `main` function add the following code, which instantiates a variable `p ```python {class="line-numbers linkable-line-numbers"} party = Board.from_robot(robot, "party") -# Note that the pin supplied is the pin we use. Please change this to the pin you are using. +# Note that the pin supplied is the pin we use. Please change this to the pin +# you are using. party_return_value = await party.gpio_pin_by_name("37") print(f"party gpio_pin_by_name return value: {party_return_value.get()}") @@ -294,10 +295,10 @@ Copy this code and add it to your own code within the main function block: ```python {class="line-numbers linkable-line-numbers"} while True: print(party_return_value.get()) - while (await party_return_value.get()) == True: + while (await party_return_value.get()): await start.set_power(.8) await asyncio.sleep(0.1) - if (await GPIO.get()) == False: + if not (await GPIO.get()): break await start.set_power(0) ``` diff --git a/docs/tutorials/get-started/lazy-susan.md b/docs/tutorials/get-started/lazy-susan.md index 880937a5a1..b141208c82 100644 --- a/docs/tutorials/get-started/lazy-susan.md +++ b/docs/tutorials/get-started/lazy-susan.md @@ -308,8 +308,8 @@ nano turnsusan.py You can start by adding this call to `set_power` to your `main function` above the line that closes the robot connection with `robot.close()`: ```python - #Use the set power method before you close the code loop - await dc_motor.set_power(power = 0.2) +# Use the set power method before you close the code loop +await dc_motor.set_power(power=0.2) ``` Enter CTRL+X to save and exit: press y when prompted to accept changes, and the return key to accept the original filename. 
diff --git a/docs/tutorials/get-started/try-viam-sdk.md b/docs/tutorials/get-started/try-viam-sdk.md index 1c4b772f11..bdc87bc852 100644 --- a/docs/tutorials/get-started/try-viam-sdk.md +++ b/docs/tutorials/get-started/try-viam-sdk.md @@ -495,6 +495,7 @@ from viam.components.base import Base from viam.robot.client import RobotClient from viam.rpc.dial import Credentials, DialOptions + async def connect(): creds = Credentials( type='robot-location-secret', @@ -505,6 +506,7 @@ async def connect(): ) return await RobotClient.at_address('ADDRESS_FROM_VIAM_APP', opts) + async def moveInSquare(base): for _ in range(4): # moves the rover forward 500mm at 500mm/s @@ -514,6 +516,7 @@ async def moveInSquare(base): await base.spin(velocity=100, angle=90) print("spin 90 degrees") + async def main(): robot = await connect() diff --git a/docs/tutorials/projects/claw-game.md b/docs/tutorials/projects/claw-game.md index 7993b14988..2d3faaae2b 100644 --- a/docs/tutorials/projects/claw-game.md +++ b/docs/tutorials/projects/claw-game.md @@ -505,8 +505,10 @@ from viam.rpc.dial import Credentials, DialOptions from viam.components.board import Board from viam.components.arm import Arm from viam.services.motion import MotionClient -from viam.proto.common import Pose, PoseInFrame, Vector3, Geometry, GeometriesInFrame, RectangularPrism, WorldState -from viam.proto.service.motion import Constraints, LinearConstraint, OrientationConstraint +from viam.proto.common import Pose, PoseInFrame, Vector3, Geometry, \ + GeometriesInFrame, RectangularPrism, WorldState +from viam.proto.service.motion import Constraints, LinearConstraint, \ + OrientationConstraint ``` Then it creates an argument parser, defining required and optional arguments to create a user-friendly command line interface: @@ -540,7 +542,7 @@ Then we define the [constraints](https://python.viam.dev/autoapi/viam/proto/serv The orientation constraint places a restriction on the orientation change during a motion, as the arm in a claw 
game should always face down so the gripper is always in a position where it can descend and grab a prize: ```python -constraints = Constraints(orientation_constraint = [OrientationConstraint()]) +constraints = Constraints(orientation_constraint=[OrientationConstraint()]) ``` Next the code imports the obstacles.json file and defines the `world_state` representing the robot's physical environment: @@ -564,11 +566,16 @@ def get_world_state(): theta=orientation['th'], ) dims = Vector3(x=geometry['x'], y=geometry['y'], z=geometry['z']) - world_state_obstacles.append(Geometry(center=center, box=RectangularPrism(dims_mm=dims), label=geometry['label'])) + world_state_obstacles.append( + Geometry(center=center, + box=RectangularPrism(dims_mm=dims), + label=geometry['label'])) - obstacles_in_frame = GeometriesInFrame(reference_frame="world", geometries=world_state_obstacles) + obstacles_in_frame = GeometriesInFrame(reference_frame="world", + geometries=world_state_obstacles) return WorldState(obstacles=[obstacles_in_frame]) + world_state = get_world_state() ``` @@ -576,14 +583,15 @@ Next, the code defines a grab function to use GPIO to open and close the gripper ```python {class="line-numbers linkable-line-numbers"} async def grab(board, doGrab): - # Note that the pin supplied is a placeholder. Please change this to a valid pin you are using. + # Note that the pin supplied is a placeholder. Please change this to a + # valid pin you are using. pin = await board.gpio_pin_by_name('8') - if doGrab == True: + if doGrab: # opens the gripper/release await pin.set(True) else: - # closes the gripper/grab - await pin.set(False) + # closes the gripper/grab + await pin.set(False) ``` Lastly, the code defines the functions `move_absolute()`, `home()`, `move_to_offset()` and `move_z()`, which construct new pose requests to send to the [motion service](/services/motion/).
@@ -591,7 +599,11 @@ Lastly, the code defines the functions `move_absolute()`, `home()`, `move_to_off ```python {class="line-numbers linkable-line-numbers"} async def move_absolute(arm, motion_service, pose): destination = PoseInFrame(reference_frame="world", pose=pose) - await motion_service.move(component_name=arm, destination=destination, world_state=world_state, constraints=constraints) + await motion_service.move(component_name=arm, + destination=destination, + world_state=world_state, + constraints=constraints) + async def home(arm, motion_service): # Makes sure to first move the arm up in z axis @@ -600,11 +612,18 @@ async def home(arm, motion_service): # Generate a sample "home" pose around the drop hole and demonstrate motion home_pose_in_frame = PoseInFrame(reference_frame="world", pose=home_pose) - await motion_service.move(component_name=arm, destination=home_pose_in_frame, world_state=world_state, constraints=constraints) + await motion_service.move(component_name=arm, + destination=home_pose_in_frame, + world_state=world_state, + constraints=constraints) + async def move_to_offset(arm, motion_service, offset): # Get current position of the arm - current_position = await motion_service.get_pose(component_name=arm, destination_frame = "", supplemental_transforms = None) + current_position = await motion_service.get_pose( + component_name=arm, + destination_frame="", + supplemental_transforms=None) print('current position: ', current_position) # Calculate new pose to move the arm to @@ -614,36 +633,45 @@ async def move_to_offset(arm, motion_service, offset): z=current_position.pose.z + offset.z, o_x=0, o_y=0, - o_z=-1, # negative z means claw will point down + o_z=-1, # negative z means claw will point down theta=0 ) print('moving to position: ', pose) # Move arm destination = PoseInFrame(reference_frame="world", pose=pose) - await motion_service.move(component_name=arm, destination=destination, world_state=world_state, constraints=constraints) + await 
motion_service.move(component_name=arm, + destination=destination, + world_state=world_state, + constraints=constraints) async def move_z(arm, motion_service, z): # Get current position of the arm - current_position = await motion_service.get_pose(component_name=arm, destination_frame = "", supplemental_transforms = None) + current_position = await motion_service.get_pose( + component_name=arm, + destination_frame="", + supplemental_transforms=None) print('current_position: ', current_position) # Construct new pose to get to desired z position pose = Pose( x=current_position.pose.x, y=current_position.pose.y, - z = z, - o_x= 0, + z=z, + o_x=0, o_y=0, - o_z=-1, # negative z means claw will point down + o_z=-1, # negative z means claw will point down theta=0 ) print('moving to position: ', pose) # Move arm destination = PoseInFrame(reference_frame="world", pose=pose) - await motion_service.move(component_name=arm, destination=destination, world_state=world_state, constraints=constraints) + await motion_service.move(component_name=arm, + destination=destination, + world_state=world_state, + constraints=constraints) ``` The `main()` function initializes different resources and then handles command line arguments to move the arm in a sequence for testing and debugging: @@ -660,8 +688,8 @@ async def main(): # myBoard my_board = Board.from_robot(robot, "myBoard") # my Subpart name, arm - my_arm_resource= Arm.get_resource_name("planning:myArm") - my_arm_resource.name= "myArm" + my_arm_resource = Arm.get_resource_name("planning:myArm") + my_arm_resource.name = "myArm" print("arm resource", my_arm_resource) commands = [args.command] @@ -684,19 +712,27 @@ async def main(): if command == "left": print("will move left") # Moves the arm's y position to left - await move_to_offset(my_arm_resource, motion_service, Vector3(x=0, y=-move_increment, z=0)) + await move_to_offset(my_arm_resource, + motion_service, + Vector3(x=0, y=-move_increment, z=0)) if command == "right": 
print("will move right") # Moves the arm's y position to right - await move_to_offset(my_arm_resource, motion_service, Vector3(x=0, y=move_increment, z=0)) + await move_to_offset(my_arm_resource, + motion_service, + Vector3(x=0, y=move_increment, z=0)) if command == "forward": print("will move forward") # Moves the arm's x position to forward - await move_to_offset(my_arm_resource, motion_service, Vector3(x=move_increment, y=0, z=0)) + await move_to_offset(my_arm_resource, + motion_service, + Vector3(x=move_increment, y=0, z=0)) if command == "backward": print("will move backward") # Moves the arm's x position to backwards - await move_to_offset(my_arm_resource, motion_service, Vector3(x=-move_increment, y=0, z=0)) + await move_to_offset(my_arm_resource, + motion_service, + Vector3(x=-move_increment, y=0, z=0)) if command == "grab": print("will grab") # Closes the gripper @@ -709,8 +745,13 @@ async def main(): print("will sleep one second") await asyncio.sleep(1) if command == "test": - print("will move to the test position, drop, grab, return home and release") - await move_absolute(my_arm_resource, motion_service, Pose(x=0.0, y=380, z=home_plane, o_x=0, o_y=0, o_z=-1, theta=0)) + print("""will move to the test position, drop, grab, return home + and release""") + await move_absolute( + my_arm_resource, + motion_service, + Pose(x=0.0, y=380, z=home_plane, o_x=0, o_y=0, o_z=-1, theta=0) + ) await move_z(my_arm_resource, motion_service, grab_plane) await grab(my_board, True) await home(my_arm_resource, motion_service) @@ -732,7 +773,8 @@ python3 CLI-test.py --password mypass --location mylocation --command grab Or, you can run sequences of these commands together, for example: ```sh {class="command-line" data-prompt="$"} -python3 CLI-test.py --password mypass --location mylocation --command sequence --sequence grab,sleep,release,sleep,grab,sleep,release +python3 CLI-test.py --password mypass --location mylocation --command sequence \ + --sequence 
grab,sleep,release,sleep,grab,sleep,release ``` Now that the arm is set up and you have a CLI script you can use for testing - try testing the motion and claw grab with different items. diff --git a/docs/tutorials/projects/guardian.md b/docs/tutorials/projects/guardian.md index 087b0e4ec1..c7c79706d0 100644 --- a/docs/tutorials/projects/guardian.md +++ b/docs/tutorials/projects/guardian.md @@ -464,6 +464,7 @@ from viam.components.camera import Camera from viam.components.servo import Servo from viam.services.vision import VisionClient + async def connect(): creds = Credentials( type='robot-location-secret', @@ -551,7 +552,8 @@ Underneath the `check_for_living_creatures()` function, add the following functi If a creature is detected, the red LEDs will light up and music will play. ```python {class="line-numbers linkable-line-numbers"} -async def idle_and_check_for_living_creatures(cam, detector, servo, blue_leds, red_leds, music_player): +async def idle_and_check_for_living_creatures( + cam, detector, servo, blue_leds, red_leds, music_player): living_creature = None while True: random_number_checks = random.randint(0, 5) @@ -650,7 +652,8 @@ async def main(): detector = VisionClient.from_robot(robot, "detector") while True: # move head periodically left and right until movement is spotted. - living_creature = await idle_and_check_for_living_creatures(cam, detector, servo, blue_leds, red_leds, music_player) + living_creature = await idle_and_check_for_living_creatures( + cam, detector, servo, blue_leds, red_leds, music_player) await focus_on_creature(living_creature, img.width, servo) # Don't forget to close the robot when you're done! 
await robot.close() @@ -760,7 +763,8 @@ async def connect(): refresh_interval=0, dial_options=DialOptions(credentials=creds) ) - return await RobotClient.at_address('guardian-main.vw3iu72d8n.viam.cloud', opts) + return await RobotClient.at_address('guardian-main.vw3iu72d8n.viam.cloud', + opts) async def check_for_living_creatures(detections): @@ -806,7 +810,12 @@ class LedGroup: await pin.set(on) -async def idle_and_check_for_living_creatures(cam, detector, servo, blue_leds, red_leds, music_player): +async def idle_and_check_for_living_creatures(cam, + detector, + servo, + blue_leds, + red_leds, + music_player): living_creature = None while True: random_number_checks = random.randint(0, 5) @@ -854,7 +863,8 @@ async def main(): detector = VisionClient.from_robot(robot, "detector") while True: # move head periodically left and right until movement is spotted. - living_creature = await idle_and_check_for_living_creatures(cam, detector, servo, blue_leds, red_leds, music_player) + living_creature = await idle_and_check_for_living_creatures( + cam, detector, servo, blue_leds, red_leds, music_player) await focus_on_creature(living_creature, img.width, servo) # Don't forget to close the robot when you're done! 
await robot.close() diff --git a/docs/tutorials/projects/make-a-plant-watering-robot.md b/docs/tutorials/projects/make-a-plant-watering-robot.md index 3edd82b13c..8722c3d97b 100644 --- a/docs/tutorials/projects/make-a-plant-watering-robot.md +++ b/docs/tutorials/projects/make-a-plant-watering-robot.md @@ -385,17 +385,20 @@ import RPi.GPIO as GPIO from typing import Any, Dict, List, Mapping, Optional + class MoistureSensor(Sensor): def __init__(self, name: str): - super(MoistureSensor,self).__init__(name) + super(MoistureSensor, self).__init__(name) sensor_pin = 4 GPIO.setmode(GPIO.BCM) GPIO.setup(sensor_pin, GPIO.IN) - # Implement the Viam Sensor API's get_readings() method - async def get_readings(self, *, extra: Optional[Mapping[str, Any]] = None, timeout: Optional[float] = None, **kwargs): + async def get_readings(self, + *, + extra: Optional[Mapping[str, Any]] = None, + timeout: Optional[float] = None, **kwargs): x = 0 input = [] @@ -411,20 +414,21 @@ class MoistureSensor(Sensor): # Create an analog input channel on Pin 0 chan = AnalogIn(mcp, MCP.P0) - while x<10: + while x < 10: # read adc channel 0 reading = chan.value input.append(reading) - x+=1 + x += 1 return {'moisture': input} + async def main(): - srv = Server(resources=[MoistureSensor("moisture_sensor")]) - await srv.serve() + srv = Server(resources=[MoistureSensor("moisture_sensor")]) + await srv.serve() if __name__ == "__main__": - asyncio.run(main()) + asyncio.run(main()) ``` You can modify this example code as necessary. 
@@ -545,7 +549,8 @@ Use the Viam [motor](/components/motor/#api) and [sensor](/components/sensor/#co You can get your components from the robot like this: ```python -sensor = Sensor.from_robot(robot=robot, name='moisture_sensor') # Note that this name, `moisture_sensor`, is defined in sensor.py +# Note that this name, `moisture_sensor`, is defined in sensor.py +sensor = Sensor.from_robot(robot=robot, name='moisture_sensor') water_pump = Motor.from_robot(robot=robot, name='water-pump') ``` @@ -554,23 +559,26 @@ And you can add your system logic to run continuously like this: ```python while True: - # Get the moisture sensor's readings - readings = await sensor.get_readings() - soil_moisture = readings.get('moisture') + # Get the moisture sensor's readings + readings = await sensor.get_readings() + soil_moisture = readings.get('moisture') - # Calculate average moisture reading from the list of readings, to account for outliers - avg_moisture = sum(soil_moisture) / len(soil_moisture) + # Calculate average moisture reading from the list of readings, to account + # for outliers + avg_moisture = sum(soil_moisture) / len(soil_moisture) - # If the average moisture reading is greater than 60000, trigger pump watering - if(avg_moisture > 60000): - print('this plant is too thirsty! giving it more water') + # If the average moisture reading is greater than 60000, trigger pump + # watering + if (avg_moisture > 60000): + print('this plant is too thirsty! giving it more water') - # Run the water pump for 100 rev. at 1000 rpm - await water_pump.go_for(rpm=1000, revolutions=100) + # Run the water pump for 100 rev. 
at 1000 rpm + await water_pump.go_for(rpm=1000, revolutions=100) - # Wait 60 seconds so that the water can soak into the soil a bit before trying to water again - print('waiting a little bit for water to soak in') - time.sleep(60) + # Wait 60 seconds so that the water can soak into the soil a bit before + # trying to water again + print('waiting a little bit for water to soak in') + time.sleep(60) ``` {{% alert title="Tip" color="tip" %}} diff --git a/docs/tutorials/projects/pet-treat-dispenser.md b/docs/tutorials/projects/pet-treat-dispenser.md index 1656f11488..5c79953eac 100644 --- a/docs/tutorials/projects/pet-treat-dispenser.md +++ b/docs/tutorials/projects/pet-treat-dispenser.md @@ -385,7 +385,8 @@ The following code initializes a camera and the puppyclassifier and shows you ho ```python petcam = Camera.from_robot(robot, "petcam") puppyclassifier = VisionClient.from_robot(robot, "puppyclassifier") -classifications = await puppyclassifier.get_classifications_from_camera(camera_name) +classifications = await puppyclassifier.get_classifications_from_camera( + camera_name) ``` Remove the existing code in the `main` function and replace it with the following logic where the the code gets classifications from the puppyclassifier based on the camera stream, and if a pet is found, activates the stepper motor using the [`go_for()` method](https://python.viam.dev/autoapi/viam/components/motor/index.html#viam.components.motor.Motor.go_for) to move a certain amount to dispense a treat. 
@@ -394,7 +395,8 @@ Remove the existing code in the `main` function and replace it with the followin async def main(): robot = await connect() - # robot components + services below, update these based on how you named them in configuration + # robot components + services below, update these based on how you named + # them in configuration pi = Board.from_robot(robot, "pi") petcam = Camera.from_robot(robot, "petcam") stepper = Motor.from_robot(robot, "stepper") @@ -403,7 +405,8 @@ async def main(): while True: # look if the camera is seeing the dog found = False - classifications = await puppyclassifier.get_classifications_from_camera(camera_name) + classifications = await \ + puppyclassifier.get_classifications_from_camera(camera_name) for d in classifications: # check if the model is confident in the classification if d.confidence > 0.7: @@ -416,7 +419,7 @@ async def main(): # turn on the stepper motor print("giving snack") state = "on" - await stepper.go_for(rpm=80,revolutions=2) + await stepper.go_for(rpm=80, revolutions=2) # stops the treat from constantly being dispensed time.sleep(300) @@ -481,6 +484,7 @@ robot_address = os.getenv('ROBOT_ADDRESS') or '' # change this if you named your camera differently in your robot configuration camera_name = os.getenv('ROBOT_CAMERA') or 'petcam' + async def connect(): creds = Credentials( type='robot-location-secret', @@ -491,10 +495,12 @@ async def connect(): ) return await RobotClient.at_address(robot_address, opts) + async def main(): robot = await connect() - # robot components + services below, update these based on how you named them in configuration + # robot components + services below, update these based on how you named + # them in configuration pi = Board.from_robot(robot, "pi") petcam = Camera.from_robot(robot, "petcam") stepper = Motor.from_robot(robot, "stepper") @@ -503,7 +509,8 @@ async def main(): while True: # look if the camera is seeing the dog found = False - classifications = await 
puppyclassifier.get_classifications_from_camera(camera_name) + classifications = await \ + puppyclassifier.get_classifications_from_camera(camera_name) for d in classifications: # check if the model is confident in the classification if d.confidence > 0.7: @@ -516,7 +523,7 @@ async def main(): # turn on the stepper motor print("giving snack") state = "on" - await stepper.go_for(rpm=80,revolutions=2) + await stepper.go_for(rpm=80, revolutions=2) # stops the treat from constantly being dispensed time.sleep(300) diff --git a/docs/tutorials/projects/send-security-photo.md b/docs/tutorials/projects/send-security-photo.md index 2dcb10fb4e..a6020ad362 100644 --- a/docs/tutorials/projects/send-security-photo.md +++ b/docs/tutorials/projects/send-security-photo.md @@ -198,7 +198,6 @@ As an example, if you have T-Mobile your code will look like this: ```python yag.send('xxxxxxxxxx@tmomail.net', 'subject', contents) - ``` This allows you to route the email to your phone as a text message. @@ -244,6 +243,7 @@ import yagmail robot_secret = os.getenv('ROBOT_SECRET') or '' robot_address = os.getenv('ROBOT_ADDRESS') or '' + async def connect(): creds = Credentials( type='robot-location-secret', @@ -254,14 +254,14 @@ async def connect(): ) return await RobotClient.at_address(robot_address, opts) + async def main(): robot = await connect() - detector = VisionClient.from_robot(robot, "myPeopleDetector") N = 100 for i in range(N): - #make sure that your camera name in the app matches "my-camera" + # make sure that your camera name in the app matches "my-camera" detections = await detector.get_detections_from_camera("my-camera") found = False for d in detections: @@ -276,16 +276,20 @@ async def main(): # Change this path to your own image.save('/yourpath/foundyou.png') # yagmail section - # Create a yagmail.SMTP instance to initialize the server connection - # Replace username and password with your actual credentials + # Create a yagmail.SMTP instance to initialize the server + # 
connection. Replace username and password with your actual + # credentials yag = yagmail.SMTP('mygmailusername', 'mygmailpassword') # Specify the message contents - contents = ['There is someone at your desk - beware','/yourpath/foundyou.png'] - # Add phone number and gateway address found in the SMS gateway step + contents = ['There is someone at your desk - beware', + '/yourpath/foundyou.png'] + # Add phone number and gateway address found in the SMS gateway + # step yag.send('xxx-xxx-xxxx@tmomail.net', 'subject', contents) - # If the robot detects a person and sends a text, we don't need it to keep sending us more texts - # so we sleep it for 60 seconds before looking for a person again + # If the robot detects a person and sends a text, we don't need + # it to keep sending us more texts so we sleep it for 60 + # seconds before looking for a person again await asyncio.sleep(60) else: print("There's nobody here, don't send a message") @@ -296,7 +300,6 @@ async def main(): if __name__ == '__main__': asyncio.run(main()) - ``` {{% /expand %}} diff --git a/docs/tutorials/projects/tipsy.md b/docs/tutorials/projects/tipsy.md index f89aca1ae9..41ffba5600 100644 --- a/docs/tutorials/projects/tipsy.md +++ b/docs/tutorials/projects/tipsy.md @@ -740,9 +740,9 @@ Replace these values with your robot’s own location secret and address, which ```python {class="line-numbers linkable-line-numbers"} robot_secret = os.getenv('ROBOT_SECRET') or '' robot_address = os.getenv('ROBOT_ADDRESS') or '' -#change this if you named your base differently in your robot configuration +# change this if you named your base differently in your robot configuration base_name = os.getenv('ROBOT_BASE') or 'tipsy-base' -#change this if you named your camera differently in your robot configuration +# change this if you named your camera differently in your robot configuration camera_name = os.getenv('ROBOT_CAMERA') or 'cam' pause_interval = os.getenv('PAUSE_INTERVAL') or 3 ``` @@ -754,19 +754,20 @@ The 
first method, `obstacle_detect()`, gets readings from a sensor, and the seco ```python {class="line-numbers linkable-line-numbers"} async def obstacle_detect(sensor): - reading = (await sensor.get_readings())["distance"] - return reading + reading = (await sensor.get_readings())["distance"] + return reading + async def obstacle_detect_loop(sensor, sensor2, base): - while(True): - reading = await obstacle_detect(sensor) - reading2 = await obstacle_detect(sensor2) - if reading < 0.4 or reading2 <0.4: - # stop the base if moving straight - if base_state == "straight": - await base.stop() - print("obstacle in front") - await asyncio.sleep(.01) + while (True): + reading = await obstacle_detect(sensor) + reading2 = await obstacle_detect(sensor2) + if reading < 0.4 or reading2 < 0.4: + # stop the base if moving straight + if base_state == "straight": + await base.stop() + print("obstacle in front") + await asyncio.sleep(.01) ``` Then, we define a person detection loop, where the robot is constantly looking for a person, and if it finds the person, it goes toward them as long as there are no obstacles in front. 
@@ -776,35 +777,36 @@ Lines 12 and 13 are where it checks specifically for detections with the label ` ```python {class="line-numbers linkable-line-numbers" data-line="12-13"} async def person_detect(detector, sensor, sensor2, base): - while(True): - # look for a person - found = False - global base_state - print("will detect") - detections = await detector.get_detections_from_camera(camera_name) - for d in detections: - if d.confidence > .7: - print(d.class_name) - #specify it is just the person we want to detect - if (d.class_name == "Person"): - found = True - if (found): - print("I see a person") - # first manually call obstacle_detect - don't even start moving if someone in the way - distance = await obstacle_detect(sensor) - distance2 = await obstacle_detect(sensor2) - if (distance > .4 or distance2 > .4): - print("will move straight") - base_state = "straight" - await base.move_straight(distance=800, velocity=250) - base_state = "stopped" - else: - print("I will turn and look for a person") - base_state = "spinning" - await base.spin(45, 45) - base_state = "stopped" - - await asyncio.sleep(pause_interval) + while (True): + # look for a person + found = False + global base_state + print("will detect") + detections = await detector.get_detections_from_camera(camera_name) + for d in detections: + if d.confidence > .7: + print(d.class_name) + # specify it is just the person we want to detect + if (d.class_name == "Person"): + found = True + if (found): + print("I see a person") + # first manually call obstacle_detect - don't even start moving if + # someone in the way + distance = await obstacle_detect(sensor) + distance2 = await obstacle_detect(sensor2) + if (distance > .4 or distance2 > .4): + print("will move straight") + base_state = "straight" + await base.move_straight(distance=800, velocity=250) + base_state = "stopped" + else: + print("I will turn and look for a person") + base_state = "spinning" + await base.spin(45, 45) + base_state = "stopped" + + 
await asyncio.sleep(pause_interval) ``` Finally, the `main()` function initializes the base, the sensors, and the detector. @@ -812,18 +814,24 @@ It also creates two background tasks running asynchronously, one looking for obs ```python {class="line-numbers linkable-line-numbers"} async def main(): - robot = await connect() - base = Base.from_robot(robot, base_name) - sensor = Sensor.from_robot(robot, "ultrasonic") - sensor2 = Sensor.from_robot(robot, "ultrasonic2") - detector = VisionServiceClient.from_robot(robot, "myPeopleDetector") - - # create a background task that looks for obstacles and stops the base if it's moving - obstacle_task = asyncio.create_task(obstacle_detect_loop(sensor, sensor2, base)) - # create a background task that looks for a person and moves towards them, or turns and keeps looking - person_task = asyncio.create_task(person_detect(detector, sensor, sensor2, base)) - results= await asyncio.gather(obstacle_task, person_task, return_exceptions=True) - print(results) + robot = await connect() + base = Base.from_robot(robot, base_name) + sensor = Sensor.from_robot(robot, "ultrasonic") + sensor2 = Sensor.from_robot(robot, "ultrasonic2") + detector = VisionServiceClient.from_robot(robot, "myPeopleDetector") + + # create a background task that looks for obstacles and stops the base if + # it's moving + obstacle_task = asyncio.create_task( + obstacle_detect_loop(sensor, sensor2, base)) + # create a background task that looks for a person and moves towards them, + # or turns and keeps looking + person_task = asyncio.create_task( + person_detect(detector, sensor, sensor2, base)) + results = await asyncio.gather(obstacle_task, + person_task, + return_exceptions=True) + print(results) ``` When you run the code, you should see results like this: diff --git a/docs/tutorials/services/accessing-and-moving-robot-arm.md b/docs/tutorials/services/accessing-and-moving-robot-arm.md index 0015f7ddc9..1397bf57ee 100644 --- 
a/docs/tutorials/services/accessing-and-moving-robot-arm.md +++ b/docs/tutorials/services/accessing-and-moving-robot-arm.md @@ -347,6 +347,7 @@ async def connect(): ) return await RobotClient.at_address('', opts) + async def main(): robot = await connect() @@ -366,7 +367,8 @@ async def main(): # Command a joint position move: small adjustment to the last joint cmd_joint_positions = JointPositions(values=[0, 0, 0, 0, 0, 15.0]) - await my_arm_component.move_to_joint_positions(positions=cmd_joint_positions) + await my_arm_component.move_to_joint_positions( + positions=cmd_joint_positions) # Generate a simple pose move +100mm in the +Z direction of the arm cmd_arm_pose = await my_arm_component.get_end_position() diff --git a/docs/tutorials/services/color-detection-scuttle.md b/docs/tutorials/services/color-detection-scuttle.md index 2f3a549859..d60697c057 100644 --- a/docs/tutorials/services/color-detection-scuttle.md +++ b/docs/tutorials/services/color-detection-scuttle.md @@ -188,6 +188,7 @@ from viam.components.camera import Camera from viam.components.servo import Servo from viam.services.vision import VisionClient + async def connect(): creds = Credentials( type="robot-location-secret", @@ -198,13 +199,15 @@ async def connect(): ) return await RobotClient.at_address("ADDRESS FROM THE VIAM APP", opts) + async def main(): # Other code + print("Starting main") if __name__ == "__main__": - print("Starting up... ") - asyncio.run(main()) - print("Done.") + print("Starting up... ") + asyncio.run(main()) + print("Done.") ``` You will update the `main()` function later. 
@@ -219,25 +222,26 @@ The following `leftOrRight` function checks where in the picture the largest det Add the `leftOrRight` function below your `connect` function: ```python {class="line-numbers linkable-line-numbers"} -# Get largest detection box and see if it's center is in the left, center, or right third +# Get largest detection box and see if it's center is in the left, center, or +# right third def leftOrRight(detections, midpoint): - largest_area = 0 - largest = Detection() - if not detections: - print("nothing detected :(") - return -1 - for d in detections: - a = (d.x_max - d.x_min) * (d.y_max-d.y_min) - if a > largest_area: - a = largest_area - largest = d - centerX = largest.x_min + largest.x_max/2 - if centerX < midpoint-midpoint/6: - return 0 # on the left - if centerX > midpoint+midpoint/6: - return 2 # on the right - else: - return 1 # basically centered + largest_area = 0 + largest = Detection() + if not detections: + print("nothing detected :(") + return -1 + for d in detections: + a = (d.x_max - d.x_min) * (d.y_max-d.y_min) + if a > largest_area: + a = largest_area + largest = d + centerX = largest.x_min + largest.x_max/2 + if centerX < midpoint-midpoint/6: + return 0 # on the left + if centerX > midpoint+midpoint/6: + return 2 # on the right + else: + return 1 # basically centered ``` ### Add the main function @@ -253,38 +257,38 @@ Replace the `main` function with the following code: ```python {class="line-numbers linkable-line-numbers"} async def main(): - spinNum = 10 # when turning, spin the motor this much - straightNum = 300 # when going straight, spin motor this much - numCycles = 200 # run the loop X times - vel = 500 # go this fast when moving motor -​ - # Connect to robot client and set up components - robot = await connect() - base = Base.from_robot(robot, "base") - camera = Camera.from_robot(robot, "") - - # Grab the vision service for the detector - my_detector = VisionClient.from_robot(robot, "my_color_detector") - - # Main loop. 
Detect the ball, determine if it's on the left or right, and head that way. - # Repeat this for numCycles - for i in range(numCycles): - detections = await my_detector.get_detections_from_camera(camera) -​ - answer = leftOrRight(detections, frame.size[0]/2) - if answer == 0: - print("left") - await base.spin(spinNum, vel) # CCW is positive - await base.move_straight(straightNum, vel) - if answer == 1: - print("center") - await base.move_straight(straightNum, vel) - if answer == 2: - print("right") - await base.spin(-spinNum, vel) - # If nothing is detected, nothing moves - - await robot.close()​ + spinNum = 10 # when turning, spin the motor this much + straightNum = 300 # when going straight, spin motor this much + numCycles = 200 # run the loop X times + vel = 500 # go this fast when moving motor + + # Connect to robot client and set up components + robot = await connect() + base = Base.from_robot(robot, "base") + camera = Camera.from_robot(robot, "") + + # Grab the vision service for the detector + my_detector = VisionClient.from_robot(robot, "my_color_detector") + + # Main loop. Detect the ball, determine if it's on the left or right, and + # head that way. Repeat this for numCycles + for i in range(numCycles): + detections = await my_detector.get_detections_from_camera(camera) + + answer = leftOrRight(detections, frame.size[0]/2) + if answer == 0: + print("left") + await base.spin(spinNum, vel) # CCW is positive + await base.move_straight(straightNum, vel) + if answer == 1: + print("center") + await base.move_straight(straightNum, vel) + if answer == 2: + print("right") + await base.spin(-spinNum, vel) + # If nothing is detected, nothing moves + + await robot.close() ``` For ``, insert the name of your configured physical camera. 
@@ -315,81 +319,86 @@ You could also write some code with a Viam SDK to [make your rover move in a squ ```python {class="line-numbers linkable-line-numbers"} import asyncio -​ + from viam.robot.client import RobotClient from viam.rpc.dial import Credentials, DialOptions from viam.services.vision import VisionServiceClient from viam.services.vision import Detection from viam.components.camera import Camera from viam.components.base import Base -​ -​ + + async def connect(): - creds = Credentials( + creds = Credentials( type="robot-location-secret", - payload="[PLEASE ADD YOUR SECRET HERE. YOU CAN FIND THIS ON THE CODE SAMPLE TAB]") - opts = RobotClient.Options( + payload="[PLEASE ADD YOUR SECRET HERE. " + "YOU CAN FIND THIS ON THE CODE SAMPLE TAB]") + opts = RobotClient.Options( refresh_interval=0, dial_options=DialOptions(credentials=creds) ) - return await RobotClient.at_address("[ADD YOUR ROBOT ADDRESS HERE. YOU CAN FIND THIS ON THE CODE SAMPLE TAB]", opts) -​ -# Get largest detection box and see if it's center is in the left, center, or right third + return await RobotClient.at_address("""[ADD YOUR ROBOT ADDRESS HERE. 
YOU + CAN FIND THIS ON THE CODE SAMPLE TAB]""", opts) + + +# Get largest detection box and see if it's center is in the left, center, or +# right third def leftOrRight(detections, midpoint): - largest_area = 0 - largest = Detection() - if not detections: - print("nothing detected :(") - return -1 - for d in detections: - a = (d.x_max - d.x_min) * (d.y_max-d.y_min) - if a > largest_area: - a = largest_area - largest = d - centerX = largest.x_min + largest.x_max/2 - if centerX < midpoint-midpoint/6: - return 0 # on the left - if centerX > midpoint+midpoint/6: - return 2 # on the right - else: - return 1 # basically centered -​ + largest_area = 0 + largest = Detection() + if not detections: + print("nothing detected :(") + return -1 + for d in detections: + a = (d.x_max - d.x_min) * (d.y_max-d.y_min) + if a > largest_area: + a = largest_area + largest = d + centerX = largest.x_min + largest.x_max/2 + if centerX < midpoint-midpoint/6: + return 0 # on the left + if centerX > midpoint+midpoint/6: + return 2 # on the right + else: + return 1 # basically centered + + async def main(): - spinNum = 10 # when turning, spin the motor this much - straightNum = 300 # when going straight, spin motor this much - numCycles = 200 # run the loop X times - vel = 500 # go this fast when moving motor -​ - # Connect to robot client and set up components - robot = await connect() - base = Base.from_robot(robot, "base") - camera = Camera.from_robot(robot, "") - - # Grab the vision service for the detector - my_detector = VisionClient.from_robot(robot, "my_color_detector") - - # Main loop. Detect the ball, determine if it's on the left or right, and head that way. 
- # Repeat this for numCycles - for i in range(numCycles): - detections = await my_detector.get_detections_from_camera(camera) -​ - answer = leftOrRight(detections, frame.size[0]/2) - if answer == 0: - print("left") - await base.spin(spinNum, vel) # CCW is positive - await base.move_straight(straightNum, vel) - if answer == 1: - print("center") - await base.move_straight(straightNum, vel) - if answer == 2: - print("right") - await base.spin(-spinNum, vel) - # If nothing is detected, nothing moves - - await robot.close() -​ + spinNum = 10 # when turning, spin the motor this much + straightNum = 300 # when going straight, spin motor this much + numCycles = 200 # run the loop X times + vel = 500 # go this fast when moving motor + + # Connect to robot client and set up components + robot = await connect() + base = Base.from_robot(robot, "base") + camera = Camera.from_robot(robot, "") + + # Grab the vision service for the detector + my_detector = VisionClient.from_robot(robot, "my_color_detector") + + # Main loop. Detect the ball, determine if it's on the left or right, and + # head that way. Repeat this for numCycles + for i in range(numCycles): + detections = await my_detector.get_detections_from_camera(camera) + + answer = leftOrRight(detections, frame.size[0]/2) + if answer == 0: + print("left") + await base.spin(spinNum, vel) # CCW is positive + await base.move_straight(straightNum, vel) + if answer == 1: + print("center") + await base.move_straight(straightNum, vel) + if answer == 2: + print("right") + await base.spin(-spinNum, vel) + # If nothing is detected, nothing moves + + await robot.close() + if __name__ == "__main__": - print("Starting up... ") - asyncio.run(main()) - print("Done.") + print("Starting up... 
") + asyncio.run(main()) + print("Done.") ``` diff --git a/docs/tutorials/services/constrain-motion.md b/docs/tutorials/services/constrain-motion.md index 6e1dd2d5a2..8d0f094f53 100644 --- a/docs/tutorials/services/constrain-motion.md +++ b/docs/tutorials/services/constrain-motion.md @@ -170,22 +170,25 @@ The following example shows the two obstacles defined such that the table's top Adjust the dimensions and positions of the obstacles to describe your own scenario: ```python {class="line-numbers linkable-line-numbers"} -# Use this offset to set your z to calibrate based on where your table actually is +# Use this offset to set your z to calibrate based on where your table actually +# is z_offset = 10 # Create a table obstacle with its top surface at z=0 table_origin = Pose(x=0.0, y=0.0, z=-19.0+z_offset) table_dims = Vector3(x=2000.0, y=2000.0, z=38.0) -table_object = Geometry(center=table_origin, box=RectangularPrism(dims_mm=table_dims)) +table_object = Geometry(center=table_origin, + box=RectangularPrism(dims_mm=table_dims)) -# Create a tissue box obstacle. Setting the box's origin to 50mm above the table -# positions the 100mm tall box on top of the table. +# Create a tissue box obstacle. Setting the box's origin to 50mm above the +# table positions the 100mm tall box on top of the table. 
box_origin = Pose(x=400, y=0, z=50+z_offset) box_dims = Vector3(x=120.0, y=80.0, z=100.0) -box_object = Geometry(center=box_origin, box=RectangularPrism(dims_mm=box_dims)) +box_object = Geometry(center=box_origin, + box=RectangularPrism(dims_mm=box_dims)) obstacles_in_frame = GeometriesInFrame(reference_frame="world", - geometries=[table_object, box_object]) + geometries=[table_object, box_object]) ``` ## Determine the gripper's axes @@ -212,15 +215,18 @@ You can pass transforms to the [motion service `move` method](../../../services/ To represent the drinking cup held in your robot's gripper, create a transform with the cup's measurements: ```python {class="line-numbers linkable-line-numbers"} -# Create a transform to represent the cup as a 120mm tall, 45mm radius capsule shape +# Create a transform to represent the cup as a 120mm tall, 45mm radius capsule +# shape cup_geometry = Geometry(center=Pose(x=0, y=0, z=155), - capsule=Capsule(radius_mm=45, length_mm=120)) + capsule=Capsule(radius_mm=45, length_mm=120)) transforms = [ - # Name the reference frame "cup" and point its long axis along the y axis of the gripper - Transform(reference_frame="cup", pose_in_observer_frame=PoseInFrame( - reference_frame="myGripper", - pose=Pose(x=0, y=0, z=45, o_x=0, o_y=-1, o_z=0, theta=0)), - physical_object=cup_geometry) + # Name the reference frame "cup" and point its long axis along the y axis + # of the gripper + Transform(reference_frame="cup", + pose_in_observer_frame=PoseInFrame( + reference_frame="myGripper", + pose=Pose(x=0, y=0, z=45, o_x=0, o_y=-1, o_z=0, theta=0)), + physical_object=cup_geometry) ] ``` @@ -232,8 +238,9 @@ If your gripper has different dimensions, change the offset accordingly. 
Now that you have created the table and tissue box obstacles as well as the cup transform, create a [WorldState](https://python.viam.dev/autoapi/viam/proto/common/index.html#viam.proto.common.WorldState) that includes all of them: ```python {class="line-numbers linkable-line-numbers"} -# Create a WorldState that includes the table and tissue box obstacles, and the cup transform -world_state = WorldState(obstacles=[obstacles_in_frame], transforms = transforms) +# Create a WorldState that includes the table and tissue box obstacles, and the +# cup transform +world_state = WorldState(obstacles=[obstacles_in_frame], transforms=transforms) ``` ## Define start and end poses @@ -248,8 +255,8 @@ However, this is more computationally intensive than planning a linear path from To increase your program's efficiency, add waypoints such that the path between any two consecutive points can be linear, without intersecting the tissue box or the table: ```python {class="line-numbers linkable-line-numbers"} -# Create a start pose, where the cup starts between the gripper's jaws, on the table -# Start pose has Z of 90mm to grab partway down the 120mm tall cup +# Create a start pose, where the cup starts between the gripper's jaws, on the +# table. 
Start pose has Z of 90mm to grab partway down the 120mm tall cup start_pose = Pose(x=320, y=240, z=90.0+z_offset, o_x=1, o_y=0, o_z=0, theta=0) start_pose_in_frame = PoseInFrame(reference_frame="world", pose=start_pose) @@ -291,18 +298,25 @@ However, since this opens up many more options for potential paths, it is much m The code below creates a linear constraint and then uses that constraint to keep the cup upright and move it in a series of linear paths along the predetermined route while avoiding the obstacles we've defined: ```python {class="line-numbers linkable-line-numbers"} -# Create a linear constraint to keep the cup upright and constrain it to a linear path -constraints = Constraints(linear_constraint = [LinearConstraint()]) +# Create a linear constraint to keep the cup upright and constrain it to a +# linear path +constraints = Constraints(linear_constraint=[LinearConstraint()]) # Move the cup to the end position without hitting the box on the table, # and while keeping the cup upright await motion_service.move(component_name=my_gripper_resource_name, - destination=way1_pose_in_frame, world_state=world_state, constraints=constraints) + destination=way1_pose_in_frame, + world_state=world_state, + constraints=constraints) await motion_service.move(component_name=my_gripper_resource_name, - destination=way2_pose_in_frame, world_state=world_state, constraints=constraints) + destination=way2_pose_in_frame, + world_state=world_state, + constraints=constraints) await motion_service.move(component_name=my_gripper_resource_name, - destination=end_pose_in_frame, world_state=world_state, constraints=constraints) -print(f"At end pose") + destination=end_pose_in_frame, + world_state=world_state, + constraints=constraints) +print("At end pose") # Put down the cup await my_gripper.open() @@ -322,12 +336,16 @@ from viam.components.arm import Arm from viam.proto.component.arm import JointPositions from viam.components.gripper import Gripper from viam.services.motion 
import MotionClient -from viam.proto.common import Pose, PoseInFrame, Vector3, Geometry, GeometriesInFrame, RectangularPrism +from viam.proto.common import Pose, PoseInFrame, Vector3, Geometry, \ + GeometriesInFrame, RectangularPrism from viam.proto.service.motion import Constraints, LinearConstraint + async def connect(): - creds = Credentials(type="robot-location-secret", payload="") - opts = RobotClient.Options(refresh_interval=0, dial_options=DialOptions(credentials=creds)) + creds = Credentials(type="robot-location-secret", + payload="") + opts = RobotClient.Options(refresh_interval=0, + dial_options=DialOptions(credentials=creds)) return await RobotClient.at_address("", opts) @@ -344,79 +362,121 @@ async def main(): # Get the pose of the arm from the arm API cmd_arm_pose = await my_arm.get_end_position() - print(f"Pose of end of myArm from get_end_position:", cmd_arm_pose) + print("Pose of end of myArm from get_end_position:", cmd_arm_pose) # Access the motion service motion_service = MotionClient.from_robot(robot, "builtin") # Get the pose of myArm from the motion service - my_arm_motion_pose = await motion_service.get_pose(my_arm_resource_name, "world") - print(f"Pose of myArm from the motion service: {my_arm_motion_pose}") + my_arm_motion_pose = await motion_service.get_pose(my_arm_resource_name, + "world") + print("Pose of myArm from the motion service:", my_arm_motion_pose) - # Use this offset to set your z to calibrate based on where your table actually is + # Use this offset to set your z to calibrate based on where your table + # actually is z_offset = 10 # Create a table obstacle table_origin = Pose(x=0.0, y=0.0, z=-19.0+z_offset) table_dims = Vector3(x=2000.0, y=2000.0, z=38.0) - table_object = Geometry(center=table_origin, box=RectangularPrism(dims_mm=table_dims)) + table_object = Geometry(center=table_origin, + box=RectangularPrism(dims_mm=table_dims)) # Create a tissue box obstacle box_origin = Pose(x=400, y=0, z=50+z_offset) box_dims = 
Vector3(x=120.0, y=80.0, z=100.0) - box_object = Geometry(center=box_origin, box=RectangularPrism(dims_mm=box_dims)) + box_object = Geometry(center=box_origin, + box=RectangularPrism(dims_mm=box_dims)) - obstacles_in_frame = GeometriesInFrame(reference_frame="world", + obstacles_in_frame = GeometriesInFrame( + reference_frame="world", geometries=[table_object, box_object]) - # Create a transform to represent the cup as a 120mm tall, 45mm radius capsule shape + # Create a transform to represent the cup as a 120mm tall, 45mm radius + # capsule shape cup_geometry = Geometry(center=Pose(x=0, y=0, z=155), - capsule=Capsule(radius_mm=45, length_mm=120)) + capsule=Capsule(radius_mm=45, length_mm=120)) transforms = [ - # Name the reference frame "cup" and point its long axis along the y axis of the gripper + # Name the reference frame "cup" and point its long axis along the y + # axis of the gripper Transform(reference_frame="cup", pose_in_observer_frame=PoseInFrame( reference_frame="myGripper", pose=Pose(x=0, y=0, z=45, o_x=0, o_y=-1, o_z=0, theta=0)), physical_object=cup_geometry) ] - # Create a WorldState that includes the table and tissue box obstacles, and the cup transform - world_state = WorldState(obstacles=[obstacles_in_frame], transforms = transforms) - - # Create a start pose, where the cup starts between the gripper's jaws, on the table - # Start pose has Z of 90mm to grab partway down the 120mm tall cup - start_pose = Pose(x=320, y=240, z=90.0+z_offset, o_x=1, o_y=0, o_z=0, theta=0) + # Create a WorldState that includes the table and tissue box obstacles, and + # the cup transform + world_state = WorldState(obstacles=[obstacles_in_frame], + transforms=transforms) + + # Create a start pose, where the cup starts between the gripper's jaws, on + # the table. 
Start pose has Z of 90mm to grab partway down the 120mm tall + # cup + start_pose = Pose(x=320, + y=240, + z=90.0+z_offset, + o_x=1, + o_y=0, + o_z=0, + theta=0) start_pose_in_frame = PoseInFrame(reference_frame="world", pose=start_pose) # Create waypoints to increase efficiency of motion planning - way1_pose = Pose(x=300, y=240, z=320+z_offset, o_x=1, o_y=0, o_z=0, theta=0) + way1_pose = Pose(x=300, + y=240, + z=320+z_offset, + o_x=1, + o_y=0, + o_z=0, + theta=0) way1_pose_in_frame = PoseInFrame(reference_frame="world", pose=way1_pose) - way2_pose = Pose(x=300, y=-240, z=320+z_offset, o_x=1, o_y=0, o_z=0, theta=0) + way2_pose = Pose(x=300, + y=-240, + z=320+z_offset, + o_x=1, + o_y=0, + o_z=0, + theta=0) way2_pose_in_frame = PoseInFrame(reference_frame="world", pose=way2_pose) # Create a pose where the cup will be set down - end_pose = Pose(x=300, y=-250, z=90.0+z_offset, o_x=1, o_y=0, o_z=0, theta=0) + end_pose = Pose(x=300, + y=-250, + z=90.0+z_offset, + o_x=1, + o_y=0, + o_z=0, + theta=0) end_pose_in_frame = PoseInFrame(reference_frame="world", pose=end_pose) # Move to the starting position and grab the cup - # This motion has no orientation constraints because it hasn't picked up the cup yet + # This motion has no orientation constraints because it hasn't picked up + # the cup yet await motion_service.move(component_name=my_gripper_resource_name, - destination=start_pose_in_frame, world_state=world_state) - print(f"At start pose") + destination=start_pose_in_frame, + world_state=world_state) + print("At start pose") await my_gripper.grab() # Create a constraint to keep the cup upright as it moves - constraints = Constraints(linear_constraint = [LinearConstraint()]) + constraints = Constraints(linear_constraint=[LinearConstraint()]) # Move the cup to the end position without hitting the box on the table, # and while keeping the cup upright await motion_service.move(component_name=my_gripper_resource_name, - destination=way1_pose_in_frame, 
world_state=world_state, constraints=constraints) + destination=way1_pose_in_frame, + world_state=world_state, + constraints=constraints) await motion_service.move(component_name=my_gripper_resource_name, - destination=way2_pose_in_frame, world_state=world_state, constraints=constraints) + destination=way2_pose_in_frame, + world_state=world_state, + constraints=constraints) await motion_service.move(component_name=my_gripper_resource_name, - destination=end_pose_in_frame, world_state=world_state, constraints=constraints) - print(f"At end pose") + destination=end_pose_in_frame, + world_state=world_state, + constraints=constraints) + print("At end pose") # Put down the cup await my_gripper.open() @@ -427,7 +487,6 @@ async def main(): if __name__ == "__main__": asyncio.run(main()) - ``` ## Next steps diff --git a/docs/tutorials/services/plan-motion-with-arm-gripper.md b/docs/tutorials/services/plan-motion-with-arm-gripper.md index 9d5d9b9742..2d18d98227 100644 --- a/docs/tutorials/services/plan-motion-with-arm-gripper.md +++ b/docs/tutorials/services/plan-motion-with-arm-gripper.md @@ -130,7 +130,8 @@ Any components that have frame information (and, as a result, are added to the f ```python {class="line-numbers linkable-line-numbers"} # Get the pose of myArm from the motion service -my_arm_motion_pose = await motion_service.get_pose(my_arm_resource_name, "world") +my_arm_motion_pose = await motion_service.get_pose(my_arm_resource_name, + "world") print(f"Pose of myArm from the motion service: {my_arm_motion_pose}") ``` @@ -173,9 +174,11 @@ You must add additional imports to access `Pose`, `PoseInFrame`, `Vector3`, `Geo # Add a table obstacle to a WorldState table_origin = Pose(x=0.0, y=0.0, z=-19.0) table_dims = Vector3(x=2000.0, y=2000.0, z=38.0) -table_object = Geometry(center=table_origin, box=RectangularPrism(dims_mm=table_dims)) +table_object = Geometry(center=table_origin, + box=RectangularPrism(dims_mm=table_dims)) -obstacles_in_frame = 
GeometriesInFrame(reference_frame="world", geometries=[table_object]) +obstacles_in_frame = GeometriesInFrame(reference_frame="world", + geometries=[table_object]) # Create a WorldState that has the GeometriesInFrame included world_state = WorldState(obstacles=[obstacles_in_frame]) @@ -245,10 +248,19 @@ Keep the space around the arm clear! ```python {class="line-numbers linkable-line-numbers"} # Generate a sample "start" pose to demonstrate motion -test_start_pose = Pose(x=510.0, y=0.0, z=526.0, o_x=0.7071, o_y=0.0, o_z=-0.7071, theta=0.0) -test_start_pose_in_frame = PoseInFrame(reference_frame="world", pose=test_start_pose) - -await motion_service.move(component_name=my_arm_resource_name, destination=test_start_pose_in_frame, world_state=world_state) +test_start_pose = Pose(x=510.0, + y=0.0, + z=526.0, + o_x=0.7071, + o_y=0.0, + o_z=-0.7071, + theta=0.0) +test_start_pose_in_frame = PoseInFrame(reference_frame="world", + pose=test_start_pose) + +await motion_service.move(component_name=my_arm_resource_name, + destination=test_start_pose_in_frame, + world_state=world_state) ``` {{% /tab %}} @@ -324,10 +336,21 @@ Then add this code to your `main()`: my_gripper_resource = Gripper.get_resource_name("myGripper") # Move the gripper in the -Z direction with respect to its own reference frame -gripper_pose_rev = Pose(x=0.0, y=0.0, z=-100.0, o_x=0.0, o_y=0.0, o_z=1.0, theta=0.0) -gripper_pose_rev_in_frame = PoseInFrame(reference_frame=my_gripper_resource.name, pose=gripper_pose_rev) # Note the change in frame name - -await motion_service.move(component_name=my_gripper_resource, destination=gripper_pose_rev_in_frame, world_state=world_state) +gripper_pose_rev = Pose(x=0.0, + y=0.0, + z=-100.0, + o_x=0.0, + o_y=0.0, + o_z=1.0, + theta=0.0) +# Note the change in frame name +gripper_pose_rev_in_frame = PoseInFrame( + reference_frame=my_gripper_resource.name, + pose=gripper_pose_rev) + +await motion_service.move(component_name=my_gripper_resource, + 
destination=gripper_pose_rev_in_frame, + world_state=world_state) ``` {{% /tab %}} @@ -388,7 +411,8 @@ import asyncio from viam.components.arm import Arm from viam.components.gripper import Gripper -from viam.proto.common import Geometry, GeometriesInFrame, Pose, PoseInFrame, RectangularPrism, Vector3, WorldState +from viam.proto.common import Geometry, GeometriesInFrame, Pose, PoseInFrame, \ + RectangularPrism, Vector3, WorldState from viam.proto.component.arm import JointPositions from viam.robot.client import RobotClient from viam.rpc.dial import Credentials, DialOptions @@ -405,6 +429,7 @@ async def connect(): ) return await RobotClient.at_address('', opts) + async def main(): robot = await connect() @@ -425,7 +450,8 @@ async def main(): # Command a joint position move: move the forearm of the arm slightly up cmd_joint_positions = JointPositions(values=[0, 0, -30.0, 0, 0, 0]) - await my_arm_component.move_to_joint_positions(positions=cmd_joint_positions) + await my_arm_component.move_to_joint_positions( + positions=cmd_joint_positions) # Generate a simple pose move +100mm in the +Z direction of the arm cmd_arm_pose = await my_arm_component.get_end_position() @@ -436,32 +462,56 @@ async def main(): motion_service = MotionClient.from_robot(robot, "builtin") # Get the pose of myArm from the motion service - my_arm_motion_pose = await motion_service.get_pose(my_arm_resource_name, "world") + my_arm_motion_pose = await motion_service.get_pose(my_arm_resource_name, + "world") print(f"Pose of myArm from the motion service: {my_arm_motion_pose}") # Add a table obstacle to a WorldState table_origin = Pose(x=-202.5, y=-546.5, z=-19.0) table_dims = Vector3(x=635.0, y=1271.0, z=38.0) - table_object = Geometry(center=table_origin, box=RectangularPrism(dims_mm=table_dims)) + table_object = Geometry(center=table_origin, + box=RectangularPrism(dims_mm=table_dims)) - obstacles_in_frame = GeometriesInFrame(reference_frame="world", geometries=[table_object]) + obstacles_in_frame = 
GeometriesInFrame(reference_frame="world", + geometries=[table_object]) # Create a WorldState that has the GeometriesInFrame included world_state = WorldState(obstacles=[obstacles_in_frame]) # Generate a sample "start" pose to demonstrate motion - test_start_pose = Pose(x=510.0, y=0.0, z=526.0, o_x=0.7071, o_y=0.0, o_z=-0.7071, theta=0.0) - test_start_pose_in_frame = PoseInFrame(reference_frame="world", pose=test_start_pose) - - await motion_service.move(component_name=my_arm_resource_name, destination=test_start_pose_in_frame, world_state=world_state) + test_start_pose = Pose(x=510.0, + y=0.0, + z=526.0, + o_x=0.7071, + o_y=0.0, + o_z=-0.7071, + theta=0.0) + test_start_pose_in_frame = PoseInFrame(reference_frame="world", + pose=test_start_pose) + + await motion_service.move(component_name=my_arm_resource_name, + destination=test_start_pose_in_frame, + world_state=world_state) my_gripper_resource = Gripper.get_resource_name("myGripper") - # This will move the gripper in the -Z direction with respect to its own reference frame - gripper_pose_rev = Pose(x=0.0, y=0.0, z=-100.0, o_x=0.0, o_y=0.0, o_z=1.0, theta=0.0) - gripper_pose_rev_in_frame = PoseInFrame(reference_frame=my_gripper_resource.name, pose=gripper_pose_rev) # Note the change in frame name - - await motion_service.move(component_name=my_gripper_resource, destination=gripper_pose_rev_in_frame, world_state=world_state) + # This will move the gripper in the -Z direction with respect to its own + # reference frame + gripper_pose_rev = Pose(x=0.0, + y=0.0, + z=-100.0, + o_x=0.0, + o_y=0.0, + o_z=1.0, + theta=0.0) + # Note the change in frame name + gripper_pose_rev_in_frame = PoseInFrame( + reference_frame=my_gripper_resource.name, + pose=gripper_pose_rev) + + await motion_service.move(component_name=my_gripper_resource, + destination=gripper_pose_rev_in_frame, + world_state=world_state) # Don't forget to close the robot when you're done! 
await robot.close() diff --git a/docs/tutorials/services/webcam-line-follower-robot.md b/docs/tutorials/services/webcam-line-follower-robot.md index 609c963995..ece37a9e49 100644 --- a/docs/tutorials/services/webcam-line-follower-robot.md +++ b/docs/tutorials/services/webcam-line-follower-robot.md @@ -457,13 +457,15 @@ The code you are using has several functions: ```python {class="line-numbers linkable-line-numbers"} async def is_color_in_front(camera, detector): """ - Returns whether the appropriate path color is detected in front of the center of the robot. + Returns whether the appropriate path color is detected in front of the + center of the robot. """ frame = await camera.get_image(mime_type="image/jpeg") x, y = frame.size[0], frame.size[1] - # Crop the image to get only the middle fifth of the top third of the original image + # Crop the image to get only the middle fifth of the top third of the + # original image cropped_frame = frame.crop((x / 2.5, 0, x / 1.25, y / 3)) detections = await detector.get_detections(cropped_frame) @@ -478,7 +480,8 @@ The code you are using has several functions: ```python {class="line-numbers linkable-line-numbers"} async def is_color_there(camera, detector, location): """ - Returns whether the appropriate path color is detected to the left/right of the robot's front. + Returns whether the appropriate path color is detected to the left/right + of the robot's front. 
""" frame = await camera.get_image(mime_type="image/jpeg") x, y = frame.size[0], frame.size[1] @@ -547,12 +550,14 @@ async def main(): # Moves the base slowly forward in a straight line await base.set_power(Vector3(y=linear_power), Vector3()) counter == 0 - # If there is green to the left, turns the base left at a continuous, slow speed + # If there is green to the left, turns the base left at a continuous, + # slow speed if await is_color_there(camera, green_detector, "left"): print("going left") await base.set_power(Vector3(), Vector3(z=angular_power)) counter == 0 - # If there is green to the right, turns the base right at a continuous, slow speed + # If there is green to the right, turns the base right at a continuous, + # slow speed elif await is_color_there(camera, green_detector, "right"): print("going right") await base.set_power(Vector3(), Vector3(z=-angular_power)) From 3484bd9c3b1e45561532a1356d9bacb3270d42e0 Mon Sep 17 00:00:00 2001 From: Naomi Pentrel <5212232+npentrel@users.noreply.github.com> Date: Thu, 14 Sep 2023 19:58:56 -0400 Subject: [PATCH 2/7] Add python test github action --- .github/workflows/python-lint.yml | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 .github/workflows/python-lint.yml diff --git a/.github/workflows/python-lint.yml b/.github/workflows/python-lint.yml new file mode 100644 index 0000000000..f93d11398f --- /dev/null +++ b/.github/workflows/python-lint.yml @@ -0,0 +1,19 @@ +name: Lint Python Code Snippets + +on: + workflow_dispatch: + pull_request: + +jobs: + python_lint: + name: Check Python snippets in Markdown files + continue-on-error: true + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: Install Flake8 Markdown linter + run: pip3 install flake8-markdown + + - name: Markdown Lint + run: flake8-markdown "docs/**/*.md" From cd2cf1ea9295de8a63d26fb8233495a36e57d085 Mon Sep 17 00:00:00 2001 From: Naomi Pentrel <5212232+npentrel@users.noreply.github.com> Date: Thu, 14 Sep 
2023 20:17:24 -0400 Subject: [PATCH 3/7] Add pre-commit hook info to readme --- .pre-commit-config.yaml | 6 ++++++ README.md | 2 ++ 2 files changed, 8 insertions(+) create mode 100644 .pre-commit-config.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000..e7b56bc2c6 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,6 @@ +# .pre-commit-config.yaml +repos: + - repo: https://github.com/johnfraney/flake8-markdown + rev: v0.5.0 + hooks: + - id: flake8-markdown diff --git a/README.md b/README.md index 961950fd9c..7bfaabb726 100644 --- a/README.md +++ b/README.md @@ -43,6 +43,8 @@ python3 -m http.server 9000 --directory public ## Test the docs locally +To ensure all python snippets are properly formatted before creating a PR, create a [pre-commit hook](https://github.com/johnfraney/flake8-markdown#pre-commit-hook) for [flake8-markdown](https://github.com/johnfraney/flake8-markdown). + To ensure your markdown is properly formatted, run `make markdowntest`. To check for broken links run `make htmltest`. 
From 545e64af9785613a62d24eafa43baea5c397f0a4 Mon Sep 17 00:00:00 2001 From: Naomi Pentrel <5212232+npentrel@users.noreply.github.com> Date: Thu, 14 Sep 2023 20:17:46 -0400 Subject: [PATCH 4/7] test --- .pre-commit-config.yaml | 6 ------ README.md | 6 +++++- 2 files changed, 5 insertions(+), 7 deletions(-) delete mode 100644 .pre-commit-config.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml deleted file mode 100644 index e7b56bc2c6..0000000000 --- a/.pre-commit-config.yaml +++ /dev/null @@ -1,6 +0,0 @@ -# .pre-commit-config.yaml -repos: - - repo: https://github.com/johnfraney/flake8-markdown - rev: v0.5.0 - hooks: - - id: flake8-markdown diff --git a/README.md b/README.md index 7bfaabb726..641b7bf233 100644 --- a/README.md +++ b/README.md @@ -43,7 +43,11 @@ python3 -m http.server 9000 --directory public ## Test the docs locally -To ensure all python snippets are properly formatted before creating a PR, create a [pre-commit hook](https://github.com/johnfraney/flake8-markdown#pre-commit-hook) for [flake8-markdown](https://github.com/johnfraney/flake8-markdown). +To ensure all python snippets are properly formatted before creating a PR, install [flake8-markdown](https://github.com/johnfraney/flake8-markdown) and add the following like to `.git/hooks/pre-commit`: + +```sh +flake8-markdown "docs/**/*.md" +``` To ensure your markdown is properly formatted, run `make markdowntest`. 
From 675c17104624d5d491b465179bea9c7e04b5abef Mon Sep 17 00:00:00 2001 From: Naomi Pentrel <5212232+npentrel@users.noreply.github.com> Date: Wed, 20 Sep 2023 12:09:17 -0400 Subject: [PATCH 5/7] Update docs/tutorials/services/color-detection-scuttle.md Co-authored-by: andf-viam <132301587+andf-viam@users.noreply.github.com> --- docs/tutorials/services/color-detection-scuttle.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/tutorials/services/color-detection-scuttle.md b/docs/tutorials/services/color-detection-scuttle.md index d60697c057..3b326b283d 100644 --- a/docs/tutorials/services/color-detection-scuttle.md +++ b/docs/tutorials/services/color-detection-scuttle.md @@ -331,8 +331,7 @@ from viam.components.base import Base async def connect(): creds = Credentials( type="robot-location-secret", - payload="[PLEASE ADD YOUR SECRET HERE. " - "YOU CAN FIND THIS ON THE CODE SAMPLE TAB]") + payload="") opts = RobotClient.Options( refresh_interval=0, dial_options=DialOptions(credentials=creds) From dedeb574d436f2bc0358c8fa1272114b40890943 Mon Sep 17 00:00:00 2001 From: Naomi Pentrel <5212232+npentrel@users.noreply.github.com> Date: Wed, 20 Sep 2023 16:04:03 -0400 Subject: [PATCH 6/7] Apply suggestions from code review --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 641b7bf233..bab263012b 100644 --- a/README.md +++ b/README.md @@ -43,10 +43,10 @@ python3 -m http.server 9000 --directory public ## Test the docs locally -To ensure all python snippets are properly formatted before creating a PR, install [flake8-markdown](https://github.com/johnfraney/flake8-markdown) and add the following like to `.git/hooks/pre-commit`: +To ensure all python snippets are properly formatted before creating a commit, install [flake8-markdown](https://github.com/johnfraney/flake8-markdown) and add the following like to `.git/hooks/pre-commit`: ```sh -flake8-markdown "docs/**/*.md" +flake8-markdown $(git 
diff --diff-filter=d --name-only HEAD | grep '\.md$') ``` To ensure your markdown is properly formatted, run `make markdowntest`. From 6e3895f90a52b7a412659a363e57d2fc243cf83f Mon Sep 17 00:00:00 2001 From: Naomi Pentrel <5212232+npentrel@users.noreply.github.com> Date: Thu, 21 Sep 2023 06:05:45 +0200 Subject: [PATCH 7/7] Update _index.md Co-authored-by: JessamyT <75634662+JessamyT@users.noreply.github.com> --- docs/components/gantry/_index.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/components/gantry/_index.md b/docs/components/gantry/_index.md index c3cb8de3ab..446e9df961 100644 --- a/docs/components/gantry/_index.md +++ b/docs/components/gantry/_index.md @@ -366,8 +366,8 @@ For more information, see the [Python SDK Docs](https://python.viam.dev/_modules ```python my_gantry = Gantry.from_robot(robot=robot, name="my_gantry") -# Stop all motion of the gantry. It is assumed that the gantry stops -# immediately. +# Stop all motion of the gantry. It is assumed that the +# gantry stops immediately. await my_gantry.stop() # Print if the gantry is currently moving.