Added FoodOnFork Detection to Web App (#127)
* Added a FoodOnFork dummy node

* Added and tested FoF detection on web app

* Change to detecting no FoF

* Start with predicting FoF

* Check for FoF Success

* Start with predicting FoF in the dummy node

* Add more descriptive text

* Added and tested auto-continue for BiteAcquisitionCheck

* Adjusted the moving away text
amalnanavati authored Mar 23, 2024
1 parent bbeb0da commit ccafe3c
Showing 8 changed files with 657 additions and 118 deletions.
@@ -70,7 +70,6 @@ def __init__(
            self.camera_callback,
            1,
        )
        self.subscription  # prevent unused variable warning

        # Create the publishers
        self.publisher_results = self.create_publisher(
@@ -0,0 +1,139 @@
#!/usr/bin/env python3
from ada_feeding_msgs.msg import FoodOnForkDetection
from std_srvs.srv import SetBool
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import CompressedImage
from threading import Lock


class FoodOnForkDetectionNode(Node):
    def __init__(
        self,
        food_on_fork_detection_interval=90,
        num_images_with_food=90,
    ):
        """
        Initializes the FoodOnForkDetection node, which exposes a SetBool
        service that can be used to toggle food-on-fork detection on or off and
        publishes information to the /food_on_fork_detection topic when food-on-fork
        detection is on.

        After food_on_fork_detection_interval images without food, this dummy node
        reports food on the fork for num_images_with_food frames, then repeats the cycle.

        Parameters:
        ----------
        food_on_fork_detection_interval: The number of consecutive frames without food
            before the node starts reporting food again.
        num_images_with_food: The number of consecutive frames for which food is
            reported before the counters reset.
        """
        super().__init__("food_on_fork_detection")

        # Internal variables to track when food should be detected
        self.food_on_fork_detection_interval = food_on_fork_detection_interval
        self.num_images_with_food = num_images_with_food
        self.num_consecutive_images_without_food = (
            self.food_on_fork_detection_interval
        )  # Start predicting FoF
        self.num_consecutive_images_with_food = 0

        # Keeps track of whether food-on-fork detection is on or not
        self.is_on = False
        self.is_on_lock = Lock()

        # Create the service
        self.srv = self.create_service(
            SetBool,
            "toggle_food_on_fork_detection",
            self.toggle_food_on_fork_detection_callback,
        )

        # Subscribe to the camera feed
        self.subscription = self.create_subscription(
            CompressedImage,
            "camera/color/image_raw/compressed",
            self.camera_callback,
            1,
        )

        # Create the publishers
        self.publisher_results = self.create_publisher(
            FoodOnForkDetection, "food_on_fork_detection", 1
        )

    def toggle_food_on_fork_detection_callback(self, request, response):
        """
        Callback function for the SetBool service. Safely toggles
        food-on-fork detection on or off depending on the request.
        """
        self.get_logger().info("Incoming service request. turn_on: %s" % (request.data))
        if request.data:
            # Reset counters
            self.num_consecutive_images_without_food = (
                self.food_on_fork_detection_interval
            )  # Start predicting FoF
            self.num_consecutive_images_with_food = 0
            # Turn on food-on-fork detection
            self.is_on_lock.acquire()
            self.is_on = True
            self.is_on_lock.release()
            response.success = True
            response.message = "Successfully turned food-on-fork detection on"
        else:
            self.is_on_lock.acquire()
            self.is_on = False
            self.is_on_lock.release()
            response.success = True
            response.message = "Successfully turned food-on-fork detection off"
        return response

    def camera_callback(self, msg):
        """
        Callback function for the camera feed. If food-on-fork detection is on, this
        function determines whether food is on the fork in the image and publishes
        the result to the /food_on_fork_detection topic.
        """
        self.get_logger().debug("Received image")
        self.is_on_lock.acquire()
        is_on = self.is_on
        self.is_on_lock.release()
        if is_on:
            # Update the number of consecutive images with/without food
            is_food_detected = False
            if self.num_consecutive_images_with_food == self.num_images_with_food:
                self.num_consecutive_images_without_food = 0
                self.num_consecutive_images_with_food = 0
            if (
                self.num_consecutive_images_without_food
                == self.food_on_fork_detection_interval
            ):
                # Detect food on the fork
                self.num_consecutive_images_with_food += 1
                is_food_detected = True
            else:
                # Don't detect food
                self.num_consecutive_images_without_food += 1

            # Publish the food-on-fork detection information
            food_on_fork_detection_msg = FoodOnForkDetection()
            food_on_fork_detection_msg.header = msg.header
            food_on_fork_detection_msg.probability = 1.0 if is_food_detected else 0.0
            food_on_fork_detection_msg.status = food_on_fork_detection_msg.SUCCESS
            food_on_fork_detection_msg.message = (
                "Food detected" if is_food_detected else "No food detected"
            )
            self.publisher_results.publish(food_on_fork_detection_msg)


def main(args=None):
    rclpy.init(args=args)

    food_on_fork_detection = FoodOnForkDetectionNode()

    rclpy.spin(food_on_fork_detection)

    rclpy.shutdown()


if __name__ == "__main__":
    main()
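
For manual testing, the interface this dummy node exposes (the toggle_food_on_fork_detection SetBool service and the food_on_fork_detection topic) can also be exercised from the browser side with roslibjs. The sketch below is illustrative only, not the web app's actual code, and assumes a rosbridge websocket server at ws://localhost:9090:

import ROSLIB from 'roslib'

// Assumed rosbridge websocket URL; adjust to your setup.
const ros = new ROSLIB.Ros({ url: 'ws://localhost:9090' })

// Turn the dummy detector on via its SetBool service.
const toggleService = new ROSLIB.Service({
  ros: ros,
  name: 'toggle_food_on_fork_detection',
  serviceType: 'std_srvs/srv/SetBool'
})
toggleService.callService(new ROSLIB.ServiceRequest({ data: true }), (response) => {
  console.log(response.success, response.message)
})

// Watch the detections it publishes; the dummy node's probability is always 1.0 or 0.0.
const detectionTopic = new ROSLIB.Topic({
  ros: ros,
  name: '/food_on_fork_detection',
  messageType: 'ada_feeding_msgs/FoodOnForkDetection'
})
detectionTopic.subscribe((msg) => {
  console.log('probability:', msg.probability, msg.message)
})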
@@ -4,6 +4,7 @@
<arg name="run_web_bridge" default="true" description="Whether to run rosbridge and web_video_server" />
<arg name="run_food_detection" default="true" description="Whether to run the dummy food detectoion node" />
<arg name="run_face_detection" default="true" description="Whether to run the dummy face detection node" />
<arg name="run_food_on_fork_detection" default="true" description="Whether to run the dummy food-on-fork detection node" />
<arg name="run_real_sense" default="true" description="Whether to run the dummy RealSense node" />
<arg name="run_motion" default="true" description="Whether to run the dummy motion nodes" />
<arg name="rgb_path" default="above_plate_2_rgb.jpg" description="The path to the RGB image/video to publish from the dummy node, relative to this node's share/data folder." />
@@ -24,7 +25,7 @@
<remap from="~/aligned_depth" to="/camera/aligned_depth_to_color/image_raw"/>
<remap from="~/camera_info" to="/camera/color/camera_info"/>
<remap from="~/aligned_depth/camera_info" to="/camera/aligned_depth_to_color/camera_info"/>
<param name="fps" value="30"/>
<param name="fps" value="15"/>
<param name="rgb_path" value="$(find-pkg-share feeding_web_app_ros2_test)/../data/$(var rgb_path)"/>
<param name="depth_path" value="$(find-pkg-share feeding_web_app_ros2_test)/../data/$(var depth_path)"/>
</node>
@@ -39,6 +40,11 @@
<!-- Perception: The FaceDetection node -->
<node pkg="feeding_web_app_ros2_test" exec="FaceDetection" name="FaceDetection"/>
</group>

<group if="$(var run_food_on_fork_detection)">
<!-- Perception: The FoodOnForkDetection node -->
<node pkg="feeding_web_app_ros2_test" exec="FoodOnForkDetection" name="FoodOnForkDetection"/>
</group>

<group if="$(var run_motion)">
<!-- Motion: The MoveAbovePlate action -->
1 change: 1 addition & 0 deletions feeding_web_app_ros2_test/setup.py
@@ -36,6 +36,7 @@
"AcquireFoodClient = feeding_web_app_ros2_test.AcquireFoodClient:main",
"DummyRealSense = feeding_web_app_ros2_test.DummyRealSense:main",
"FaceDetection = feeding_web_app_ros2_test.FaceDetection:main",
"FoodOnForkDetection = feeding_web_app_ros2_test.FoodOnForkDetection:main",
"MoveAbovePlate = feeding_web_app_ros2_test.MoveAbovePlate:main",
"MoveToRestingPosition = feeding_web_app_ros2_test.MoveToRestingPosition:main",
"MoveToStagingConfiguration = feeding_web_app_ros2_test.MoveToStagingConfiguration:main",
10 changes: 10 additions & 0 deletions feedingwebapp/src/Pages/Constants.js
@@ -50,6 +50,8 @@ export const CAMERA_FEED_TOPIC = '/local/camera/color/image_raw/compressed'
export const FACE_DETECTION_TOPIC = '/face_detection'
export const FACE_DETECTION_TOPIC_MSG = 'ada_feeding_msgs/FaceDetection'
export const FACE_DETECTION_IMG_TOPIC = '/face_detection_img/compressed'
export const FOOD_ON_FORK_DETECTION_TOPIC = '/food_on_fork_detection'
export const FOOD_ON_FORK_DETECTION_TOPIC_MSG = 'ada_feeding_msgs/FoodOnForkDetection'
export const ROBOT_COMPRESSED_IMG_TOPICS = [CAMERA_FEED_TOPIC, FACE_DETECTION_IMG_TOPIC]

// States from which, if they fail, it is NOT okay for the user to retry the
@@ -106,6 +108,14 @@ ROS_SERVICE_NAMES[MEAL_STATE.R_DetectingFace] = {
serviceName: 'toggle_face_detection',
messageType: 'std_srvs/srv/SetBool'
}
ROS_SERVICE_NAMES[MEAL_STATE.U_BiteDone] = {
serviceName: 'toggle_food_on_fork_detection',
messageType: 'std_srvs/srv/SetBool'
}
ROS_SERVICE_NAMES[MEAL_STATE.U_BiteAcquisitionCheck] = {
serviceName: 'toggle_food_on_fork_detection',
messageType: 'std_srvs/srv/SetBool'
}
export { ROS_SERVICE_NAMES }
export const CLEAR_OCTOMAP_SERVICE_NAME = 'clear_octomap'
export const CLEAR_OCTOMAP_SERVICE_TYPE = 'std_srvs/srv/Empty'
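
Both post-bite states map to the same SetBool toggle service, so a page can look its service up by meal state instead of hard-coding the name. Below is a minimal sketch of that mount/unmount pattern using roslibjs; the hook name and import path are assumptions, and the app's own ROS helpers may differ:

import { useEffect } from 'react'
import ROSLIB from 'roslib'
import { ROS_SERVICE_NAMES } from './Constants' // import path is an assumption

// Turns food-on-fork detection on while a post-bite page (U_BiteDone or
// U_BiteAcquisitionCheck) is mounted, and off again when it unmounts.
// `ros` is an already-connected ROSLIB.Ros instance; `mealState` is the
// current MEAL_STATE key.
function useToggleFoodOnForkDetection(ros, mealState) {
  useEffect(() => {
    const { serviceName, messageType } = ROS_SERVICE_NAMES[mealState]
    const service = new ROSLIB.Service({ ros: ros, name: serviceName, serviceType: messageType })
    service.callService(new ROSLIB.ServiceRequest({ data: true }), () => {})
    return () => {
      // Best-effort: turn detection back off on unmount.
      service.callService(new ROSLIB.ServiceRequest({ data: false }), () => {})
    }
  }, [ros, mealState])
}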
76 changes: 38 additions & 38 deletions feedingwebapp/src/Pages/GlobalState.jsx
@@ -75,27 +75,6 @@ export const SETTINGS_STATE = {
BITE_TRANSFER: 'BITE_TRANSFER'
}

/**
* The parameters that users can set (keys) and a list of human-readable values
* they can take on.
* - stagingPosition: Discrete options for where the robot should wait until
* the user is ready.
* - biteInitiation: Options for the modality the user wants to use to tell
* the robot they are ready for a bite.
* - TODO: Make these checkboxes instead -- users should be able to
* enable multiple buttons if they so desire.
* - biteSelection: Options for how the user wants to tell the robot what food
* item they want next.
*
* TODO (amaln): When we connect this to ROS, each of these settings types and
* value options will have to have corresponding rosparam names and value options.
*/
// export const SETTINGS = {
// stagingPosition: ['In Front of Me', 'On My Right Side'],
// biteInitiation: ['Open Mouth', 'Say "I am Ready"', 'Press Button'],
// biteSelection: ['Name of Food', 'Click on Food']
// }

/**
* useGlobalState is a hook to store and manipulate web app state that we want
* to persist across re-renders and refreshes. It won't persist if cookies are
@@ -129,6 +108,16 @@ export const useGlobalState = create(
teleopIsMoving: false,
// Flag to indicate whether to auto-continue after face detection
faceDetectionAutoContinue: true,
// Flag to indicate whether to auto-continue in bite done after food-on-fork detection
biteDoneAutoContinue: false,
biteDoneAutoContinueSecs: 3.0,
biteDoneAutoContinueProbThresh: 0.25,
// Flags to indicate whether to auto-continue in bite acquisition check based on food-on-fork
// detection
biteAcquisitionCheckAutoContinue: false,
biteAcquisitionCheckAutoContinueSecs: 3.0,
biteAcquisitionCheckAutoContinueProbThreshLower: 0.25,
biteAcquisitionCheckAutoContinueProbThreshUpper: 0.75,
// Whether the settings bite transfer page is currently at the user's face
// or not. This is in the off-chance that the mealState is not at the user's
// face, the settings page is, and the user refreshes -- the page should
@@ -141,11 +130,6 @@
// How much the video on the Bite Selection page should be zoomed in.
biteSelectionZoom: 1.0,

// Settings values
// stagingPosition: SETTINGS.stagingPosition[0],
// biteInitiation: SETTINGS.biteInitiation[0],
// biteSelection: SETTINGS.biteSelection[0],

// Setters for global state
setAppPage: (appPage) =>
set(() => ({
@@ -196,6 +180,34 @@
set(() => ({
faceDetectionAutoContinue: faceDetectionAutoContinue
})),
setBiteDoneAutoContinue: (biteDoneAutoContinue) =>
set(() => ({
biteDoneAutoContinue: biteDoneAutoContinue
})),
setBiteDoneAutoContinueSecs: (biteDoneAutoContinueSecs) =>
set(() => ({
biteDoneAutoContinueSecs: biteDoneAutoContinueSecs
})),
setBiteDoneAutoContinueProbThresh: (biteDoneAutoContinueProbThresh) =>
set(() => ({
biteDoneAutoContinueProbThresh: biteDoneAutoContinueProbThresh
})),
setBiteAcquisitionCheckAutoContinue: (biteAcquisitionCheckAutoContinue) =>
set(() => ({
biteAcquisitionCheckAutoContinue: biteAcquisitionCheckAutoContinue
})),
setBiteAcquisitionCheckAutoContinueSecs: (biteAcquisitionCheckAutoContinueSecs) =>
set(() => ({
biteAcquisitionCheckAutoContinueSecs: biteAcquisitionCheckAutoContinueSecs
})),
setBiteAcquisitionCheckAutoContinueProbThreshLower: (biteAcquisitionCheckAutoContinueProbThreshLower) =>
set(() => ({
biteAcquisitionCheckAutoContinueProbThreshLower: biteAcquisitionCheckAutoContinueProbThreshLower
})),
setBiteAcquisitionCheckAutoContinueProbThreshUpper: (biteAcquisitionCheckAutoContinueProbThreshUpper) =>
set(() => ({
biteAcquisitionCheckAutoContinueProbThreshUpper: biteAcquisitionCheckAutoContinueProbThreshUpper
})),
setBiteTransferPageAtFace: (biteTransferPageAtFace) =>
set(() => ({
biteTransferPageAtFace: biteTransferPageAtFace
@@ -204,18 +216,6 @@
set(() => ({
biteSelectionZoom: biteSelectionZoom
}))
// setStagingPosition: (stagingPosition) =>
// set(() => ({
// stagingPosition: stagingPosition
// })),
// setBiteInitiation: (biteInitiation) =>
// set(() => ({
// biteInitiation: biteInitiation
// })),
// setBiteSelection: (biteSelection) =>
// set(() => ({
// biteSelection: biteSelection
// }))
}),
{ name: 'ada_web_app_global_state' }
)
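
A hedged sketch of how a component might read the new auto-continue settings from this store and combine them with the published probability. The selector calls follow zustand's standard API; the decision rule itself (probabilities below the lower threshold indicate an empty fork, above the upper threshold indicate food on the fork) illustrates the intent and is not necessarily the app's exact logic:

import { useGlobalState } from './GlobalState' // import path is an assumption

// Decide what the bite acquisition check page should do with the latest
// food-on-fork probability. Returns 'failure', 'success', or null (keep waiting).
function useBiteAcquisitionAutoContinue(latestProbability) {
  const autoContinue = useGlobalState((state) => state.biteAcquisitionCheckAutoContinue)
  const lower = useGlobalState((state) => state.biteAcquisitionCheckAutoContinueProbThreshLower)
  const upper = useGlobalState((state) => state.biteAcquisitionCheckAutoContinueProbThreshUpper)

  if (!autoContinue) return null
  if (latestProbability <= lower) return 'failure' // fork looks empty: acquisition failed
  if (latestProbability >= upper) return 'success' // food still on fork: acquisition succeeded

  // Ambiguous probabilities: keep listening, e.g. until
  // biteAcquisitionCheckAutoContinueSecs of consistent readings have accumulated.
  return null
}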