Commit 57437e8

#789 Detection ok for all patterns. Renaming to riwbot
1 parent 3b7d195 commit 57437e8

72 files changed (+837, -372 lines)


atom_calibration/src/atom_calibration/collect/patterns.py (+34, -11)
@@ -37,16 +37,17 @@ def detect(self, image, equalize_histogram=False):
 
         return {"detected": True, 'keypoints': sub_pixel_corners, 'ids': range(0, len(sub_pixel_corners))}
 
-    def drawKeypoints(self, image, result, K=None, D=None):
+    def drawKeypoints(self, image, result, length=0.2, color=(1, 0, 0), K=None, D=None, pattern_name=None):
+
         if result['keypoints'] is None or len(result['keypoints']) == 0:
             return
 
         if not result['detected']:
             return
 
         for point in result['keypoints']:
-            cv2.drawMarker(image, (int(point[0][0]), int(point[0][1])), (0, 0, 255), cv2.MARKER_CROSS, 14)
-            cv2.circle(image, (int(point[0][0]), int(point[0][1])), 7, (0, 255, 0), lineType=cv2.LINE_AA)
+            cv2.drawMarker(image, (int(point[0][0]), int(point[0][1])), color, cv2.MARKER_CROSS, 14)
+            cv2.circle(image, (int(point[0][0]), int(point[0][1])), 7, color, lineType=cv2.LINE_AA)
 
         if K is not None and D is not None: # estimate pose and draw axis on image
 
@@ -71,7 +72,11 @@ def drawKeypoints(self, image, result, K=None, D=None):
             _, rvecs, tvecs = cv2.solvePnP(
                 objp[ids], np.array(corners, dtype=np.float32), K, D)
 
-            cv2.drawFrameAxes(image, K, D, rvecs, tvecs, 0.3)
+            cv2.drawFrameAxes(image, K, D, rvecs, tvecs, length)
+
+            if pattern_name is not None:
+                point = (int(corners[0][0][0]), int(corners[0][0][1]))
+                cv2.putText(image, pattern_name, point, cv2.FONT_HERSHEY_SIMPLEX, 1.0, color, 2)
 
 
 class CharucoPattern(object):
@@ -107,12 +112,12 @@ def __init__(self, size, length, marker_length, dictionary='DICT_5X5_100'):
 
         if cv2.__version__ == '4.6.0':
             self.dictionary = cv2.aruco.Dictionary_get(charuco_dictionary)
-            self.board = cv2.aruco.CharucoBoard_create(size["y"] + 1, size["x"] + 1, length, marker_length,
+            self.board = cv2.aruco.CharucoBoard_create(size["x"] + 1, size["y"] + 1, length, marker_length,
                                                        self.dictionary)
-
+            # self.image_board = self.board.draw((787, 472))
         else: # all versions from 4.7.0 onward
             self.dictionary = cv2.aruco.getPredefinedDictionary(charuco_dictionary)
-            self.board = cv2.aruco.CharucoBoard((size["y"] + 1, size["x"] + 1), length, marker_length,
+            self.board = cv2.aruco.CharucoBoard((size["x"] + 1, size["y"] + 1), length, marker_length,
                                                 self.dictionary)
 
     def detect(self, image, equalize_histogram=False):
@@ -129,13 +134,25 @@ def detect(self, image, equalize_histogram=False):
         if cv2.__version__ == '4.6.0':
             params = cv2.aruco.DetectorParameters_create()
             corners, ids, rejected = cv2.aruco.detectMarkers(gray, self.dictionary, parameters=params)
+
+            # Debug
+            # image_gui = deepcopy(image)
+            # cv2.aruco.drawDetectedMarkers(image_gui, corners, ids)
+            # cv2.namedWindow('Debug image', cv2.WINDOW_NORMAL)
+            # cv2.imshow('Debug image', image_gui)
+            #
+            # cv2.namedWindow('image board', cv2.WINDOW_NORMAL)
+            # cv2.imshow('image board', self.image_board)
+            #
+            # cv2.waitKey(30)
+
             # cv2.aruco.refineDetectedMarkers(gray, self.board, corners, ids, rejected)
         else:
             params = cv2.aruco.DetectorParameters()
             detector = cv2.aruco.ArucoDetector(self.dictionary, params)
             corners, ids, rejected = detector.detectMarkers(gray)
 
-        if len(corners) <= 4: # Must have more than 3 corner detections
+        if len(corners) <= 8: # Must have more than 3 corner detections
            return {"detected": False, 'keypoints': np.array([]), 'ids': []}
 
         # Interpolation of charuco corners
@@ -153,7 +170,7 @@ def detect(self, image, equalize_histogram=False):
         # If all above works, return detected corners.
         return {'detected': True, 'keypoints': ccorners, 'ids': cids.ravel().tolist()}
 
-    def drawKeypoints(self, image, result, K=None, D=None):
+    def drawKeypoints(self, image, result, length=0.2, color=(255, 0, 0), K=None, D=None, pattern_name=None):
         if result['keypoints'] is None or len(result['keypoints']) == 0:
             return
 
@@ -164,6 +181,7 @@ def drawKeypoints(self, image, result, K=None, D=None):
         # Build a numpy array with the chessboard corners
         ccorners = np.zeros((len(result['keypoints']), 1, 2), dtype=float)
         cids = list(range(0, len(result['keypoints'])))
+        num_corners = len(cids)
 
         points = result['keypoints'].astype(np.int32)
         for idx, (point, id) in enumerate(zip(result['keypoints'], result['ids'])):
@@ -175,15 +193,20 @@ def drawKeypoints(self, image, result, K=None, D=None):
         np_ccorners = np.array(ccorners, dtype=np.float32)
 
         # Draw charuco corner detection
-        image = cv2.aruco.drawDetectedCornersCharuco(image, np_ccorners, np_cids, (0, 0, 255))
+        image = cv2.aruco.drawDetectedCornersCharuco(image, np_ccorners, np_cids, color)
 
         if K is not None and D is not None: # estimate pose and draw axis on image
             rvecs, tvecs = None, None
             _, rvecs, tvecs = cv2.aruco.estimatePoseCharucoBoard(np_ccorners,
                                                                  np_cids, self.board,
                                                                  K, D, rvecs, tvecs)
             # Draw frame on the image
-            cv2.drawFrameAxes(image, K, D, rvecs, tvecs, 0.3)
+            cv2.drawFrameAxes(image, K, D, rvecs, tvecs, length)
+
+            if pattern_name is not None:
+                point = tuple(ccorners[0][0])
+                point = (int(point[0]), int(point[1]))
+                cv2.putText(image, pattern_name, point, cv2.FONT_HERSHEY_SIMPLEX, 1.0, color, 2)
 
 
 def initializePatternsDict(config, step=0.02):
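
For reference, a minimal usage sketch of the extended drawKeypoints() interface shown above. The image path, intrinsics, distortion and pattern name below are placeholder assumptions, not values from this commit:

# Sketch only: exercises the new length/color/pattern_name arguments of
# drawKeypoints(). Image path, K, D and the pattern name are hypothetical.
import cv2
import numpy as np

from atom_calibration.collect.patterns import CharucoPattern

# dimension follows the config convention: inner-corner counts along x and y.
pattern = CharucoPattern({'x': 5, 'y': 2}, 0.025, 0.018, 'DICT_6X6_100')

image = cv2.imread('charuco_sample.png')  # hypothetical test image
if image is None:
    raise SystemExit('test image not found')

K = np.array([[600.0, 0.0, 320.0],
              [0.0, 600.0, 240.0],
              [0.0, 0.0, 1.0]])  # hypothetical intrinsics
D = np.zeros((5, 1))             # hypothetical distortion

result = pattern.detect(image, equalize_histogram=False)
if result['detected']:
    # New keyword arguments: axis length, draw color and an optional
    # pattern name rendered next to the first detected corner.
    pattern.drawKeypoints(image, result, length=0.2, color=(0, 255, 0),
                          K=K, D=D, pattern_name='charuco_170x100_3x6')
    cv2.imwrite('labeled.png', image)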
New file (198 lines added):

#!/usr/bin/env python3

# stdlib
import argparse
from copy import deepcopy
import os
import sys

import cv2
from atom_core.config_io import loadConfig

import atom_core.ros_utils

# 3rd-party
import rospy
import numpy as np
import atom_core.utilities
from cv_bridge import CvBridge
from matplotlib import cm
import sensor_msgs.msg  # needed for the sensor_msgs.msg.Image reference below
from sensor_msgs.msg import *
from sensor_msgs.msg import CameraInfo
from colorama import Fore, Style

# local packages
from atom_calibration.collect import patterns
from atom_core.utilities import atomError

from atom_msgs.msg import ImageWithRGBLabels, RGBLabels, Detection2D


class RGBLabeler:
    """
    Handles data labeling for a generic sensor:
    RGB: Fully automated labeling. Runs a pattern detection on each newly received image.
    """

    def __init__(self, config, sensor_name, label_data=True, debug=False):

        print('Starting RGB labeler for sensor ' + str(sensor_name))

        # Store variables to class attributes
        self.config = config
        self.label_data = label_data
        self.sensor_name = sensor_name
        self.debug = debug

        # Check if sensor exists in config
        if self.sensor_name not in config['sensors'].keys():
            atomError('Sensor ' + Fore.BLUE + sensor_name + Style.RESET_ALL + ' not in config. Cannot start labeler.\nAvailable sensors are: ' +
                      Fore.BLUE + str(list(config['sensors'].keys())) + Style.RESET_ALL)

        self.sensor_config = config['sensors'][sensor_name]

        # Check if modality is rgb
        if not self.sensor_config['modality'] == 'rgb':
            atomError('Sensor ' + sensor_name + ' has modality ' +
                      self.sensor_config['modality'] + '. Cannot start rgb labeler.')

        # Get the type of message from the message topic of the sensor data, which is given as input.
        self.msg_type_str, self.msg_type = atom_core.ros_utils.getMessageTypeFromTopic(self.sensor_config['topic_name'])

        # Create a self.detectors dictionary with one detector class instantiated for each pattern
        self.detectors = {}
        for pattern_key, pattern in self.config['calibration_patterns'].items():
            if pattern['pattern_type'] == 'chessboard':
                self.detectors[pattern_key] = patterns.ChessboardPattern(pattern['dimension'],
                                                                         pattern['size'])
            elif pattern['pattern_type'] == 'charuco':
                self.detectors[pattern_key] = patterns.CharucoPattern(pattern['dimension'],
                                                                      pattern['size'],
                                                                      pattern['inner_size'],
                                                                      pattern['dictionary'])
            else:
                atomError('Unknown pattern type ' + str(pattern['pattern_type']))

        self.cm_patterns = cm.tab10(np.linspace(0, 1, len(self.config['calibration_patterns'].keys())))

        # Set up the labeled image publication
        self.bridge = CvBridge()  # a CvBridge structure is needed to convert opencv images to ros messages.
        self.labeled_image_topic = self.sensor_config['topic_name'] + '/labeled'
        self.publisher_labeled_image = rospy.Publisher(self.labeled_image_topic,
                                                       sensor_msgs.msg.Image,
                                                       queue_size=1)  # publish

        # Set up the publication of the produced labels
        self.publisher_image_with_rgb_labels = rospy.Publisher(self.sensor_name + '/labels',
                                                               ImageWithRGBLabels, queue_size=1)

        # Receive one camera_info message corresponding to this sensor and store it.
        # We need this to have the K and D matrices.
        camera_info_topic = os.path.dirname(self.sensor_config['topic_name']) + '/camera_info'
        print('Waiting for camera_info message on topic ' + camera_info_topic + ' ...')
        self.camera_info_msg = rospy.wait_for_message(camera_info_topic, CameraInfo)
        print('... received!')

        self.K = np.ndarray((3, 3), dtype=float, buffer=np.array(self.camera_info_msg.K))
        self.D = np.ndarray((5, 1), dtype=float, buffer=np.array(self.camera_info_msg.D))

        # self.camera_info = message_converter.convert_ros_message_to_dictionary(self.camera_info_msg)

        # Subscribe to the message topic containing sensor data
        self.subscriber = rospy.Subscriber(self.sensor_config['topic_name'], self.msg_type,
                                           self.labelData, queue_size=1)

    def labelData(self, msg):

        if not self.label_data:  # Nothing to do in this case
            return

        if self.debug:
            print('Image received. Labeling data for sensor ' + self.sensor_name)

        # Convert to opencv image and save image to disk
        image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
        image_gui = deepcopy(image)

        # Create the labels by detecting all patterns in the image
        image_with_rgb_labels_msg = ImageWithRGBLabels()
        image_with_rgb_labels_msg.image = msg  # image is the one used for labeling
        for detector_idx, (detector_key, detector) in enumerate(self.detectors.items()):

            # Initialize labels for this pattern
            pattern_labels = RGBLabels()
            pattern_labels.detected = False
            pattern_labels.idxs = []

            # Detect this pattern in the image
            result = detector.detect(image, equalize_histogram=False)

            if result['detected']:
                pattern_labels.detected = True
                for idx, corner in enumerate(result['keypoints']):
                    x = float(corner[0][0])
                    y = float(corner[0][1])

                    # The charuco pattern also returns an ID for each keypoint.
                    # We can use this information for partial detections.
                    if 'ids' in result:
                        id = result['ids'][idx]
                    else:
                        id = idx

                    pattern_labels.idxs.append(Detection2D(x=x, y=y, id=id))

            # Add the labels of this pattern to the ImageWithLabels msg
            image_with_rgb_labels_msg.patterns.append(pattern_labels)

            # For visual debugging
            color = self.cm_patterns[detector_idx, 0:3]
            color = int(color[0]*255), int(color[1]*255), int(color[2]*255)

            detector.drawKeypoints(image_gui, result, K=self.K, D=self.D, pattern_name=detector_key, color=color)

        # Publish ImageWithLabels msg
        self.publisher_image_with_rgb_labels.publish(image_with_rgb_labels_msg)
        # labels_msg.labels =
        # self.camera_info = message_converter.convert_ros_message_to_dictionary(self.camera_info_msg)

        # Create labeled image and publish it
        msg_out = self.bridge.cv2_to_imgmsg(image_gui, encoding="passthrough")
        msg_out.header.stamp = msg.header.stamp
        msg_out.header.frame_id = msg.header.frame_id
        self.publisher_labeled_image.publish(msg_out)

        if self.debug:
            cv2.namedWindow(self.labeled_image_topic, cv2.WINDOW_NORMAL)
            cv2.imshow(self.labeled_image_topic, image_gui)
            key = cv2.waitKey(30)
            if key & 0xff == ord('q'):
                rospy.signal_shutdown(1)


def main():

    # Parse command line arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-c", "--calibration_file", help='full path to calibration file.', type=str, required=True)
    ap.add_argument("-sn", "--sensor_name", help='Name of the sensor as given in the config.yml', type=str, required=True)
    ap.add_argument("-d", "--debug", help='Run in debug mode', action='store_true', default=False)

    args = vars(ap.parse_args(args=atom_core.ros_utils.filterLaunchArguments(sys.argv)))
    print("\nArgument list=" + str(args) + '\n')

    config = loadConfig(args['calibration_file'])

    # Initialize ROS stuff
    node_name = args['sensor_name'] + '_rgb_labeler'
    rospy.init_node(node_name)

    rgb_labeler = RGBLabeler(config, sensor_name=args['sensor_name'], debug=args['debug'])

    rospy.spin()


if __name__ == '__main__':
    main()
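
To illustrate how the labels published by this node can be consumed, here is a minimal subscriber sketch. The sensor name 'rgb_hand' and the listener node name are assumptions that follow the <sensor_name>/labels topic convention used above, not part of this commit:

# Sketch of a consumer for the ImageWithRGBLabels messages published by RGBLabeler.
import rospy
from atom_msgs.msg import ImageWithRGBLabels


def callback(msg):
    # One RGBLabels entry per calibration pattern, in the same order as the
    # labeler's detectors dictionary.
    for pattern_labels in msg.patterns:
        if pattern_labels.detected:
            print('Pattern detected with ' + str(len(pattern_labels.idxs)) + ' keypoints')


if __name__ == '__main__':
    rospy.init_node('rgb_labels_listener')
    rospy.Subscriber('rgb_hand/labels', ImageWithRGBLabels, callback, queue_size=1)
    rospy.spin()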

atom_examples/rihmpbot/rihmpbot_calibration/calibration/config.yml (+3, -4)
@@ -124,7 +124,7 @@ calibration_patterns:
     # dictionary: "DICT_6X6_100"
     # mesh_file: "package://atom_worlds/pattern/models/charuco_200x150_4x6_30_22_DICT_6X6/model.dae"
     # border_size: {'x': 0.01, 'y': 0.015} # measured in meshlab
-    # dimension: {'x': 3, 'y': 5} # for charuco patterns in atom we put dimension -1
+    # dimension: {'x': 5, 'y': 3} # for charuco patterns in atom we put dimension -1
     # size: 0.025
     # inner_size: 0.018
   charuco_170x100_3x6:
@@ -135,7 +135,7 @@ calibration_patterns:
     dictionary: "DICT_6X6_100"
     mesh_file: "package://atom_worlds/pattern/models/charuco_170x100_3x6_25_18_DICT_6X6/model.dae"
     border_size: {'x': 0.01, 'y': 0.01235} # measured in meshlab
-    dimension: {'x': 2, 'y': 5} # for charuco patterns in atom we put dimension -1
+    dimension: {'x': 5, 'y': 2} # for charuco patterns in atom we put dimension -1
     size: 0.025
     inner_size: 0.018
 
@@ -147,7 +147,7 @@ calibration_patterns:
     dictionary: "DICT_7X7_100"
     mesh_file: "package://atom_worlds/pattern/models/charuco_200x120_3x6_30_22_DICT_7X7/model.dae"
     border_size: {'x': 0.01, 'y': 0.015} # measured in meshlab
-    dimension: {'x': 2, 'y': 5} # for charuco patterns in atom we put dimension -1
+    dimension: {'x': 5, 'y': 2} # for charuco patterns in atom we put dimension -1
     size: 0.030
     inner_size: 0.022
 
@@ -176,7 +176,6 @@ calibration_patterns:
     # inner_size: 0.045
 
 
-
 # Miscellaneous configuration
 
 # If your calibration problem is not fully constrained you should anchored one of the sensors.
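
The dimension swaps above mirror the argument-order change in patterns.py, where size["x"] + 1 and size["y"] + 1 now feed the CharucoBoard constructor in that order. A minimal sketch of the board this produces for the charuco_170x100_3x6 entry, assuming the OpenCV >= 4.7 branch:

# Sketch of the board built from the charuco_170x100_3x6 entry with the
# updated argument order (OpenCV >= 4.7 API assumed; values from the config above).
import cv2

dimension = {'x': 5, 'y': 2}   # inner-corner counts from the config
size = 0.025                   # square side in meters
inner_size = 0.018             # marker side in meters

dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_6X6_100)
board = cv2.aruco.CharucoBoard((dimension['x'] + 1, dimension['y'] + 1),
                               size, inner_size, dictionary)
print(board.getChessboardSize())  # expected: (6, 3) squares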
