
Commit

updated visualization
jingyi-xiang committed Jun 22, 2023
1 parent 3620d43 commit 975c71c
Showing 5 changed files with 95 additions and 53 deletions.
31 changes: 19 additions & 12 deletions launch/trackdlo.launch
@@ -1,27 +1,32 @@
<launch>
<!-- change the below parameters to match your camera/scene setup -->
<arg name="camera_info_topic" default="/camera/color/camera_info" />
<!-- <arg name="camera_info_topic" default="/camera/aligned_depth_to_color/camera_info" /> -->
<!-- <arg name="camera_info_topic" default="/camera/color/camera_info" /> -->
<arg name="camera_info_topic" default="/camera/aligned_depth_to_color/camera_info" />
<arg name="rgb_topic" default="/camera/color/image_raw" />
<arg name="depth_topic" default="/camera/aligned_depth_to_color/image_raw" />
<arg name="result_frame_id" default="camera_color_optical_frame" />
<arg name="hsv_threshold_upper_limit" default="130 255 255" />
<!-- <arg name="hsv_threshold_lower_limit" default="100 200 60" /> -->
<!-- <arg name="hsv_threshold_lower_limit" default="90 90 30" /> -->
<arg name="hsv_threshold_lower_limit" default="90 90 90" />
<arg name="num_of_nodes" default="40" />
<arg name="hsv_threshold_lower_limit" default="90 90 30" />
<!-- <arg name="hsv_threshold_lower_limit" default="90 90 90" /> -->
<arg name="num_of_nodes" default="25" />
<arg name="visualize_initialization_process" default="false" />

<!-- modifying the below parameters is usually not needed -->
<!-- kernel: 0 -> Laplacian; 1 -> 1st order; 2 -> 2nd order; 3 -> Gaussian -->
<arg name="kernel" default="1" />
<arg name="multi_color_dlo" default="true" />
<arg name="multi_color_dlo" default="false" />

<!-- the below arg is for quick sanity checking with GLTP.
it should ALWAYS be set to FALSE when running TrackDLO -->
<arg name="gltp" default="false" />

<!-- load parameters to corresponding nodes -->
<node name="trackdlo" pkg="trackdlo" type="trackdlo" output="screen">
<param name="camera_info_topic" type="string" value="$(arg camera_info_topic)" />
<param name="rgb_topic" type="string" value="$(arg rgb_topic)" />
<param name="depth_topic" type="string" value="$(arg depth_topic)" />
<param name="result_frame_id" type="string" value="$(arg result_frame_id)" />

<param name="hsv_threshold_upper_limit" type="string" value="$(arg hsv_threshold_upper_limit)" />
<param name="hsv_threshold_lower_limit" type="string" value="$(arg hsv_threshold_lower_limit)" />
@@ -30,7 +35,7 @@
<param name="beta" value="1" if="$(eval arg('kernel') == 0)"/>
<param name="lambda" value="50000" if="$(eval arg('kernel') == 0)"/>

<param name="beta" value="0.5" if="$(eval arg('kernel') == 1)"/>
<param name="beta" value="0.35" if="$(eval arg('kernel') == 1)"/>
<param name="lambda" value="50000" if="$(eval arg('kernel') == 1)"/>

<param name="beta" value="0.3" if="$(eval arg('kernel') == 2)"/>
@@ -52,10 +57,10 @@
<param name="max_iter" value="50" />

<!-- tol: EM optimization convergence tolerance -->
<param name="tol" value="0.0001" />
<param name="tol" value="0.0002" />

<!-- k_vis: the strength of visibility information's effect on membership probability computation -->
<param name="k_vis" value="500" />
<param name="k_vis" value="50" />

<!-- set to false in the registration step because we did not have space in the paper to discuss LLE -->
<param name="include_lle" value="false" />
@@ -69,10 +74,10 @@
<param name="use_prev_sigma2" value="true" />

<param name="kernel" value="$(arg kernel)" />
<param name="downsample_leaf_size" value="0.005" />
<param name="downsample_leaf_size" value="0.008" />
<param name="multi_color_dlo" type="bool" value="$(arg multi_color_dlo)" />
<param name="visibility_threshold" type="double" value="0.005" />
<param name="dlo_pixel_width" value="30" />
<param name="visibility_threshold" type="double" value="0.008" />
<param name="dlo_pixel_width" value="40" />
<param name="gltp" type="bool" value="$(arg gltp)" />
</node>

@@ -81,6 +86,8 @@
<param name="camera_info_topic" type="string" value="$(arg camera_info_topic)" />
<param name="rgb_topic" type="string" value="$(arg rgb_topic)" />
<param name="depth_topic" type="string" value="$(arg depth_topic)" />
<param name="result_frame_id" type="string" value="$(arg result_frame_id)" />

<param name="num_of_nodes" value="$(arg num_of_nodes)" />
<param name="multi_color_dlo" type="bool" value="$(arg multi_color_dlo)" />
<param name="visualize_initialization_process" type="bool" value="$(arg visualize_initialization_process)" />
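For reference, the hsv_threshold_upper_limit and hsv_threshold_lower_limit arguments above are space-separated H S V bounds used to segment the DLO by color. Below is a minimal sketch of how such limits typically become a binary mask with OpenCV; the parsing helper and the dummy image are illustrative assumptions, not code from this commit.

import cv2
import numpy as np

# Illustrative values matching the launch defaults above
hsv_threshold_lower_limit = "90 90 30"
hsv_threshold_upper_limit = "130 255 255"

def parse_hsv_limit(s):
    # "90 90 30" -> array([90, 90, 30])
    return np.array([int(v) for v in s.split()], dtype=np.uint8)

lower = parse_hsv_limit(hsv_threshold_lower_limit)
upper = parse_hsv_limit(hsv_threshold_upper_limit)

# Any HxWx3 uint8 BGR frame works here; a black dummy image keeps the sketch self-contained
bgr_image = np.zeros((480, 640, 3), dtype=np.uint8)
hsv_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV)

# Pixels whose H, S, V all fall inside [lower, upper] become 255, everything else 0
mask = cv2.inRange(hsv_image, lower, upper)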
5 changes: 3 additions & 2 deletions trackdlo/src/initialize.py
@@ -119,7 +119,7 @@ def callback (rgb, depth):

init_nodes = spline_pts[np.linspace(0, num_true_pts-1, num_of_nodes).astype(int)]

results = ndarray2MarkerArray(init_nodes, "camera_color_optical_frame", [255, 150, 0, 0.75], [0, 255, 0, 0.75])
results = ndarray2MarkerArray(init_nodes, result_frame_id, [1, 150/255, 0, 0.75], [0, 1, 0, 0.75])
results_pub.publish(results)

# add color
@@ -142,6 +142,7 @@ def callback (rgb, depth):
camera_info_topic = rospy.get_param('/init_tracker/camera_info_topic')
rgb_topic = rospy.get_param('/init_tracker/rgb_topic')
depth_topic = rospy.get_param('/init_tracker/depth_topic')
result_frame_id = rospy.get_param('/init_tracker/result_frame_id')
visualize_initialization_process = rospy.get_param('/init_tracker/visualize_initialization_process')

hsv_threshold_upper_limit = rospy.get_param('/init_tracker/hsv_threshold_upper_limit')
@@ -159,7 +160,7 @@ def callback (rgb, depth):
# header
header = std_msgs.msg.Header()
header.stamp = rospy.Time.now()
header.frame_id = 'camera_color_optical_frame'
header.frame_id = result_frame_id
fields = [PointField('x', 0, PointField.FLOAT32, 1),
PointField('y', 4, PointField.FLOAT32, 1),
PointField('z', 8, PointField.FLOAT32, 1),
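The changes above replace the hard-coded 'camera_color_optical_frame' with the result_frame_id parameter, both for the marker array (whose colors are now given as 0-1 RGBA floats) and for the published point cloud header. Below is a minimal sketch of how a configurable frame id feeds a PointCloud2 message, assuming ROS1 rospy and sensor_msgs; the node name and the dummy points are illustrative.

import rospy
import std_msgs.msg
from sensor_msgs.msg import PointField
from sensor_msgs import point_cloud2

rospy.init_node('frame_id_sketch')

# In init_tracker this comes from rospy.get_param('/init_tracker/result_frame_id')
result_frame_id = 'camera_color_optical_frame'

header = std_msgs.msg.Header()
header.stamp = rospy.Time.now()
header.frame_id = result_frame_id  # parameterized instead of hard-coded

fields = [PointField('x', 0, PointField.FLOAT32, 1),
          PointField('y', 4, PointField.FLOAT32, 1),
          PointField('z', 8, PointField.FLOAT32, 1)]

# A few dummy 3D points; in the node these are the filtered DLO points
points = [(0.0, 0.0, 0.5), (0.01, 0.0, 0.5), (0.02, 0.0, 0.5)]

cloud_msg = point_cloud2.create_cloud(header, fields, points)
# cloud_msg can now be published; RViz will place it in result_frame_id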
106 changes: 70 additions & 36 deletions trackdlo/src/trackdlo_node.cpp
@@ -50,6 +50,7 @@ std::string rgb_topic;
std::string depth_topic;
std::string hsv_threshold_upper_limit;
std::string hsv_threshold_lower_limit;
std::string result_frame_id;
std::vector<int> upper;
std::vector<int> lower;

@@ -183,9 +184,6 @@ sensor_msgs::ImagePtr Callback(const sensor_msgs::ImageConstPtr& image_msg, cons

cv::cvtColor(mask, mask_rgb, cv::COLOR_GRAY2BGR);

sensor_msgs::PointCloud2 output;
sensor_msgs::PointCloud2 result_pc;

bool simulated_occlusion = false;
int occlusion_corner_i = -1;
int occlusion_corner_j = -1;
@@ -227,7 +225,7 @@ sensor_msgs::ImagePtr Callback(const sensor_msgs::ImageConstPtr& image_msg, cons
point.y = (pixel_y - cy) * pc_z / fy;
point.z = pc_z;

// currently missing point field so color doesn't show up in rviz
// something is currently off, so color doesn't show up in rviz
point.r = cur_image_orig.at<cv::Vec3b>(i, j)[0];
point.g = cur_image_orig.at<cv::Vec3b>(i, j)[1];
point.b = cur_image_orig.at<cv::Vec3b>(i, j)[2];
@@ -345,7 +343,15 @@ sensor_msgs::ImagePtr Callback(const sensor_msgs::ImageConstPtr& image_msg, cons
}

// add edges for checking overlap with upcoming nodes
cv::line(projected_edges, cv::Point(col_1, row_1), cv::Point(col_2, row_2), cv::Scalar(255, 255, 255), dlo_pixel_width);
double x1 = col_1;
double y1 = row_1;
double x2 = col_2;
double y2 = row_2;
x1 = x1 + (0.25*dlo_pixel_width) / sqrt(pow(x1-x2, 2) + pow(y1-y2, 2)) * (x2 - x1);
y1 = y1 + (0.25*dlo_pixel_width) / sqrt(pow(x1-x2, 2) + pow(y1-y2, 2)) * (y2 - y1);
x2 = x2 + (0.25*dlo_pixel_width) / sqrt(pow(x1-x2, 2) + pow(y1-y2, 2)) * (x1 - x2);
y2 = y2 + (0.25*dlo_pixel_width) / sqrt(pow(x1-x2, 2) + pow(y1-y2, 2)) * (y1 - y2);
cv::line(projected_edges, cv::Point(x1, y1), cv::Point(x2, y2), cv::Scalar(255, 255, 255), dlo_pixel_width);
}
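The x1/y1/x2/y2 arithmetic above pulls each projected edge endpoint inward by 0.25 * dlo_pixel_width pixels along the segment before the thick occlusion-check line is rasterized, presumably so that nodes adjacent to the edge are not flagged as overlapping it. A standalone sketch of that shrinking step, written so both endpoints are offset from the original coordinates; numpy only, names are illustrative.

import numpy as np

def shrink_segment(p1, p2, offset_px):
    # Move both endpoints toward each other by offset_px pixels
    p1 = np.asarray(p1, dtype=float)
    p2 = np.asarray(p2, dtype=float)
    length = np.linalg.norm(p2 - p1)
    if length <= 2 * offset_px:
        # Segment too short to shrink; collapse it to the midpoint
        mid = (p1 + p2) / 2
        return mid, mid
    direction = (p2 - p1) / length
    return p1 + offset_px * direction, p2 - offset_px * direction

# Example with the dlo_pixel_width of 40 set in the launch file
dlo_pixel_width = 40
p1_new, p2_new = shrink_segment((100, 200), (300, 260), 0.25 * dlo_pixel_width)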

// sort visible nodes to preserve the original connectivity
@@ -372,7 +378,7 @@ sensor_msgs::ImagePtr Callback(const sensor_msgs::ImageConstPtr& image_msg, cons
priors = tracker.get_correspondence_pairs();
}
else {
tracker.cpd_lle(X_pruned, Y, sigma2, 1, 1, 1, mu, 50, tol, true, true, true);
tracker.cpd_lle(X_pruned, Y, sigma2, 1, 1, 1, mu, 50, tol, true, false, true);
}

// log time
@@ -382,6 +388,20 @@ sensor_msgs::ImagePtr Callback(const sensor_msgs::ImageConstPtr& image_msg, cons
cur_time = std::chrono::high_resolution_clock::now();

// projection and pub image
averaged_node_camera_dists = {};
indices_vec = {};
for (int i = 0; i < Y.rows()-1; i ++) {
averaged_node_camera_dists.push_back(((Y.row(i) + Y.row(i+1)) / 2).norm());
indices_vec.push_back(i);
}
// sort
std::sort(indices_vec.begin(), indices_vec.end(),
[&](const int& a, const int& b) {
return (averaged_node_camera_dists[a] < averaged_node_camera_dists[b]);
}
);
std::reverse(indices_vec.begin(), indices_vec.end());

MatrixXd nodes_h = Y.replicate(1, 1);
nodes_h.conservativeResize(nodes_h.rows(), nodes_h.cols()+1);
nodes_h.col(nodes_h.cols()-1) = MatrixXd::Ones(nodes_h.rows(), 1);
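The averaged_node_camera_dists / indices_vec block above computes, for each edge (i, i+1), the distance from the camera origin to the edge midpoint, sorts the edge indices by that distance, and reverses the order so the farthest edges are drawn first and nearer edges overwrite them (a painter's-algorithm draw order). A small numpy sketch of the same far-to-near ordering; Y here is a dummy Nx3 array of node positions in the camera frame.

import numpy as np

# Dummy node positions (N x 3) expressed in the camera frame
Y = np.array([[0.0, 0.0, 0.8],
              [0.1, 0.0, 0.7],
              [0.2, 0.0, 0.9],
              [0.3, 0.0, 0.6]])

# Distance from the camera (origin) to each edge midpoint
midpoints = (Y[:-1] + Y[1:]) / 2.0
edge_dists = np.linalg.norm(midpoints, axis=1)

# Far-to-near edge order, so closer edges are drawn last and stay on top
draw_order = np.argsort(edge_dists)[::-1]
for idx in draw_order:
    pass  # draw edge (idx, idx + 1) here, e.g. with cv2.line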
@@ -391,31 +411,44 @@ sensor_msgs::ImagePtr Callback(const sensor_msgs::ImageConstPtr& image_msg, cons
tracking_img = 0.5*cur_image_orig + 0.5*cur_image;

// draw points
for (int i = 0; i < image_coords.rows(); i ++) {
for (int idx : indices_vec) {

int x = static_cast<int>(image_coords(i, 0)/image_coords(i, 2));
int y = static_cast<int>(image_coords(i, 1)/image_coords(i, 2));
int x = static_cast<int>(image_coords(idx, 0)/image_coords(idx, 2));
int y = static_cast<int>(image_coords(idx, 1)/image_coords(idx, 2));

cv::Scalar point_color;
cv::Scalar line_color;

if (std::find(visible_nodes.begin(), visible_nodes.end(), i) != visible_nodes.end()) {
if (std::find(visible_nodes.begin(), visible_nodes.end(), idx) != visible_nodes.end()) {
point_color = cv::Scalar(0, 150, 255);
line_color = cv::Scalar(0, 255, 0);
if (std::find(visible_nodes.begin(), visible_nodes.end(), idx+1) != visible_nodes.end()) {
line_color = cv::Scalar(0, 255, 0);
}
else {
line_color = cv::Scalar(0, 0, 255);
}
}
else {
point_color = cv::Scalar(0, 0, 255);
line_color = cv::Scalar(0, 0, 255);
}

if (i != image_coords.rows()-1) {
cv::line(tracking_img, cv::Point(x, y),
cv::Point(static_cast<int>(image_coords(i+1, 0)/image_coords(i+1, 2)),
static_cast<int>(image_coords(i+1, 1)/image_coords(i+1, 2))),
line_color, 5);
}
cv::line(tracking_img, cv::Point(x, y),
cv::Point(static_cast<int>(image_coords(idx+1, 0)/image_coords(idx+1, 2)),
static_cast<int>(image_coords(idx+1, 1)/image_coords(idx+1, 2))),
line_color, 5);

cv::circle(tracking_img, cv::Point(x, y), 7, point_color, -1);

if (std::find(visible_nodes.begin(), visible_nodes.end(), idx+1) != visible_nodes.end()) {
point_color = cv::Scalar(0, 150, 255);
}
else {
point_color = cv::Scalar(0, 0, 255);
}
cv::circle(tracking_img, cv::Point(static_cast<int>(image_coords(idx+1, 0)/image_coords(idx+1, 2)),
static_cast<int>(image_coords(idx+1, 1)/image_coords(idx+1, 2))),
7, point_color, -1);
}
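In the loop above, an edge is drawn green only when both of its endpoint nodes appear in visible_nodes; otherwise it is drawn red, and each endpoint circle is colored by its own visibility. A compact sketch of that color selection using OpenCV-style BGR tuples; the helper function and the example call are illustrative, not from the commit.

# Colors in OpenCV's BGR order, matching the values used above
VISIBLE_POINT = (0, 150, 255)   # orange
OCCLUDED = (0, 0, 255)          # red
VISIBLE_EDGE = (0, 255, 0)      # green

def edge_and_point_colors(idx, visible_nodes):
    # Returns (line_color, start_point_color, end_point_color) for edge (idx, idx+1)
    start_visible = idx in visible_nodes
    end_visible = (idx + 1) in visible_nodes
    line_color = VISIBLE_EDGE if (start_visible and end_visible) else OCCLUDED
    start_color = VISIBLE_POINT if start_visible else OCCLUDED
    end_color = VISIBLE_POINT if end_visible else OCCLUDED
    return line_color, start_color, end_color

# Node 3 is occluded, so edge (2, 3) is red even though node 2 is visible
print(edge_and_point_colors(2, {0, 1, 2}))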

// add text
@@ -426,18 +459,11 @@ sensor_msgs::ImagePtr Callback(const sensor_msgs::ImageConstPtr& image_msg, cons
// publish image
tracking_img_msg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", tracking_img).toImageMsg();

// publish filtered point cloud
pcl::PCLPointCloud2 cur_pc_pointcloud2;
pcl::toPCLPointCloud2(cur_pc_downsampled, cur_pc_pointcloud2);
cur_pc_pointcloud2.header.frame_id = "camera_color_optical_frame";

// Convert to ROS data type
pcl_conversions::moveFromPCL(cur_pc_pointcloud2, output);

// publish the results as a marker array
visualization_msgs::MarkerArray results = MatrixXd2MarkerArray(Y, "camera_color_optical_frame", "node_results", {1.0, 150.0/255.0, 0.0, 1.0}, {0.0, 1.0, 0.0, 1.0}, 0.01, 0.005);
visualization_msgs::MarkerArray guide_nodes_results = MatrixXd2MarkerArray(guide_nodes, "camera_color_optical_frame", "guide_node_results", {0.0, 0.0, 0.0, 0.5}, {0.0, 0.0, 1.0, 0.5});
visualization_msgs::MarkerArray corr_priors_results = MatrixXd2MarkerArray(priors, "camera_color_optical_frame", "corr_prior_results", {0.0, 0.0, 0.0, 0.5}, {1.0, 0.0, 0.0, 0.5});
// visualization_msgs::MarkerArray results = MatrixXd2MarkerArray(Y, result_frame_id, "node_results", {1.0, 150.0/255.0, 0.0, 1.0}, {0.0, 1.0, 0.0, 1.0}, 0.01, 0.005, visible_nodes, {1.0, 0.0, 0.0, 1.0}, {1.0, 0.0, 0.0, 1.0});
visualization_msgs::MarkerArray results = MatrixXd2MarkerArray(Y, result_frame_id, "node_results", {1.0, 150.0/255.0, 0.0, 1.0}, {0.0, 1.0, 0.0, 1.0}, 0.01, 0.005);
visualization_msgs::MarkerArray guide_nodes_results = MatrixXd2MarkerArray(guide_nodes, result_frame_id, "guide_node_results", {0.0, 0.0, 0.0, 0.5}, {0.0, 0.0, 1.0, 0.5});
visualization_msgs::MarkerArray corr_priors_results = MatrixXd2MarkerArray(priors, result_frame_id, "corr_prior_results", {0.0, 0.0, 0.0, 0.5}, {1.0, 0.0, 0.0, 0.5});

// convert to pointcloud2 for eval
pcl::PointCloud<pcl::PointXYZ> trackdlo_pc;
@@ -449,19 +475,28 @@ sensor_msgs::ImagePtr Callback(const sensor_msgs::ImageConstPtr& image_msg, cons
trackdlo_pc.points.push_back(temp);
}

// publish filtered point cloud
pcl::PCLPointCloud2 cur_pc_pointcloud2;
pcl::toPCLPointCloud2(cur_pc_downsampled, cur_pc_pointcloud2);
pcl::PCLPointCloud2 result_pc_pclpoincloud2;

pcl::toPCLPointCloud2(trackdlo_pc, result_pc_pclpoincloud2);
pcl_conversions::moveFromPCL(result_pc_pclpoincloud2, result_pc);

result_pc.header.frame_id = "camera_color_optical_frame";
result_pc.header.stamp = image_msg->header.stamp;
// Convert to ROS data type
sensor_msgs::PointCloud2 cur_pc_msg;
sensor_msgs::PointCloud2 result_pc_msg;
pcl_conversions::moveFromPCL(cur_pc_pointcloud2, cur_pc_msg);
pcl_conversions::moveFromPCL(result_pc_pclpoincloud2, result_pc_msg);

// for evaluation sync
cur_pc_msg.header.frame_id = result_frame_id;
result_pc_msg.header.frame_id = result_frame_id;
result_pc_msg.header.stamp = image_msg->header.stamp;

results_pub.publish(results);
guide_nodes_pub.publish(guide_nodes_results);
corr_priors_pub.publish(corr_priors_results);
pc_pub.publish(output);
result_pc_pub.publish(result_pc);
pc_pub.publish(cur_pc_msg);
result_pc_pub.publish(result_pc_msg);

// reset all guide nodes
for (int i = 0; i < guide_nodes_results.markers.size(); i ++) {
@@ -482,8 +517,6 @@ sensor_msgs::ImagePtr Callback(const sensor_msgs::ImageConstPtr& image_msg, cons
ROS_INFO_STREAM("Avg tracking step: " + std::to_string(algo_total / frames) + " ms");
ROS_INFO_STREAM("Avg pub data: " + std::to_string(pub_data_total / frames) + " ms");
ROS_INFO_STREAM("Avg total: " + std::to_string((pre_proc_total + algo_total + pub_data_total) / frames) + " ms");

// pc_pub.publish(output);
}

return tracking_img_msg;
@@ -516,6 +549,7 @@ int main(int argc, char **argv) {
nh.getParam("/trackdlo/camera_info_topic", camera_info_topic);
nh.getParam("/trackdlo/rgb_topic", rgb_topic);
nh.getParam("/trackdlo/depth_topic", depth_topic);
nh.getParam("/trackdlo/result_frame_id", result_frame_id);

nh.getParam("/trackdlo/hsv_threshold_upper_limit", hsv_threshold_upper_limit);
nh.getParam("/trackdlo/hsv_threshold_lower_limit", hsv_threshold_lower_limit);
2 changes: 1 addition & 1 deletion trackdlo/src/utils.py
@@ -161,7 +161,7 @@ def extract_connected_skeleton (visualize_process, mask, img_scale=10, seg_lengt

# smooth image
im = Image.fromarray(mask)
smoothed_im = im.filter(ImageFilter.ModeFilter(size=10))
smoothed_im = im.filter(ImageFilter.ModeFilter(size=15))
mask = np.array(smoothed_im)

# resize if necessary for better skeletonization performance
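The ModeFilter(size=15) call above replaces each pixel with the most common value in its 15x15 neighborhood, which removes small speckles from the binary mask before skeletonization. A self-contained sketch of that smoothing step on a synthetic mask; PIL and numpy only, the mask itself is made up for illustration.

import numpy as np
from PIL import Image, ImageFilter

# Synthetic binary mask: a filled band plus one isolated speckle
mask = np.zeros((100, 200), dtype=np.uint8)
mask[40:60, 20:180] = 255
mask[10, 10] = 255  # speckle the mode filter should remove

im = Image.fromarray(mask)
smoothed_im = im.filter(ImageFilter.ModeFilter(size=15))
mask = np.array(smoothed_im)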
4 changes: 2 additions & 2 deletions utils/pub_camera_tf.py
@@ -27,8 +27,8 @@
static_transformStamped_5.child_frame_id = "camera_color_optical_frame"

# current camera pos
# trans_5 = [0.5308947503950723, 0.030109485611943067, 0.50]
trans_5 = [0.5308947503950723, 0.030109485611943067, 0.6311476272663069]
trans_5 = [0.5308947503950723, 0.030109485611943067, 0.50]
# trans_5 = [0.5308947503950723, 0.030109485611943067, 0.6311476272663069]
quat_5 = [-0.7065771296991245, 0.7075322875283535, 0.0004147019946593735, 0.012109909714664245]

static_transformStamped_5.transform.translation.x = trans_5[0]
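The translation and quaternion above define a static transform for camera_color_optical_frame. Below is a minimal sketch of publishing such a static transform with tf2_ros in ROS1; the node name and the parent frame 'base_link' are assumptions, not values from this file.

import rospy
import tf2_ros
import geometry_msgs.msg

rospy.init_node('camera_tf_sketch')
broadcaster = tf2_ros.StaticTransformBroadcaster()

t = geometry_msgs.msg.TransformStamped()
t.header.stamp = rospy.Time.now()
t.header.frame_id = 'base_link'  # assumed parent frame
t.child_frame_id = 'camera_color_optical_frame'

trans_5 = [0.5308947503950723, 0.030109485611943067, 0.50]
quat_5 = [-0.7065771296991245, 0.7075322875283535, 0.0004147019946593735, 0.012109909714664245]
t.transform.translation.x, t.transform.translation.y, t.transform.translation.z = trans_5
t.transform.rotation.x, t.transform.rotation.y, t.transform.rotation.z, t.transform.rotation.w = quat_5

broadcaster.sendTransform(t)
rospy.spin()  # keep the node alive so the latched static transform stays available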
