From 49e681242459278d6c4b0a9771ff181a91f4ee7d Mon Sep 17 00:00:00 2001
From: Xuesong Shi
Date: Sun, 5 Jul 2020 23:38:06 +0800
Subject: [PATCH] first commit

Signed-off-by: Xuesong Shi
---
 .gitignore                                    |   1 +
 README.md                                     |  73 +++++++
 feature_extraction/CMakeLists.txt             | 204 ++++++++++++++++++
 feature_extraction/feature_extraction_node.py | 131 +++++++++++
 feature_extraction/hfnet_tf.py                | 112 ++++++++++
 feature_extraction/hfnet_vino.py              | 119 ++++++++++
 feature_extraction/package.xml                |  62 ++++++
 feature_extraction/show_keypoints.py          |  52 +++++
 feature_extraction/show_match.py              |  79 +++++++
 image_feature_msgs/CMakeLists.txt             |  36 ++++
 image_feature_msgs/LICENSE                    | 202 +++++++++++++++++
 image_feature_msgs/msg/ImageFeatures.msg      |  20 ++
 image_feature_msgs/msg/KeyPoint.msg           |  17 ++
 image_feature_msgs/package.xml                |  28 +++
 14 files changed, 1136 insertions(+)
 create mode 100644 .gitignore
 create mode 100644 README.md
 create mode 100644 feature_extraction/CMakeLists.txt
 create mode 100755 feature_extraction/feature_extraction_node.py
 create mode 100644 feature_extraction/hfnet_tf.py
 create mode 100644 feature_extraction/hfnet_vino.py
 create mode 100644 feature_extraction/package.xml
 create mode 100755 feature_extraction/show_keypoints.py
 create mode 100755 feature_extraction/show_match.py
 create mode 100644 image_feature_msgs/CMakeLists.txt
 create mode 100644 image_feature_msgs/LICENSE
 create mode 100644 image_feature_msgs/msg/ImageFeatures.msg
 create mode 100644 image_feature_msgs/msg/KeyPoint.msg
 create mode 100644 image_feature_msgs/package.xml

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..bee8a64
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+__pycache__
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..8384afc
--- /dev/null
+++ b/README.md
@@ -0,0 +1,73 @@
+# ROS toolkit for deep feature extraction
+
+This repo contains the following ROS packages:
+- feature_extraction: real-time extraction of image features (keypoints with their descriptors and scores, plus a per-image global descriptor)
+- image_feature_msgs: definition of the feature messages
+
+# Setup
+
+### System requirements
+
+- Ubuntu 18.04 + ROS Melodic (recommended version)
+- Python 3.6 or higher
+- TensorFlow 1.12 or higher (`pip3 install tensorflow`)
+- (optional) OpenVINO 2019 R3 or higher ([download](https://software.intel.com/en-us/openvino-toolkit/choose-download))
+- OpenCV for Python3 (`pip3 install opencv-python`; not needed if OpenVINO is installed and activated)
+- numpy (`pip3 install numpy`)
+- No GPU required
+
+### Download and build
+
+0. Install the prerequisites
+```
+sudo apt install python3-dev python-catkin-tools python3-catkin-pkg-modules python3-rospkg-modules python3-empy python3-yaml
+```
+
+1. Set up a catkin workspace and download this repo
+```
+mkdir src && cd src
+git clone https://github.com/cedrusx/deep_features_ros.git
+```
+
+2. Download cv_bridge and configure it for Python3 (required so that feature_extraction can use cv_bridge from Python 3)
+```
+git clone -b melodic https://github.com/ros-perception/vision_opencv.git
+cd ..
+# change the paths in the following command according to your Python version
+catkin config -DPYTHON_EXECUTABLE=/usr/bin/python3 -DPYTHON_INCLUDE_DIR=/usr/include/python3.6m -DPYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython3.6m.so
+```
+
+3. Build
+```
+. /opt/ros/melodic/setup.bash
+catkin build
+```
+
+4. Download one of the saved [HF-Net](https://github.com/ethz-asl/hfnet) models from [here](https://github.com/cedrusx/open_deep_features/releases/tag/model_release_1), and unzip it.
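+
+A quick way to verify that cv_bridge was built against Python 3 (an optional sanity check; use your own workspace path):
+```
+. YOUR_PATH_TO_CATKIN_WS/devel/setup.bash
+python3 -c "from cv_bridge import CvBridge; print('cv_bridge OK')"
+```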
+
+# Run
+
+### Feature extraction
+
+Start the feature extraction node, which will subscribe to one or more image topic(s) and publish the extracted image features on corresponding topic(s) with a `/features` suffix.
+```
+. YOUR_PATH_TO_CATKIN_WS/devel/setup.bash
+```
+
+With an OpenVINO model:
+```
+. /opt/intel/openvino/bin/setupvars.sh
+rosrun feature_extraction feature_extraction_node.py _net:=hfnet_vino _model_path:=YOUR_PATH_TO/models/hfnet_vino_480x640
+```
+
+With a TensorFlow model:
+```
+rosrun feature_extraction feature_extraction_node.py _net:=hfnet_tf _model_path:=YOUR_PATH_TO/models/hfnet_tf
+```
+
+Additional params and their default values (`_topics` takes a comma-separated list, e.g. `_topics:=/cam1/image,/cam2/image`):
+```
+_topics:=/d400/color/image_raw \
+_keypoint_number:=500 \
+_gui:=True
+```
diff --git a/feature_extraction/CMakeLists.txt b/feature_extraction/CMakeLists.txt
new file mode 100644
index 0000000..51e3c1b
--- /dev/null
+++ b/feature_extraction/CMakeLists.txt
@@ -0,0 +1,204 @@
+cmake_minimum_required(VERSION 2.8.3)
+project(feature_extraction)
+
+## Compile as C++11, supported in ROS Kinetic and newer
+# add_compile_options(-std=c++11)
+
+## Find catkin macros and libraries
+## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
+## is used, also find other catkin packages
+find_package(catkin REQUIRED COMPONENTS
+  rospy
+)
+
+## System dependencies are found with CMake's conventions
+# find_package(Boost REQUIRED COMPONENTS system)
+
+
+## Uncomment this if the package has a setup.py. This macro ensures
+## modules and global scripts declared therein get installed
+## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
+# catkin_python_setup()
+
+################################################
+## Declare ROS messages, services and actions ##
+################################################
+
+## To declare and build messages, services or actions from within this
+## package, follow these steps:
+## * Let MSG_DEP_SET be the set of packages whose message types you use in
+##   your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
+## * In the file package.xml:
+##   * add a build_depend tag for "message_generation"
+##   * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
+##   * If MSG_DEP_SET isn't empty the following dependency has been pulled in
+##     but can be declared for certainty nonetheless:
+##     * add a exec_depend tag for "message_runtime"
+## * In this file (CMakeLists.txt):
+##   * add "message_generation" and every package in MSG_DEP_SET to
+##     find_package(catkin REQUIRED COMPONENTS ...)
+##   * add "message_runtime" and every package in MSG_DEP_SET to
+##     catkin_package(CATKIN_DEPENDS ...)
+##   * uncomment the add_*_files sections below as needed
+##     and list every .msg/.srv/.action file to be processed
+##   * uncomment the generate_messages entry below
+##   * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
+ +## Generate messages in the 'msg' folder +# add_message_files( +# FILES +# Message1.msg +# Message2.msg +# ) + +## Generate services in the 'srv' folder +# add_service_files( +# FILES +# Service1.srv +# Service2.srv +# ) + +## Generate actions in the 'action' folder +# add_action_files( +# FILES +# Action1.action +# Action2.action +# ) + +## Generate added messages and services with any dependencies listed here +# generate_messages( +# DEPENDENCIES +# std_msgs # Or other packages containing msgs +# ) + +################################################ +## Declare ROS dynamic reconfigure parameters ## +################################################ + +## To declare and build dynamic reconfigure parameters within this +## package, follow these steps: +## * In the file package.xml: +## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" +## * In this file (CMakeLists.txt): +## * add "dynamic_reconfigure" to +## find_package(catkin REQUIRED COMPONENTS ...) +## * uncomment the "generate_dynamic_reconfigure_options" section below +## and list every .cfg file to be processed + +## Generate dynamic reconfigure parameters in the 'cfg' folder +# generate_dynamic_reconfigure_options( +# cfg/DynReconf1.cfg +# cfg/DynReconf2.cfg +# ) + +################################### +## catkin specific configuration ## +################################### +## The catkin_package macro generates cmake config files for your package +## Declare things to be passed to dependent projects +## INCLUDE_DIRS: uncomment this if your package contains header files +## LIBRARIES: libraries you create in this project that dependent projects also need +## CATKIN_DEPENDS: catkin_packages dependent projects also need +## DEPENDS: system dependencies of this project that dependent projects also need +catkin_package( +# INCLUDE_DIRS include +# LIBRARIES feature_extraction +# CATKIN_DEPENDS rospy +# DEPENDS system_lib +) + +########### +## Build ## +########### + +## Specify additional locations of header files +## Your package locations should be listed before other locations +include_directories( +# include + ${catkin_INCLUDE_DIRS} +) + +## Declare a C++ library +# add_library(${PROJECT_NAME} +# src/${PROJECT_NAME}/feature_extraction.cpp +# ) + +## Add cmake target dependencies of the library +## as an example, code may need to be generated before libraries +## either from message generation or dynamic reconfigure +# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) + +## Declare a C++ executable +## With catkin_make all packages are built within a single CMake context +## The recommended prefix ensures that target names across packages don't collide +# add_executable(${PROJECT_NAME}_node src/feature_extraction_node.cpp) + +## Rename C++ executable without prefix +## The above recommended prefix causes long target names, the following renames the +## target back to the shorter version for ease of user use +## e.g. 
"rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" +# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") + +## Add cmake target dependencies of the executable +## same as for the library above +# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) + +## Specify libraries to link a library or executable target against +# target_link_libraries(${PROJECT_NAME}_node +# ${catkin_LIBRARIES} +# ) + +############# +## Install ## +############# + +# all install targets should use catkin DESTINATION variables +# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html + +## Mark executable scripts (Python etc.) for installation +## in contrast to setup.py, you can choose the destination +# catkin_install_python(PROGRAMS +# scripts/my_python_script +# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} +# ) + +## Mark executables for installation +## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html +# install(TARGETS ${PROJECT_NAME}_node +# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} +# ) + +## Mark libraries for installation +## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html +# install(TARGETS ${PROJECT_NAME} +# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} +# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} +# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} +# ) + +## Mark cpp header files for installation +# install(DIRECTORY include/${PROJECT_NAME}/ +# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} +# FILES_MATCHING PATTERN "*.h" +# PATTERN ".svn" EXCLUDE +# ) + +## Mark other files for installation (e.g. launch and bag files, etc.) +# install(FILES +# # myfile1 +# # myfile2 +# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} +# ) + +############# +## Testing ## +############# + +## Add gtest based cpp test target and link libraries +# catkin_add_gtest(${PROJECT_NAME}-test test/test_feature_extraction.cpp) +# if(TARGET ${PROJECT_NAME}-test) +# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) +# endif() + +## Add folders to be run by python nosetests +# catkin_add_nosetests(test) diff --git a/feature_extraction/feature_extraction_node.py b/feature_extraction/feature_extraction_node.py new file mode 100755 index 0000000..493a519 --- /dev/null +++ b/feature_extraction/feature_extraction_node.py @@ -0,0 +1,131 @@ +#!/usr/bin/env python3 +import os +import cv2 +import numpy as np +import time +import rospy +from sensor_msgs.msg import Image +from cv_bridge import CvBridge +from image_feature_msgs.msg import ImageFeatures, KeyPoint +from std_msgs.msg import MultiArrayDimension +import threading + +def main(): + rospy.init_node('feature_extraction_node') + net_name = rospy.get_param('~net', 'hfnet_tf') + # user can set more than one input image topics, e.g. 
/cam1/image,/cam2/image
+    topics = rospy.get_param('~topics', '/d400/color/image_raw')
+    gui = rospy.get_param('~gui', True)
+    if net_name == 'hfnet_vino':
+        from hfnet_vino import FeatureNet, default_config
+    elif net_name == 'hfnet_tf':
+        from hfnet_tf import FeatureNet, default_config
+    else:
+        exit('Unknown net %s' % net_name)
+    config = default_config
+    config['keypoint_number'] = rospy.get_param('~keypoint_number', config['keypoint_number'])
+    config['model_path'] = rospy.get_param('~model_path', config['model_path'])
+    net = FeatureNet(config)  # pass the updated config explicitly
+    node = Node(net, gui)
+    for topic in topics.split(','):
+        node.subscribe(topic)
+    rospy.spin()
+
+class Node():
+    def __init__(self, net, gui):
+        self.net = net
+        self.gui = gui
+        self.cv_bridge = CvBridge()
+        self.publishers = {}
+        self.subscribers = {}
+        self.latest_msgs = {}
+        self.lock = threading.Lock() # protect latest_msgs
+        self.thread = threading.Thread(target=self.worker)
+        self.thread.start()
+
+    def subscribe(self, topic):
+        output_topic = '/'.join(topic.split('/')[:-1]) + '/features'
+        self.publishers[topic] = rospy.Publisher(output_topic, ImageFeatures, queue_size=1)
+        with self.lock:
+            self.latest_msgs[topic] = None
+        callback = lambda msg: self.callback(msg, topic)
+        self.subscribers[topic] = rospy.Subscriber(topic, Image, callback, queue_size=1)
+
+    def callback(self, msg, topic):
+        # keep only the latest message for each topic
+        with self.lock:
+            self.latest_msgs[topic] = msg
+
+    def worker(self):
+        while not rospy.is_shutdown():
+            no_new_msg = True
+            # take turns processing each topic; snapshot the keys since
+            # subscribe() may add topics from the main thread
+            for topic in list(self.latest_msgs.keys()):
+                with self.lock:
+                    msg = self.latest_msgs[topic]
+                    self.latest_msgs[topic] = None
+                if msg is None:
+                    rospy.loginfo_throttle(3, topic + ': no message received')
+                    continue
+                self.process(msg, topic)
+                no_new_msg = False
+            if no_new_msg: time.sleep(0.01)
+
+    def process(self, msg, topic):
+        start_time = time.time()
+        if msg.encoding == '8UC1' or msg.encoding == 'mono8':
+            image_gray = self.cv_bridge.imgmsg_to_cv2(msg)
+            if self.gui: image_color = cv2.cvtColor(image_gray, cv2.COLOR_GRAY2BGR)
+        else:
+            image_color = self.cv_bridge.imgmsg_to_cv2(msg, 'bgr8')
+            image_gray = cv2.cvtColor(image_color, cv2.COLOR_BGR2GRAY)
+        t2 = time.time()
+        features = self.net.infer(image_gray)
+        t3 = time.time()
+        if (features['keypoints'].shape[0] != 0):
+            feature_msg = features_to_ros_msg(features, msg)
+            self.publishers[topic].publish(feature_msg)
+        end_time = time.time()
+        rospy.loginfo(topic + ': %.2f | %.2f ms (%d keypoints)' % (
+            (end_time-start_time) * 1000,
+            (t3 - t2) * 1000,
+            features['keypoints'].shape[0]))
+        if self.gui:
+            draw_keypoints(image_color, features['keypoints'], features['scores'])
+            cv2.imshow(topic, image_color)
+            cv2.waitKey(1)
+
+def draw_keypoints(image, keypoints, scores):
+    upper_score = 0.5
+    lower_score = 0.1
+    scale = 1 / (upper_score - lower_score)
+    for p,s in zip(keypoints, scores):
+        s = min(max(s - lower_score, 0) * scale, 1)
+        color = (255 * (1 - s), 255 * (1 - s), 255) # BGR
+        cv2.circle(image, tuple(p), 3, color, 2)
+
+def features_to_ros_msg(features, img_msg):
+    msg = ImageFeatures()
+    msg.header = img_msg.header
+    msg.sorted_by_score.data = False
+    for kp in features['keypoints']:
+        p = KeyPoint()
+        p.x = kp[0]
+        p.y = kp[1]
+        msg.keypoints.append(p)
+    msg.scores = features['scores'].flatten()
+    msg.descriptors.data = features['local_descriptors'].flatten()
+    shape = features['local_descriptors'][0].shape
+    msg.descriptors.layout.dim.append(MultiArrayDimension())
+    msg.descriptors.layout.dim[0].label = 'keypoint'
+    msg.descriptors.layout.dim[0].size = shape[0]
+    msg.descriptors.layout.dim[0].stride = shape[0] * shape[1]
+    msg.descriptors.layout.dim.append(MultiArrayDimension())
+    msg.descriptors.layout.dim[1].label = 'descriptor'
+    msg.descriptors.layout.dim[1].size = shape[1]
+    msg.descriptors.layout.dim[1].stride = shape[1]
+    msg.global_descriptor = features['global_descriptor'][0]
+    return msg
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
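For downstream consumers, the descriptor matrix can be recovered from the `Float32MultiArray` layout written by `features_to_ros_msg` above. A minimal subscriber sketch (illustrative only, not part of this patch; the topic name assumes the node's default `~topics` value):

```
#!/usr/bin/env python3
import numpy as np
import rospy
from image_feature_msgs.msg import ImageFeatures

def callback(msg):
    dim = msg.descriptors.layout.dim
    n, d = dim[0].size, dim[1].size  # keypoint count, descriptor length
    descriptors = np.asarray(msg.descriptors.data, dtype=np.float32).reshape(n, d)
    keypoints = [(kp.x, kp.y) for kp in msg.keypoints]
    rospy.loginfo('%d keypoints, %d-D local descriptors, %d-D global descriptor',
                  len(keypoints), d, len(msg.global_descriptor))

rospy.init_node('feature_listener')
rospy.Subscriber('/d400/color/features', ImageFeatures, callback, queue_size=1)
rospy.spin()
```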
diff --git a/feature_extraction/hfnet_tf.py b/feature_extraction/hfnet_tf.py
new file mode 100644
index 0000000..f090fe3
--- /dev/null
+++ b/feature_extraction/hfnet_tf.py
@@ -0,0 +1,112 @@
+#!/usr/bin/env python3
+
+import tensorflow as tf
+from tensorflow.python.ops import gen_nn_ops
+from tensorflow.python.saved_model import tag_constants
+import cv2
+import numpy as np
+tf.contrib.resampler  # touch tf.contrib so the resampler op gets registered
+
+default_config = {
+    'model_path': 'models/hfnet_tf',
+    'keypoint_number': 500,
+    'keypoint_threshold': 0.002,
+    'nms_iterations': 1,
+    'nms_radius': 1,
+}
+
+
+class FeatureNet:
+    def __init__(self, config=default_config):
+        self.graph = tf.Graph()
+        self.sess = tf.Session(graph=self.graph)
+        # as_default() only takes effect within a `with` block, so build
+        # all ops inside it to keep them in our graph
+        with self.graph.as_default():
+            tf.saved_model.loader.load(
+                self.sess,
+                [tag_constants.SERVING],
+                config['model_path'])
+            self.net_image_in = self.graph.get_tensor_by_name('image:0')
+            self.net_scores = self.graph.get_tensor_by_name('scores:0')
+            self.net_logits = self.graph.get_tensor_by_name('logits:0')
+            self.net_local_desc = self.graph.get_tensor_by_name('local_descriptors:0')
+            self.net_global_decs = self.graph.get_tensor_by_name('global_descriptor:0')
+            self.keypoints, self.scores = self.select_keypoints(
+                self.net_scores, config['keypoint_number'], config['keypoint_threshold'],
+                config['nms_iterations'], config['nms_radius'])
+            # inverse ratio for upsampling (should be approx. 1/8)
+            self.scaling_op = ((tf.cast(tf.shape(self.net_local_desc)[1:3], tf.float32) - 1.)
+                / (tf.cast(tf.shape(self.net_image_in)[1:3], tf.float32) - 1.))
+            # interpolate descriptors at keypoint locations (approx. 8x upsampling
+            # to image coordinates) and L2-normalize them
+            self.local_descriptors_op = \
+                tf.nn.l2_normalize(
+                    tf.contrib.resampler.resampler(
+                        self.net_local_desc,
+                        self.scaling_op[::-1] * tf.to_float(self.keypoints)),
+                    -1)
+
+
+    def simple_nms(self, scores, iterations, radius):
+        """Performs non maximum suppression (NMS) on the heatmap using max-pooling.
+        This method does not suppress contiguous points that have the same score.
+        It is an approximation of the standard NMS and uses iterative propagation.
+        Arguments:
+            scores: the score heatmap, with shape `[B, H, W]`.
+            iterations: an integer scalar, the number of NMS iterations.
+            radius: an integer scalar, the radius of the NMS window.
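+        Example (illustrative, with the values from default_config):
+            suppressed = self.simple_nms(scores, iterations=1, radius=1)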
+ """ + if iterations < 1: return scores + with self.graph.as_default(): + with tf.name_scope('simple_nms'): + radius = tf.constant(radius, name='radius') + size = radius*2 + 1 + + max_pool = lambda x: gen_nn_ops.max_pool_v2( # supports dynamic ksize + x[..., None], ksize=[1, size, size, 1], + strides=[1, 1, 1, 1], padding='SAME')[..., 0] + zeros = tf.zeros_like(scores) + max_mask = tf.equal(scores, max_pool(scores)) + for _ in range(iterations-1): + supp_mask = tf.cast(max_pool(tf.to_float(max_mask)), tf.bool) + supp_scores = tf.where(supp_mask, zeros, scores) + new_max_mask = tf.equal(supp_scores, max_pool(supp_scores)) + max_mask = max_mask | (new_max_mask & tf.logical_not(supp_mask)) + return tf.where(max_mask, scores, zeros) + + + def select_keypoints(self, scores, keypoint_number, keypoint_threshold, nms_iterations, nms_radius): + with self.graph.as_default(): + scores = self.simple_nms(scores, nms_iterations, nms_radius) + with tf.name_scope('keypoint_extraction'): + keypoints = tf.where(tf.greater_equal( + scores[0], keypoint_threshold)) + scores = tf.gather_nd(scores[0], keypoints) + with tf.name_scope('top_k_keypoints'): + k = tf.constant(keypoint_number, name='k') + k = tf.minimum(tf.shape(scores)[0], k) + scores, indices = tf.nn.top_k(scores, k) + keypoints = tf.to_int32(tf.gather( + tf.to_float(keypoints), indices)) + keypoints, scores = keypoints[None], scores[None] + keypoints = keypoints[..., ::-1] # x-y convention + return keypoints, scores + + + def infer(self, image): + if len(image.shape) == 2: # grayscale + image_in = image[None,:,:,None] + else: + image_in = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)[None,:,:,None] + results = self.sess.run( + [self.scores, # (1, num_keypoints) float32 + self.net_logits, # (1, 60, 80, 65) float32 + self.net_local_desc, # (1, 60, 80, 256) float32 + self.net_global_decs, # (1, 4096) float32 + self.local_descriptors_op,# (1, num_keypoints, 256) float32 + self.keypoints[0]], # (num_keypoints, 2) int64 + feed_dict = {self.net_image_in: image_in}) + + features = {} + features['keypoints'] = results[-1] + features['scores'] = results[0][0] + features['local_descriptors'] = results[-2] + features['global_descriptor'] = results[-3] + return features diff --git a/feature_extraction/hfnet_vino.py b/feature_extraction/hfnet_vino.py new file mode 100644 index 0000000..180b8b4 --- /dev/null +++ b/feature_extraction/hfnet_vino.py @@ -0,0 +1,119 @@ +#!/usr/bin/env python3 + +import tensorflow as tf +import cv2 +import numpy as np +from openvino.inference_engine import IENetwork +from openvino.inference_engine import IEPlugin,IECore +import os +from tensorflow.python.ops import gen_nn_ops +tf.enable_eager_execution() + +default_config = { + 'cpu_extension': "/opt/intel/openvino/inference_engine/lib/intel64/libcpu_extension_sse4.so", + 'model_path': 'models/hfnet_vino', + 'model_file': "hfnet.xml", + 'weights_file': "hfnet.bin", + 'keypoint_number': 500, + 'keypoint_threshold': 0.002, + 'nms_iterations': 1, + 'nms_radius': 1, +} + +class FeatureNet: + def __init__(self, config=default_config): + self.config = config + self.ie = IECore() + if os.path.exists(config['cpu_extension']): + self.ie.add_extension(config['cpu_extension'], 'CPU') + else: + print('CPU extension file does not exist: %s' % config['cpu_extension']) + model = os.path.join(config['model_path'], config['model_file']) + weights = os.path.join(config['model_path'], config['weights_file']) + self.net = IENetwork(model=model, weights=weights) + # Input size is specified by the OpenVINO model 
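+        # (e.g. 480x640 for the released hfnet_vino_480x640 model; infer()
+        # resizes inputs of any other size and scales keypoints back)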
+        input_shape = self.net.inputs['image'].shape
+        self.input_size = (input_shape[3], input_shape[2])  # (width, height)
+        self.scaling_desc = (np.array(self.input_size) / 8 - 1.) / (np.array(self.input_size) - 1.)
+        print('OpenVINO model input size: (%d, %d)' % (self.input_size[0], self.input_size[1]))
+        self.input_blob = next(iter(self.net.inputs))
+        self.out_blob = next(iter(self.net.outputs))
+        self.net.batch_size = 1
+        self.exec_net = self.ie.load_network(network=self.net, device_name="CPU")
+
+    def simple_nms(self, scores, iterations, radius):
+        """Performs non maximum suppression (NMS) on the heatmap using max-pooling.
+        This method does not suppress contiguous points that have the same score.
+        It is an approximation of the standard NMS and uses iterative propagation.
+        Arguments:
+            scores: the score heatmap, with shape `[B, H, W]`.
+            iterations: an integer scalar, the number of NMS iterations.
+            radius: an integer scalar, the radius of the NMS window.
+        """
+        if iterations < 1: return scores
+        radius = tf.constant(radius, name='radius')
+        size = radius*2 + 1
+
+        max_pool = lambda x: gen_nn_ops.max_pool_v2( # supports dynamic ksize
+            x[..., None], ksize=[1, size, size, 1],
+            strides=[1, 1, 1, 1], padding='SAME')[..., 0]
+        zeros = tf.zeros_like(scores)
+        max_mask = tf.equal(scores, max_pool(scores))
+        for _ in range(iterations-1):
+            supp_mask = tf.cast(max_pool(tf.to_float(max_mask)), tf.bool)
+            supp_scores = tf.where(supp_mask, zeros, scores)
+            new_max_mask = tf.equal(supp_scores, max_pool(supp_scores))
+            max_mask = max_mask | (new_max_mask & tf.logical_not(supp_mask))
+        return tf.where(max_mask, scores, zeros)
+
+    def select_keypoints(self, scores, keypoint_number, keypoint_threshold, nms_iterations, nms_radius):
+        scores = self.simple_nms(scores, nms_iterations, nms_radius)
+        keypoints = tf.where(tf.greater_equal(
+            scores[0], keypoint_threshold))
+        scores = tf.gather_nd(scores[0], keypoints)
+        k = tf.constant(keypoint_number, name='k')
+        k = tf.minimum(tf.shape(scores)[0], k)
+        scores, indices = tf.nn.top_k(scores, k)
+        keypoints = tf.to_int32(tf.gather(
+            tf.to_float(keypoints), indices))
+        return np.array(keypoints), np.array(scores)
+
+    def select_keypoints_threshold(self, scores, keypoint_threshold):
+        # keep every point whose score clears the threshold (no NMS, no top-k)
+        keypoints = tf.where(tf.greater_equal(scores[0], keypoint_threshold)).numpy()
+        keypoints = np.array(keypoints)
+        scores = np.array([scores[0, i[0], i[1]] for i in keypoints])
+        return keypoints, scores
+
+    def infer(self, image):
+        if len(image.shape) == 3:
+            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+        scale = [image.shape[1] / self.input_size[0], image.shape[0] / self.input_size[1]]
+        image_scaled = cv2.resize(image, self.input_size)[:,:,None]
+        image_scaled = image_scaled.transpose((2, 0, 1))
+        res = self.exec_net.infer(inputs={self.input_blob: np.expand_dims(image_scaled, axis=0)})
+
+        features = {}
+        scores = res['pred/local_head/detector/Squeeze']
+        if self.config['keypoint_number'] == 0 and self.config['nms_iterations'] == 0:
+            keypoints, features['scores'] = self.select_keypoints_threshold(scores,
+                self.config['keypoint_threshold'])
+        else:
+            keypoints, features['scores'] = self.select_keypoints(scores,
+                self.config['keypoint_number'], self.config['keypoint_threshold'],
+                self.config['nms_iterations'], self.config['nms_radius'])
+        # scale keypoints back to the original image size and convert to x-y order
+        features['keypoints'] = np.array([[int(i[1] * scale[0]), int(i[0] * scale[1])] for i in keypoints])
+
+        local = np.transpose(res['pred/local_head/descriptor/Conv_1/BiasAdd/Normalize'],(0,2,3,1))
+        if len(features['keypoints']) > 0:
+            features['local_descriptors'] = \
+                tf.nn.l2_normalize(
+                    tf.contrib.resampler.resampler(
+                        local,
+                        tf.to_float(self.scaling_desc)[::-1]*tf.to_float(features['keypoints'][None])),
+                    -1).numpy()
+        else:
+            features['local_descriptors'] = np.array([[]])
+
+        features['global_descriptor'] = res['pred/global_head/dimensionality_reduction/BiasAdd/Normalize']
+
+        return features
diff --git a/feature_extraction/package.xml b/feature_extraction/package.xml
new file mode 100644
index 0000000..60943d0
--- /dev/null
+++ b/feature_extraction/package.xml
@@ -0,0 +1,62 @@
+<?xml version="1.0"?>
+<package format="2">
+  <name>feature_extraction</name>
+  <version>0.0.0</version>
+  <description>The feature_extraction package</description>
+
+  <maintainer>Xuesong Shi</maintainer>
+
+  <license>TODO</license>
+
+  <buildtool_depend>catkin</buildtool_depend>
+  <build_depend>rospy</build_depend>
+  <build_export_depend>rospy</build_export_depend>
+  <exec_depend>rospy</exec_depend>
+  <exec_depend>image_feature_msgs</exec_depend>
+
+  <export>
+  </export>
+</package>
diff --git a/feature_extraction/show_keypoints.py b/feature_extraction/show_keypoints.py
new file mode 100755
index 0000000..810bc37
--- /dev/null
+++ b/feature_extraction/show_keypoints.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python3
+"""
+A non-ROS script to visualize extracted keypoints of given images
+"""
+
+import os
+import cv2
+import numpy as np
+import time
+import threading
+import sys
+
+def main():
+    net_name = 'hfnet_vino'
+    gui = True
+    if net_name == 'hfnet_vino':
+        from hfnet_vino import FeatureNet, default_config
+    elif net_name == 'hfnet_tf':
+        from hfnet_tf import FeatureNet, default_config
+    else:
+        exit('Unknown net %s' % net_name)
+    config = default_config
+    #config['keypoint_threshold'] = 0
+    net = FeatureNet(config)
+    filenames = sys.argv[1:]
+    for f in filenames:
+        image = cv2.imread(f)
+        image = cv2.resize(image, (640, 480))
+        start_time = time.time()
+        features = net.infer(image)
+        end_time = time.time()
+        num_keypoints = features['keypoints'].shape[0]
+        print(f + ': ' + str(image.shape)
+            + ', %d keypoints, %.2f ms' % (num_keypoints, (end_time - start_time) * 1000))
+        if gui:
+            draw_keypoints(image, features['keypoints'], features['scores'])
+            title = f + ' (' + net_name + ', ' + str(num_keypoints) + ' keypoints)'
+            cv2.imshow(title, image)
+            cv2.waitKey()
+
+def draw_keypoints(image, keypoints, scores):
+    upper_score = 0.2 # keypoints with this score or higher will have a red circle
+    lower_score = 0.002 # keypoints with this score or lower will have a white circle
+    scale = 1 / (upper_score - lower_score)
+    for p,s in zip(keypoints, scores):
+        s = min(max(s - lower_score, 0) * scale, 1)
+        color = (255 * (1 - s), 255 * (1 - s), 255) # BGR
+        cv2.circle(image, tuple(p), 3, color, 1)
+
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
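Both visualization scripts run outside ROS, with the model folder (e.g. `models/hfnet_vino`) resolved relative to the working directory. Typical invocations (image paths are examples; `show_match.py` is added below):

```
python3 feature_extraction/show_keypoints.py img1.png img2.png
python3 feature_extraction/show_match.py query.png ref1.png ref2.png
```

`show_keypoints.py` displays each image with its keypoints; `show_match.py` additionally brute-force-matches local descriptors between the first image and each remaining one and reports the global-descriptor distance.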
diff --git a/feature_extraction/show_match.py b/feature_extraction/show_match.py
new file mode 100755
index 0000000..e9a87f5
--- /dev/null
+++ b/feature_extraction/show_match.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python3
+"""
+A non-ROS script to visualize extracted keypoints and their matches of given image pairs
+"""
+
+import os
+import cv2
+import numpy as np
+import time
+import threading
+import sys
+
+def main():
+    net_name = 'hfnet_vino'
+    gui = True
+    if net_name == 'hfnet_vino':
+        from hfnet_vino import FeatureNet, default_config
+    elif net_name == 'hfnet_tf':
+        from hfnet_tf import FeatureNet, default_config
+    else:
+        exit('Unknown net %s' % net_name)
+    config = default_config
+    #config['keypoint_threshold'] = 0.001
+    net = FeatureNet(config)
+    filenames = sys.argv[1:]
+    file_features = {}
+    for f in filenames:
+        image = cv2.imread(f)
+        #image = cv2.resize(image, (640, 480))
+        #cv2.imshow(f, image)
+        start_time = time.time()
+        features = net.infer(image)
+        end_time = time.time()
+        num_keypoints = features['keypoints'].shape[0]
+        print(f + ': ' + str(image.shape)
+            + ', %d keypoints, %.2f ms' % (num_keypoints, (end_time - start_time) * 1000))
+        file_features[f] = features
+        file_features[f]['image'] = image
+        if gui:
+            draw_keypoints(image, features['keypoints'], features['scores'])
+            title = f + ' (' + net_name + ', ' + str(num_keypoints) + ' keypoints)'
+            cv2.imshow(title, image)
+            cv2.waitKey()
+
+    f1 = filenames[0]
+    for f2 in filenames[1:]:
+        distance = np.linalg.norm(file_features[f1]['global_descriptor'] \
+            - file_features[f2]['global_descriptor'])
+        des1 = np.squeeze(file_features[f1]['local_descriptors'])
+        des2 = np.squeeze(file_features[f2]['local_descriptors'])
+        kp1 = [cv2.KeyPoint(p[0], p[1], _size=2) for p in file_features[f1]['keypoints']]
+        kp2 = [cv2.KeyPoint(p[0], p[1], _size=2) for p in file_features[f2]['keypoints']]
+        img1 = file_features[f1]['image']
+        img2 = file_features[f2]['image']
+
+        bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
+        matches = bf.match(des1, des2)
+        #matches = sorted(matches, key = lambda x:x.distance)
+        match_img = cv2.drawMatches(img1, kp1, img2, kp2, matches, None, flags=2)
+        title = os.path.splitext(os.path.basename(f1))[0] + '-' + \
+            os.path.splitext(os.path.basename(f2))[0] + '-' + str(distance)
+        cv2.imshow(title, match_img)
+        cv2.imwrite(title + '.jpg', match_img)
+        cv2.waitKey()
+
+def draw_keypoints(image, keypoints, scores):
+    upper_score = 0.5
+    lower_score = 0.1
+    scale = 1 / (upper_score - lower_score)
+    for p,s in zip(keypoints, scores):
+        s = min(max(s - lower_score, 0) * scale, 1)
+        color = (255 * (1 - s), 255 * (1 - s), 255) # BGR
+        cv2.circle(image, tuple(p), 3, color, 2)
+
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/image_feature_msgs/CMakeLists.txt b/image_feature_msgs/CMakeLists.txt
new file mode 100644
index 0000000..0e84fd6
--- /dev/null
+++ b/image_feature_msgs/CMakeLists.txt
@@ -0,0 +1,36 @@
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cmake_minimum_required(VERSION 2.8.3)
+
+project(image_feature_msgs)
+
+find_package(catkin REQUIRED COMPONENTS
+  std_msgs
+  message_generation
+)
+
+
+add_message_files(DIRECTORY msg FILES
+  KeyPoint.msg
+  ImageFeatures.msg
+)
+
+generate_messages(DEPENDENCIES
+  std_msgs
+)
+
+catkin_package(
+  CATKIN_DEPENDS std_msgs message_runtime
+)
\ No newline at end of file
diff --git a/image_feature_msgs/LICENSE b/image_feature_msgs/LICENSE
new file mode 100644
index 0000000..f8098cd
--- /dev/null
+++ b/image_feature_msgs/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2017 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/image_feature_msgs/msg/ImageFeatures.msg b/image_feature_msgs/msg/ImageFeatures.msg new file mode 100644 index 0000000..d48b5fc --- /dev/null +++ b/image_feature_msgs/msg/ImageFeatures.msg @@ -0,0 +1,20 @@ +# Copyright (c) 2020 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +std_msgs/Header header # must have the same stamp with corresponding image message +std_msgs/Bool sorted_by_score # whether the keypoints are sorted in descending order of their scores +image_feature_msgs/KeyPoint[] keypoints +float32[] scores # score of each keypoint, must be either in the same size with keypoints or empty +std_msgs/Float32MultiArray descriptors # local descriptors of keypoints +float32[] global_descriptor # global descriptor of the full image diff --git a/image_feature_msgs/msg/KeyPoint.msg b/image_feature_msgs/msg/KeyPoint.msg new file mode 100644 index 0000000..f911f1a --- /dev/null +++ b/image_feature_msgs/msg/KeyPoint.msg @@ -0,0 +1,17 @@ +# Copyright (c) 2020 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# pixel coordinates; x rightward, y downward +float32 x +float32 y diff --git a/image_feature_msgs/package.xml b/image_feature_msgs/package.xml new file mode 100644 index 0000000..97b33c3 --- /dev/null +++ b/image_feature_msgs/package.xml @@ -0,0 +1,28 @@ + + + + + image_feature_msgs + 0.3.0 + This package defines messages for image features + Xuesong Shi + + Apache 2.0 + + catkin + std_msgs + message_generation + std_msgs + message_runtime +