diff --git a/examples/policy.py b/examples/policy.py
index e9c7068f..1b7f82a5 100644
--- a/examples/policy.py
+++ b/examples/policy.py
@@ -38,7 +38,8 @@
 
 import numpy as np
 
-from autolab_core import YamlConfig, Logger, BinaryImage, CameraIntrinsics, ColorImage, DepthImage, RgbdImage
+from autolab_core import (YamlConfig, Logger, BinaryImage, CameraIntrinsics,
+                          ColorImage, DepthImage, RgbdImage)
 from visualization import Visualizer2D as vis
 
 from gqcnn.grasping import (RobustGraspingPolicy,
diff --git a/examples/policy_ros.py b/examples/policy_ros.py
index 2c504eb9..e6f3d678 100644
--- a/examples/policy_ros.py
+++ b/examples/policy_ros.py
@@ -39,7 +39,8 @@
 
 from cv_bridge import CvBridge, CvBridgeError
 
-from autolab_core import Point, Logger, BinaryImage, CameraIntrinsics, ColorImage, DepthImage
+from autolab_core import (Point, Logger, BinaryImage, CameraIntrinsics,
+                          ColorImage, DepthImage)
 from visualization import Visualizer2D as vis
 
 from gqcnn.grasping import Grasp2D, SuctionPoint2D, GraspAction
diff --git a/examples/policy_with_image_proc.py b/examples/policy_with_image_proc.py
index 798e0c2b..6cd8ab48 100644
--- a/examples/policy_with_image_proc.py
+++ b/examples/policy_with_image_proc.py
@@ -37,7 +37,10 @@
 import pcl
 import skimage
 
-from autolab_core import PointCloud, RigidTransform, YamlConfig, Logger, BinaryImage, CameraIntrinsics, ColorImage, DepthImage, RgbdImage, SegmentationImage
+from autolab_core import (PointCloud, RigidTransform, YamlConfig,
+                          Logger, BinaryImage, CameraIntrinsics,
+                          ColorImage, DepthImage, RgbdImage,
+                          SegmentationImage)
 from visualization import Visualizer2D as vis
 
 from gqcnn import (RobustGraspingPolicy, CrossEntropyRobustGraspingPolicy,
diff --git a/gqcnn/analysis/analyzer.py b/gqcnn/analysis/analyzer.py
index 52f4cd90..0fe509c5 100644
--- a/gqcnn/analysis/analyzer.py
+++ b/gqcnn/analysis/analyzer.py
@@ -35,7 +35,8 @@
 import matplotlib.pyplot as plt
 import numpy as np
 
-from autolab_core import BinaryClassificationResult, TensorDataset, Logger, DepthImage
+from autolab_core import (BinaryClassificationResult, TensorDataset,
+                          Logger, DepthImage)
 from autolab_core.constants import JSON_INDENT
 import autolab_core.utils as utils
 from visualization import Visualizer2D as vis2d
diff --git a/gqcnn/grasping/constraint_fn.py b/gqcnn/grasping/constraint_fn.py
index 78755398..0dac93be 100644
--- a/gqcnn/grasping/constraint_fn.py
+++ b/gqcnn/grasping/constraint_fn.py
@@ -117,6 +117,7 @@ def satisfies_constraints(self, grasp):
 
 
 class GraspConstraintFnFactory(object):
+
     @staticmethod
     def constraint_fn(fn_type, config):
         if fn_type == "none":
diff --git a/gqcnn/grasping/grasp_quality_function.py b/gqcnn/grasping/grasp_quality_function.py
index 8e16c4f8..273a5c0a 100644
--- a/gqcnn/grasping/grasp_quality_function.py
+++ b/gqcnn/grasping/grasp_quality_function.py
@@ -816,6 +816,7 @@ def quality(self, state, actions, params=None):
 
 class DiscCurvatureSuctionQualityFunction(
         GaussianCurvatureSuctionQualityFunction):
+
     def __init__(self, config):
         """Create approach planarity suction metric."""
         self._radius = config["radius"]
@@ -848,6 +849,7 @@ def _points_in_window(self, point_cloud_image, action, segmask=None):
 
 class ComDiscCurvatureSuctionQualityFunction(
         DiscCurvatureSuctionQualityFunction):
+
     def __init__(self, config):
         """Create approach planarity suction metric."""
         self._curvature_pctile = config["curvature_pctile"]
@@ -911,6 +913,7 @@ def quality(self, state, actions, params=None):
 
 
 class GQCnnQualityFunction(GraspQualityFunction):
+
     def __init__(self, config):
"""Create a GQCNN suction quality function.""" GraspQualityFunction.__init__(self) @@ -1067,6 +1070,7 @@ def quality(self, state, actions, params): class NoMagicQualityFunction(GraspQualityFunction): + def __init__(self, config): """Create a quality that uses `nomagic_net` as a quality function.""" from nomagic_submission import ConvNetModel @@ -1154,8 +1158,8 @@ def grasps_to_tensors(self, grasps, state): 255))[1].tostring() im_decoded = cv2.imdecode(np.frombuffer(im_encoded, np.uint8), 0) / 255.0 - image_tensor[i, :, :, 0] = ((im_decoded - self._data_mean) / - self._data_std) + image_tensor[i, :, :, + 0] = ((im_decoded - self._data_mean) / self._data_std) if gripper_mode == GripperMode.PARALLEL_JAW: pose_tensor[i] = grasp.depth @@ -1230,6 +1234,7 @@ def quality(self, state, actions, params): class FCGQCnnQualityFunction(GraspQualityFunction): + def __init__(self, config): """Grasp quality function using the fully-convolutional gqcnn.""" GraspQualityFunction.__init__(self) diff --git a/gqcnn/grasping/image_grasp_sampler.py b/gqcnn/grasping/image_grasp_sampler.py index 463ef842..bae577f8 100644 --- a/gqcnn/grasping/image_grasp_sampler.py +++ b/gqcnn/grasping/image_grasp_sampler.py @@ -39,7 +39,8 @@ import scipy.spatial.distance as ssd import scipy.stats as ss -from autolab_core import Point, RigidTransform, Logger, DepthImage, RgbdImage, GdImage +from autolab_core import (Point, RigidTransform, Logger, + DepthImage, RgbdImage, GdImage) from visualization import Visualizer2D as vis from .grasp import Grasp2D, SuctionPoint2D, MultiSuctionPoint2D @@ -558,8 +559,8 @@ def _sample_antipodal_grasps(self, for i in range(self._depth_samples_per_grasp): # Get depth in the neighborhood of the center pixel. depth_win = depth_im.data[grasp_center[0] - - self._h:grasp_center[0] + - self._h, grasp_center[1] - + self._h:grasp_center[0] + self._h, + grasp_center[1] - self._w:grasp_center[1] + self._w] center_depth = np.min(depth_win) if center_depth == 0 or np.isnan(center_depth): diff --git a/gqcnn/grasping/policy/fc_policy.py b/gqcnn/grasping/policy/fc_policy.py index fd9ff091..e2f049a3 100644 --- a/gqcnn/grasping/policy/fc_policy.py +++ b/gqcnn/grasping/policy/fc_policy.py @@ -105,8 +105,8 @@ def _mask_predictions(self, preds, raw_segmask): preds_masked = np.zeros_like(preds) raw_segmask_cropped = raw_segmask[self._gqcnn_recep_h // 2:raw_segmask.shape[0] - - self._gqcnn_recep_h // - 2, self._gqcnn_recep_w // + self._gqcnn_recep_h // 2, + self._gqcnn_recep_w // 2:raw_segmask.shape[1] - self._gqcnn_recep_w // 2, 0] raw_segmask_downsampled = raw_segmask_cropped[::self._gqcnn_stride, :: @@ -321,8 +321,8 @@ def _action(self, state, num_actions=1): self._vis_scale, output_dir=state_output_dir) - return actions[-1] if (self._filter_grasps or num_actions == 1 - ) else actions[-(num_actions + 1):] + return actions[-1] if (self._filter_grasps or num_actions + == 1) else actions[-(num_actions + 1):] def action_set(self, state, num_actions): """Plan a set of actions. 
@@ -411,8 +411,8 @@ def _get_actions(self, preds, ind, images, depths, camera_intr,
                             depth,
                             width=self._gripper_width,
                             camera_intr=camera_intr)
-            grasp_action = GraspAction(grasp,
-                                       preds[im_idx, h_idx, w_idx, ang_idx],
+            grasp_action = GraspAction(grasp, preds[im_idx, h_idx, w_idx,
+                                                    ang_idx],
                                        DepthImage(images[im_idx]))
             actions.append(grasp_action)
         return actions
diff --git a/gqcnn/grasping/policy/policy.py b/gqcnn/grasping/policy/policy.py
index 242fca10..27224b8b 100644
--- a/gqcnn/grasping/policy/policy.py
+++ b/gqcnn/grasping/policy/policy.py
@@ -42,7 +42,9 @@
 from sklearn.mixture import GaussianMixture
 
 import autolab_core.utils as utils
-from autolab_core import Point, Logger, BinaryImage, ColorImage, DepthImage, RgbdImage, SegmentationImage, CameraIntrinsics
+from autolab_core import (Point, Logger, BinaryImage, ColorImage,
+                          DepthImage, RgbdImage, SegmentationImage,
+                          CameraIntrinsics)
 from visualization import Visualizer2D as vis
 
 from ..constraint_fn import GraspConstraintFnFactory
@@ -1238,7 +1240,7 @@ def action_set(self, state):
                     q_values=norm_q_values,
                     scale=2.0,
                     title=title,
-                    save_fname="final_sampled_grasps.png".format(j),
+                    save_fname="final_sampled_grasps.png",
                     save_path=state_output_dir)
             display_grasps_and_q_values = zip(grasps, q_values)
             display_grasps_and_q_values = sorted(display_grasps_and_q_values,
@@ -1545,6 +1547,7 @@ def set_constraint_fn(self, constraint_fn):
 
 
 class PriorityCompositeGraspingPolicy(CompositeGraspingPolicy):
+
     def __init__(self, policies, priority_list):
         # Check validity.
         for name in priority_list:
@@ -1613,6 +1616,7 @@ def action_set(self, state, policy_subset=None, min_q_value=-1.0):
 
 
 class GreedyCompositeGraspingPolicy(CompositeGraspingPolicy):
+
     def __init__(self, policies):
         CompositeGraspingPolicy.__init__(self, policies)
 
diff --git a/gqcnn/model/tf/network_tf.py b/gqcnn/model/tf/network_tf.py
index 4b80078f..72fd1e50 100644
--- a/gqcnn/model/tf/network_tf.py
+++ b/gqcnn/model/tf/network_tf.py
@@ -878,21 +878,21 @@ def _predict(self, image_arr, pose_arr, verbose=False):
                 end_ind = cur_ind + dim
 
                 if self._input_depth_mode == InputDepthMode.POSE_STREAM:
-                    self._input_im_arr[:dim, ...] = (
-                        image_arr[cur_ind:end_ind, ...] -
-                        self._im_mean) / self._im_std
+                    self._input_im_arr[:dim,
+                                       ...] = (image_arr[cur_ind:end_ind, ...]
+                                               - self._im_mean) / self._im_std
                     self._input_pose_arr[:dim, :] = (
                         pose_arr[cur_ind:end_ind, :] -
                         self._pose_mean) / self._pose_std
                 elif self._input_depth_mode == InputDepthMode.SUB:
                     self._input_im_arr[:dim, ...] = image_arr[cur_ind:end_ind,
                                                               ...]
-                    self._input_pose_arr[:dim, :] = pose_arr[cur_ind:
-                                                             end_ind, :]
+                    self._input_pose_arr[:dim, :] = pose_arr[
+                        cur_ind:end_ind, :]
                 elif self._input_depth_mode == InputDepthMode.IM_ONLY:
-                    self._input_im_arr[:dim, ...] = (
-                        image_arr[cur_ind:end_ind, ...] -
-                        self._im_mean) / self._im_std
+                    self._input_im_arr[:dim,
+                                       ...] = (image_arr[cur_ind:end_ind, ...]
+                                               - self._im_mean) / self._im_std
 
                 gqcnn_output = self._sess.run(
                     self._output_tensor,
@@ -1417,9 +1417,9 @@ def _build_network(self, input_im_node, input_pose_node,
                 self._input_depth_mode == InputDepthMode.IM_ONLY:
             extraneous_stream_msg = ("When using input depth mode '{}', only"
                                      " im stream is allowed!")
-            assert not ("pose_stream" in self._architecture
-                        or "merge_stream" in self._architecture
-                        ), extraneous_stream_msg.format(self._input_depth_mode)
+            assert not ("pose_stream" in self._architecture or "merge_stream"
+                        in self._architecture), extraneous_stream_msg.format(
+                            self._input_depth_mode)
 
             with tf.name_scope("im_stream"):
                 return self._build_im_stream(input_im_node, input_pose_node,
diff --git a/gqcnn/search/resource_manager.py b/gqcnn/search/resource_manager.py
index 60a0bcf8..12eb801a 100644
--- a/gqcnn/search/resource_manager.py
+++ b/gqcnn/search/resource_manager.py
@@ -48,6 +48,7 @@
 
 
 class ResourceManager(object):
+
     def __init__(self,
                  trial_cpu_load,
                  trial_gpu_load,
diff --git a/gqcnn/search/search.py b/gqcnn/search/search.py
index 07998979..1b1a89cd 100644
--- a/gqcnn/search/search.py
+++ b/gqcnn/search/search.py
@@ -50,6 +50,7 @@
 
 
 class GQCNNSearch(object):
+
     def __init__(self,
                  analysis_config,
                  train_configs,
diff --git a/gqcnn/search/trial.py b/gqcnn/search/trial.py
index 1c963882..06b8b488 100644
--- a/gqcnn/search/trial.py
+++ b/gqcnn/search/trial.py
@@ -50,6 +50,7 @@ class TrialStatus:
 
 
 class GQCNNTrialWithAnalysis(ABC):
+
     def __init__(self, analysis_cfg, train_cfg, dataset_dir, split_name,
                  output_dir, model_name, hyperparam_summary):
         self._analysis_cfg = analysis_cfg
@@ -198,11 +199,13 @@ def __str__(self):
 
 
 class GQCNNTrainingAndAnalysisTrial(GQCNNTrialWithAnalysis):
+
     def _run(self, trainer):
         trainer.train()
 
 
 class GQCNNFineTuningAndAnalysisTrial(GQCNNTrialWithAnalysis):
+
     def __init__(self, analysis_cfg, train_cfg, dataset_dir, base_model_dir,
                  split_name, output_dir, model_name, hyperparam_summary):
         GQCNNTrialWithAnalysis.__init__(self, analysis_cfg, train_cfg,
diff --git a/gqcnn/training/tf/trainer_tf.py b/gqcnn/training/tf/trainer_tf.py
index 38d01172..7e85f3a3 100644
--- a/gqcnn/training/tf/trainer_tf.py
+++ b/gqcnn/training/tf/trainer_tf.py
@@ -1517,8 +1517,9 @@ def _load_and_enqueue(self, seed):
                         (train_label_arr.shape[0], self._angular_bins * 2))
                     for i in range(angles.shape[0]):
                         bins[i] = angles[i] // self._bin_width
-                        train_pred_mask_arr[
-                            i, int((angles[i] // self._bin_width) * 2)] = 1
+                        train_pred_mask_arr[i,
+                                            int((angles[i] //
+                                                 self._bin_width) * 2)] = 1
                         train_pred_mask_arr[
                             i, int((angles[i] // self._bin_width) * 2 +
                                    1)] = 1
diff --git a/gqcnn/utils/policy_exceptions.py b/gqcnn/utils/policy_exceptions.py
index 8f2edb94..20663b9c 100644
--- a/gqcnn/utils/policy_exceptions.py
+++ b/gqcnn/utils/policy_exceptions.py
@@ -34,7 +34,10 @@ class NoValidGraspsException(Exception):
     """Exception for when antipodal point pairs can be found in the depth
     image but none are valid grasps that can be executed."""
 
-    def __init__(self, in_collision=True, not_confident=False, *args,
+    def __init__(self,
+                 in_collision=True,
+                 not_confident=False,
+                 *args,
                  **kwargs):
         self.in_collision = in_collision
         self.not_confident = not_confident
diff --git a/gqcnn/version.py b/gqcnn/version.py
index 6849410a..67bc602a 100644
--- a/gqcnn/version.py
+++ b/gqcnn/version.py
@@ -1 +1 @@
-__version__ = "1.1.0"
+__version__ = "1.3.0"
diff --git a/package.xml b/package.xml
index 7371bc3a..8d2ab44d 100644
--- a/package.xml
+++ b/package.xml
@@ -24,7 +24,7 @@ MAINTENANCE, SUPPORT, UPDATES,
 ENHANCEMENTS, OR MODIFICATIONS. -->
 <package>
   <name>gqcnn</name>
-  <version>1.1.0</version>
+  <version>1.3.0</version>
   <description>
     ROS package for deploying Grasp Quality Convolutional Neural Networks
     (GQ-CNNs).
diff --git a/ros_nodes/grasp_planner_node.py b/ros_nodes/grasp_planner_node.py
index 50cd862d..dd8b803e 100755
--- a/ros_nodes/grasp_planner_node.py
+++ b/ros_nodes/grasp_planner_node.py
@@ -38,7 +38,8 @@
 import numpy as np
 import rospy
 
-from autolab_core import YamlConfig, CameraIntrinsics, ColorImage, DepthImage, BinaryImage, RgbdImage
+from autolab_core import (YamlConfig, CameraIntrinsics, ColorImage,
+                          DepthImage, BinaryImage, RgbdImage)
 from visualization import Visualizer2D as vis
 from gqcnn.grasping import (Grasp2D, SuctionPoint2D, RgbdImageState,
                             RobustGraspingPolicy,
@@ -55,6 +56,7 @@
 
 
 class GraspPlanner(object):
+
     def __init__(self, cfg, cv_bridge, grasping_policy, grasp_pose_publisher):
         """
         Parameters
@@ -118,8 +120,8 @@ def read_images(self, req):
                                        raw_camera_info.K[1],
                                        raw_camera_info.height,
                                        raw_camera_info.width)
-        # Create wrapped BerkeleyAutomation/autolab_core RGB and depth images by
-        # unpacking the ROS images using ROS `CvBridge`
+        # Create wrapped BerkeleyAutomation/autolab_core RGB and depth images
+        # by unpacking the ROS images using ROS `CvBridge`
         try:
             color_im = ColorImage(self.cv_bridge.imgmsg_to_cv2(
                 raw_color, "rgb8"),