
Commit

version bump and formatting
mjd3 committed Jan 5, 2022
1 parent 14005bb commit 499a609
Showing 18 changed files with 64 additions and 36 deletions.
3 changes: 2 additions & 1 deletion examples/policy.py
@@ -38,7 +38,8 @@
 
 import numpy as np
 
-from autolab_core import YamlConfig, Logger, BinaryImage, CameraIntrinsics, ColorImage, DepthImage, RgbdImage
+from autolab_core import (YamlConfig, Logger, BinaryImage, CameraIntrinsics,
+                          ColorImage, DepthImage, RgbdImage)
 from visualization import Visualizer2D as vis
 
 from gqcnn.grasping import (RobustGraspingPolicy,
3 changes: 2 additions & 1 deletion examples/policy_ros.py
@@ -39,7 +39,8 @@
 
 from cv_bridge import CvBridge, CvBridgeError
 
-from autolab_core import Point, Logger, BinaryImage, CameraIntrinsics, ColorImage, DepthImage
+from autolab_core import (Point, Logger, BinaryImage, CameraIntrinsics,
+                          ColorImage, DepthImage)
 from visualization import Visualizer2D as vis
 
 from gqcnn.grasping import Grasp2D, SuctionPoint2D, GraspAction
5 changes: 4 additions & 1 deletion examples/policy_with_image_proc.py
@@ -37,7 +37,10 @@
 import pcl
 import skimage
 
-from autolab_core import PointCloud, RigidTransform, YamlConfig, Logger, BinaryImage, CameraIntrinsics, ColorImage, DepthImage, RgbdImage, SegmentationImage
+from autolab_core import (PointCloud, RigidTransform, YamlConfig,
+                          Logger, BinaryImage, CameraIntrinsics,
+                          ColorImage, DepthImage, RgbdImage,
+                          SegmentationImage)
 from visualization import Visualizer2D as vis
 
 from gqcnn import (RobustGraspingPolicy, CrossEntropyRobustGraspingPolicy,
3 changes: 2 additions & 1 deletion gqcnn/analysis/analyzer.py
@@ -35,7 +35,8 @@
 import matplotlib.pyplot as plt
 import numpy as np
 
-from autolab_core import BinaryClassificationResult, TensorDataset, Logger, DepthImage
+from autolab_core import (BinaryClassificationResult, TensorDataset,
+                          Logger, DepthImage)
 from autolab_core.constants import JSON_INDENT
 import autolab_core.utils as utils
 from visualization import Visualizer2D as vis2d
1 change: 1 addition & 0 deletions gqcnn/grasping/constraint_fn.py
@@ -117,6 +117,7 @@ def satisfies_constraints(self, grasp):
 
 
 class GraspConstraintFnFactory(object):
+
     @staticmethod
     def constraint_fn(fn_type, config):
         if fn_type == "none":
9 changes: 7 additions & 2 deletions gqcnn/grasping/grasp_quality_function.py
@@ -816,6 +816,7 @@ def quality(self, state, actions, params=None):
 
 class DiscCurvatureSuctionQualityFunction(
         GaussianCurvatureSuctionQualityFunction):
+
     def __init__(self, config):
         """Create approach planarity suction metric."""
         self._radius = config["radius"]
@@ -848,6 +849,7 @@ def _points_in_window(self, point_cloud_image, action, segmask=None):
 
 class ComDiscCurvatureSuctionQualityFunction(
         DiscCurvatureSuctionQualityFunction):
+
     def __init__(self, config):
         """Create approach planarity suction metric."""
         self._curvature_pctile = config["curvature_pctile"]
@@ -911,6 +913,7 @@ def quality(self, state, actions, params=None):
 
 
 class GQCnnQualityFunction(GraspQualityFunction):
+
     def __init__(self, config):
         """Create a GQCNN suction quality function."""
         GraspQualityFunction.__init__(self)
@@ -1067,6 +1070,7 @@ def quality(self, state, actions, params):
 
 
 class NoMagicQualityFunction(GraspQualityFunction):
+
     def __init__(self, config):
         """Create a quality that uses `nomagic_net` as a quality function."""
         from nomagic_submission import ConvNetModel
@@ -1154,8 +1158,8 @@ def grasps_to_tensors(self, grasps, state):
                                      255))[1].tostring()
             im_decoded = cv2.imdecode(np.frombuffer(im_encoded, np.uint8),
                                       0) / 255.0
-            image_tensor[i, :, :, 0] = ((im_decoded - self._data_mean) /
-                                        self._data_std)
+            image_tensor[i, :, :,
+                         0] = ((im_decoded - self._data_mean) / self._data_std)
 
             if gripper_mode == GripperMode.PARALLEL_JAW:
                 pose_tensor[i] = grasp.depth
@@ -1230,6 +1234,7 @@ def quality(self, state, actions, params):
 
 
 class FCGQCnnQualityFunction(GraspQualityFunction):
+
     def __init__(self, config):
         """Grasp quality function using the fully-convolutional gqcnn."""
         GraspQualityFunction.__init__(self)
7 changes: 4 additions & 3 deletions gqcnn/grasping/image_grasp_sampler.py
@@ -39,7 +39,8 @@
 import scipy.spatial.distance as ssd
 import scipy.stats as ss
 
-from autolab_core import Point, RigidTransform, Logger, DepthImage, RgbdImage, GdImage
+from autolab_core import (Point, RigidTransform, Logger,
+                          DepthImage, RgbdImage, GdImage)
 from visualization import Visualizer2D as vis
 
 from .grasp import Grasp2D, SuctionPoint2D, MultiSuctionPoint2D
@@ -558,8 +559,8 @@ def _sample_antipodal_grasps(self,
             for i in range(self._depth_samples_per_grasp):
                 # Get depth in the neighborhood of the center pixel.
                 depth_win = depth_im.data[grasp_center[0] -
-                                          self._h:grasp_center[0] +
-                                          self._h, grasp_center[1] -
+                                          self._h:grasp_center[0] + self._h,
+                                          grasp_center[1] -
                                           self._w:grasp_center[1] + self._w]
                 center_depth = np.min(depth_win)
                 if center_depth == 0 or np.isnan(center_depth):
12 changes: 6 additions & 6 deletions gqcnn/grasping/policy/fc_policy.py
@@ -105,8 +105,8 @@ def _mask_predictions(self, preds, raw_segmask):
         preds_masked = np.zeros_like(preds)
         raw_segmask_cropped = raw_segmask[self._gqcnn_recep_h //
                                           2:raw_segmask.shape[0] -
-                                          self._gqcnn_recep_h //
-                                          2, self._gqcnn_recep_w //
+                                          self._gqcnn_recep_h // 2,
+                                          self._gqcnn_recep_w //
                                           2:raw_segmask.shape[1] -
                                           self._gqcnn_recep_w // 2, 0]
         raw_segmask_downsampled = raw_segmask_cropped[::self._gqcnn_stride, ::
@@ -321,8 +321,8 @@ def _action(self, state, num_actions=1):
                                self._vis_scale,
                                output_dir=state_output_dir)
 
-        return actions[-1] if (self._filter_grasps or num_actions == 1
-                               ) else actions[-(num_actions + 1):]
+        return actions[-1] if (self._filter_grasps or num_actions
+                               == 1) else actions[-(num_actions + 1):]
 
     def action_set(self, state, num_actions):
         """Plan a set of actions.
@@ -411,8 +411,8 @@ def _get_actions(self, preds, ind, images, depths, camera_intr,
                             depth,
                             width=self._gripper_width,
                             camera_intr=camera_intr)
-            grasp_action = GraspAction(grasp,
-                                       preds[im_idx, h_idx, w_idx, ang_idx],
+            grasp_action = GraspAction(grasp, preds[im_idx, h_idx, w_idx,
+                                                    ang_idx],
                                        DepthImage(images[im_idx]))
             actions.append(grasp_action)
         return actions
8 changes: 6 additions & 2 deletions gqcnn/grasping/policy/policy.py
@@ -42,7 +42,9 @@
 from sklearn.mixture import GaussianMixture
 
 import autolab_core.utils as utils
-from autolab_core import Point, Logger, BinaryImage, ColorImage, DepthImage, RgbdImage, SegmentationImage, CameraIntrinsics
+from autolab_core import (Point, Logger, BinaryImage, ColorImage,
+                          DepthImage, RgbdImage, SegmentationImage,
+                          CameraIntrinsics)
 from visualization import Visualizer2D as vis
 
 from ..constraint_fn import GraspConstraintFnFactory
@@ -1238,7 +1240,7 @@ def action_set(self, state):
                              q_values=norm_q_values,
                              scale=2.0,
                              title=title,
-                             save_fname="final_sampled_grasps.png".format(j),
+                             save_fname="final_sampled_grasps.png",
                              save_path=state_output_dir)
             display_grasps_and_q_values = zip(grasps, q_values)
             display_grasps_and_q_values = sorted(display_grasps_and_q_values,
@@ -1545,6 +1547,7 @@ def set_constraint_fn(self, constraint_fn):
 
 
 class PriorityCompositeGraspingPolicy(CompositeGraspingPolicy):
+
     def __init__(self, policies, priority_list):
         # Check validity.
         for name in priority_list:
@@ -1613,6 +1616,7 @@ def action_set(self, state, policy_subset=None, min_q_value=-1.0):
 
 
 class GreedyCompositeGraspingPolicy(CompositeGraspingPolicy):
+
     def __init__(self, policies):
         CompositeGraspingPolicy.__init__(self, policies)
 
22 changes: 11 additions & 11 deletions gqcnn/model/tf/network_tf.py
@@ -878,21 +878,21 @@ def _predict(self, image_arr, pose_arr, verbose=False):
                 end_ind = cur_ind + dim
 
                 if self._input_depth_mode == InputDepthMode.POSE_STREAM:
-                    self._input_im_arr[:dim, ...] = (
-                        image_arr[cur_ind:end_ind, ...] -
-                        self._im_mean) / self._im_std
+                    self._input_im_arr[:dim,
+                                       ...] = (image_arr[cur_ind:end_ind, ...]
+                                               - self._im_mean) / self._im_std
                     self._input_pose_arr[:dim, :] = (
                         pose_arr[cur_ind:end_ind, :] -
                         self._pose_mean) / self._pose_std
                 elif self._input_depth_mode == InputDepthMode.SUB:
                     self._input_im_arr[:dim, ...] = image_arr[cur_ind:end_ind,
                                                               ...]
-                    self._input_pose_arr[:dim, :] = pose_arr[cur_ind:
-                                                             end_ind, :]
+                    self._input_pose_arr[:dim, :] = pose_arr[
+                        cur_ind:end_ind, :]
                 elif self._input_depth_mode == InputDepthMode.IM_ONLY:
-                    self._input_im_arr[:dim, ...] = (
-                        image_arr[cur_ind:end_ind, ...] -
-                        self._im_mean) / self._im_std
+                    self._input_im_arr[:dim,
+                                       ...] = (image_arr[cur_ind:end_ind, ...]
+                                               - self._im_mean) / self._im_std
 
                 gqcnn_output = self._sess.run(
                     self._output_tensor,
@@ -1417,9 +1417,9 @@ def _build_network(self, input_im_node, input_pose_node,
                 self._input_depth_mode == InputDepthMode.IM_ONLY:
             extraneous_stream_msg = ("When using input depth mode '{}', only"
                                      " im stream is allowed!")
-            assert not ("pose_stream" in self._architecture
-                        or "merge_stream" in self._architecture
-                        ), extraneous_stream_msg.format(self._input_depth_mode)
+            assert not ("pose_stream" in self._architecture or "merge_stream"
+                        in self._architecture), extraneous_stream_msg.format(
+                            self._input_depth_mode)
             with tf.name_scope("im_stream"):
                 return self._build_im_stream(input_im_node,
                                              input_pose_node,
1 change: 1 addition & 0 deletions gqcnn/search/resource_manager.py
@@ -48,6 +48,7 @@
 
 
 class ResourceManager(object):
+
     def __init__(self,
                  trial_cpu_load,
                  trial_gpu_load,
1 change: 1 addition & 0 deletions gqcnn/search/search.py
@@ -50,6 +50,7 @@
 
 
 class GQCNNSearch(object):
+
     def __init__(self,
                  analysis_config,
                  train_configs,
3 changes: 3 additions & 0 deletions gqcnn/search/trial.py
@@ -50,6 +50,7 @@ class TrialStatus:
 
 
 class GQCNNTrialWithAnalysis(ABC):
+
     def __init__(self, analysis_cfg, train_cfg, dataset_dir, split_name,
                  output_dir, model_name, hyperparam_summary):
         self._analysis_cfg = analysis_cfg
@@ -198,11 +199,13 @@ def __str__(self):
 
 
 class GQCNNTrainingAndAnalysisTrial(GQCNNTrialWithAnalysis):
+
     def _run(self, trainer):
         trainer.train()
 
 
 class GQCNNFineTuningAndAnalysisTrial(GQCNNTrialWithAnalysis):
+
     def __init__(self, analysis_cfg, train_cfg, dataset_dir, base_model_dir,
                  split_name, output_dir, model_name, hyperparam_summary):
         GQCNNTrialWithAnalysis.__init__(self, analysis_cfg, train_cfg,
5 changes: 3 additions & 2 deletions gqcnn/training/tf/trainer_tf.py
@@ -1517,8 +1517,9 @@ def _load_and_enqueue(self, seed):
                     (train_label_arr.shape[0], self._angular_bins * 2))
                 for i in range(angles.shape[0]):
                     bins[i] = angles[i] // self._bin_width
-                    train_pred_mask_arr[
-                        i, int((angles[i] // self._bin_width) * 2)] = 1
+                    train_pred_mask_arr[i,
+                                        int((angles[i] //
+                                             self._bin_width) * 2)] = 1
                     train_pred_mask_arr[
                         i, int((angles[i] // self._bin_width) * 2 + 1)] = 1
 
5 changes: 4 additions & 1 deletion gqcnn/utils/policy_exceptions.py
@@ -34,7 +34,10 @@ class NoValidGraspsException(Exception):
     """Exception for when antipodal point pairs can be found in the depth
     image but none are valid grasps that can be executed."""
 
-    def __init__(self, in_collision=True, not_confident=False, *args,
+    def __init__(self,
+                 in_collision=True,
+                 not_confident=False,
+                 *args,
                  **kwargs):
         self.in_collision = in_collision
         self.not_confident = not_confident
2 changes: 1 addition & 1 deletion gqcnn/version.py
@@ -1 +1 @@
-__version__ = "1.1.0"
+__version__ = "1.3.0"
2 changes: 1 addition & 1 deletion package.xml
@@ -24,7 +24,7 @@ MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 -->
 <package>
   <name>gqcnn</name>
-  <version>1.1.0</version>
+  <version>1.3.0</version>
   <description>ROS package for deploying Grasp Quality Convolutional Neural Networks (GQ-CNNs).</description>
 
   <!-- One maintainer tag required, multiple allowed, one person per tag -->
8 changes: 5 additions & 3 deletions ros_nodes/grasp_planner_node.py
@@ -38,7 +38,8 @@
 import numpy as np
 import rospy
 
-from autolab_core import YamlConfig, CameraIntrinsics, ColorImage, DepthImage, BinaryImage, RgbdImage
+from autolab_core import (YamlConfig, CameraIntrinsics, ColorImage,
+                          DepthImage, BinaryImage, RgbdImage)
 from visualization import Visualizer2D as vis
 from gqcnn.grasping import (Grasp2D, SuctionPoint2D, RgbdImageState,
                             RobustGraspingPolicy,
@@ -55,6 +56,7 @@
 
 
 class GraspPlanner(object):
+
     def __init__(self, cfg, cv_bridge, grasping_policy, grasp_pose_publisher):
         """
         Parameters
@@ -118,8 +120,8 @@ def read_images(self, req):
             raw_camera_info.K[1], raw_camera_info.height,
             raw_camera_info.width)
 
-        # Create wrapped BerkeleyAutomation/autolab_core RGB and depth images by
-        # unpacking the ROS images using ROS `CvBridge`
+        # Create wrapped BerkeleyAutomation/autolab_core RGB and depth images
+        # by unpacking the ROS images using ROS `CvBridge`
         try:
             color_im = ColorImage(self.cv_bridge.imgmsg_to_cv2(
                 raw_color, "rgb8"),
