Merge branch 'main' into IRM-course
tlpss committed Dec 12, 2023
2 parents 1803b46 + 03dcf93 commit c4fe9ed
Showing 53 changed files with 1,032 additions and 71 deletions.
@@ -26,7 +26,7 @@ def __init__(
         self._image_transform = image_transform
 
     def _log_rgb_image(self) -> None:
-        import rerun
+        import rerun as rr
 
         image = self._receiver.get_rgb_image()
         # This randomly fails, just don't log an image if it does
@@ -38,14 +38,15 @@ def _log_rgb_image(self) -> None:
         image_rgb = image_bgr[:, :, ::-1]
         if self._image_transform is not None:
             image_rgb = self._image_transform.transform_image(image_rgb)
-        rerun.log_image(self._shared_memory_namespace, image_rgb, jpeg_quality=90)
+
+        rr.log(self._shared_memory_namespace, rr.Image(image_rgb).compress(jpeg_quality=90))
 
     def run(self) -> None:
         """main loop of the process, runs until the process is terminated"""
-        import rerun
+        import rerun as rr
 
-        rerun.init(self._rerun_application_id)
-        rerun.connect()
+        rr.init(self._rerun_application_id)
+        rr.connect()
 
         self._receiver = MultiprocessRGBReceiver(self._shared_memory_namespace)
 
@@ -70,14 +70,14 @@ def __init__(
         )
 
     def _log_depth_image(self) -> None:
-        import rerun
+        import rerun as rr
 
         assert isinstance(self._receiver, MultiprocessRGBDReceiver)
 
         depth_image = self._receiver.get_depth_image()
         if self._image_transform is not None:
             depth_image = self._image_transform.transform_image(depth_image)
-        rerun.log_image(f"{self._shared_memory_namespace}_depth", depth_image, jpeg_quality=90)
+        rr.log(f"{self._shared_memory_namespace}_depth", rr.Image(depth_image).compress(jpeg_quality=90))
 
     def run(self) -> None:
         """main loop of the process, runs until the process is terminated"""
@@ -97,7 +98,7 @@ def stop(self) -> None:
 
 
 if __name__ == "__main__":
-    rerun_logger = MultiprocessRGBRerunLogger("camera")
+    rerun_logger = MultiprocessRGBDRerunLogger("camera")
    rerun_logger.start()
     time.sleep(10)
     rerun_logger.stop()
@@ -287,6 +287,8 @@ def __del__(self) -> None:
     """
     from airo_camera_toolkit.cameras.zed2i import Zed2i
 
+    namespace = "camera"
+
     # Creating and starting the publisher
     p = MultiprocessRGBPublisher(
         Zed2i,
@@ -295,11 +297,11 @@ def __del__(self) -> None:
             "fps": 30,
             "depth_mode": Zed2i.NONE_DEPTH_MODE,
         },
+        shared_memory_namespace=namespace,
     )
     p.start()
 
     # The receiver behaves just like a regular RGBCamera
-    namespace = "camera"
     receiver = MultiprocessRGBReceiver(namespace)
 
     cv2.namedWindow(namespace, cv2.WINDOW_NORMAL)
@@ -57,7 +57,7 @@ def stop(self) -> None:
 
 if __name__ == "__main__":
     """Records 10 seconds of video. Assumes there's being published to the "camera" namespace."""
-    recorder = MultiprocessVideoRecorder("zed_top")
+    recorder = MultiprocessVideoRecorder("camera")
     recorder.start()
     time.sleep(10)
     recorder.stop()
50 changes: 18 additions & 32 deletions airo-camera-toolkit/docs/rerun-zed-example.ipynb
@@ -10,7 +10,7 @@
    "\n",
    "\n",
    "Rerun has more features such as logging meshes, logging 3D bboxes, URDFs (in process). Check the docs to learn more. \n",
-    "Also note that this tool is still very 'young', it became public only mid february 2023. There are many issues that need to be solved. E.g. the pypi wheel is broken atm and you cannot save the rerun file from the UI. The team is very responsive on github so shoot if you find issues etc."
+    "Also note that this tool is still very 'young', it became public only mid february 2023. There are many issues that need to be solved. The team is very responsive on github so shoot if you find issues etc."
   ]
  },
  {
@@ -27,7 +27,7 @@
   "outputs": [],
   "source": [
    "from airo_camera_toolkit.cameras.zed2i import Zed2i\n",
-    "import rerun\n",
+    "import rerun as rr\n",
    "#autoreload\n",
    "%load_ext autoreload\n",
    "%autoreload 2"
@@ -41,7 +41,7 @@
   "source": [
    "# start rerun. If the UI is already running, it will connect to it. Otherwise it will start a new UI and connect to it.\n",
    "# you can also start rerun using `python -m rerun`.\n",
-    "rerun.init(\"test\",spawn=True)"
+    "rr.init(\"test\",spawn=True)"
   ]
  },
  {
@@ -64,23 +64,14 @@
    "depth = zed.get_depth_image()"
   ]
  },
- {
-  "cell_type": "code",
-  "execution_count": null,
-  "metadata": {},
-  "outputs": [],
-  "source": [
-   "zed.depth_enabled = False"
-  ]
- },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# log the colored pointcloud to the UI\n",
-    "rerun.log_points(\"world/camera1/pointcloud\",positions=pointcloud[:,:3],colors=pointcloud[:,3:6])\n"
+    "rr.log(\"world/camera1/pointcloud\", rr.Points3D(positions=pointcloud[:,:3],colors=pointcloud[:,3:6]))\n"
   ]
  },
  {
@@ -99,7 +90,7 @@
   "outputs": [],
   "source": [
    "# log the non-colored pointcloud to the UI in the same world/camera1 space\n",
-    "rerun.log_points(\"world/camera1/non_color_pointcloud\",positions=pointcloud[:,:3],colors=[0.5,0.5,0.5])"
+    "rr.log(\"world/camera1/non_color_pointcloud\", rr.Points3D(positions=pointcloud[:,:3],colors=[0.5,0.5,0.5]))"
   ]
  },
  {
@@ -109,8 +100,8 @@
   "outputs": [],
   "source": [
    "# log the rgb and depth images to the UI in a new image/camera1 space, this will automatically become a 2D image viewer.\n",
-    "rerun.log_image(\"image/camera1/rgb\",rgb)\n",
-    "rerun.log_image(\"image/camera1/depth\",depth)"
+    "rr.log(\"image/camera1/rgb\",rr.Image(rgb))\n",
+    "rr.log(\"image/camera1/depth\",rr.Image(depth))"
   ]
  },
  {
@@ -120,7 +111,7 @@
   "outputs": [],
   "source": [
    "# log a dummy 2D bbox \n",
-    "rerun.log_rect(\"image/camera1/rect\",[20,100,300,500])"
+    "rr.log(\"image/camera1/rect\", rr.Boxes2D(mins=[20,100], sizes=[300,500]))"
   ]
  },
  {
@@ -130,7 +121,7 @@
   "outputs": [],
   "source": [
    "# log some dummy keypoints and attach labels\n",
-    "rerun.log_points(\"image/camera1/keypoints\",[[600,500],[400,500]],keypoint_ids=[0,1],radii=20, labels=[\"keypoint1\",\"keypoint2\"])"
+    "rr.log(\"image/camera1/keypoints\", rr.Points2D([[600,500],[400,500]],keypoint_ids=[0,1],radii=20, labels=[\"keypoint1\",\"keypoint2\"]))"
   ]
  },
  {
@@ -143,7 +134,8 @@
    "# This specifies the pose of camera in world.\n",
    "translation = [0,0.5,0.7] \n",
    "rotation = [-0.707,0,0,0.707] #scalar-last! \n",
-    "rerun.log_rigid3(\"world/camera1\", parent_from_child=(translation, rotation))"
+    "# rr.log_rigid3(\"world/camera1\", parent_from_child=(translation, rotation))\n",
+    "rr.log(\"world/camera1\", rr.Transform3D(translation=translation, rotation=rotation))"
   ]
  },
  {
@@ -153,14 +145,8 @@
   "outputs": [],
   "source": [
    "# log the actual camera intrinsics, to create a pinhole camera model in the UI.\n",
-    "width = 1920\n",
-    "height = 1080\n",
-    "intrinsics = zed.intrinsics_matrix\n",
-    "rerun.log_pinhole(\"world/camera1/rgb\",\n",
-    "                  child_from_parent = intrinsics,\n",
-    "                  width = width,\n",
-    "                  height = height)\n",
-    "rerun.log_image(\"world/camera1/rgb\",rgb)\n"
+    "rr.log(\"world/camera1/rgb\", rr.Pinhole(image_from_camera=zed.intrinsics_matrix(), resolution=zed.resolution))\n",
+    "rr.log(\"world/camera1/rgb\",rr.Image(rgb))"
   ]
  },
  {
@@ -170,8 +156,8 @@
   "outputs": [],
   "source": [
    "# set up the 'view' of the 3D world space. This is for convenience so that rerun can sensible starting orientations for the spaces. \n",
-    "rerun.log_view_coordinates(\"world\", up=\"+Z\", right_handed=True, timeless=True)\n",
-    "rerun.log_view_coordinates(\"world/camera1\", xyz=\"RDF\")\n"
+    "rr.log(\"world\", rr.ViewCoordinates.RIGHT_HAND_Z_UP, timeless=True)\n",
+    "rr.log(\"world/camera1\", rr.ViewCoordinates.RDF, timeless=True)"
   ]
  },
  {
@@ -183,7 +169,7 @@
    "# log some more data\n",
    "for _ in range(5):\n",
    "    pointcloud = zed.get_colored_point_cloud()\n",
-    "    rerun.log_points(\"world/camera1/pointcloud\",positions=pointcloud[:,:3],colors=pointcloud[:,3:6])"
+    "    rr.log(\"world/camera1/pointcloud\", rr.Points3D(positions=pointcloud[:,:3],colors=pointcloud[:,3:6]))"
   ]
  },
  {
@@ -194,7 +180,7 @@
   "source": [
    "# log time-series (e.g. F/T sensor)\n",
    "for i in range(100):\n",
-    "    rerun.log_scalar(\"world/robot/force\",i)\n"
+    "    rr.log(\"world/robot/force\",rr.TimeSeriesScalar(i))\n"
   ]
  },
  {
@@ -234,7 +220,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-   "version": "3.9.16"
+   "version": "3.9.18"
  },
  "vscode": {
   "interpreter": {
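The notebook above exercises the rerun >= 0.11 logging API piece by piece. As a consolidated reference, here is a minimal, self-contained sketch of the same calls; the image, point cloud, and intrinsics below are dummy stand-ins, not actual ZED output:

import numpy as np
import rerun as rr

# Connect to (or spawn) a rerun viewer.
rr.init("rerun_zed_example", spawn=True)

# Dummy data standing in for the ZED camera output used in the notebook.
rgb = np.zeros((720, 1280, 3), dtype=np.uint8)
pointcloud = np.random.rand(1000, 6)  # columns: x, y, z, r, g, b
intrinsics = np.array([[500.0, 0.0, 640.0], [0.0, 500.0, 360.0], [0.0, 0.0, 1.0]])

# Orient the world and pose the camera (scalar-last quaternion).
rr.log("world", rr.ViewCoordinates.RIGHT_HAND_Z_UP, timeless=True)
rr.log("world/camera1", rr.Transform3D(translation=[0, 0.5, 0.7], rotation=[-0.707, 0, 0, 0.707]))

# A pinhole model plus an image makes the camera show up in the 3D view.
rr.log("world/camera1/rgb", rr.Pinhole(image_from_camera=intrinsics, resolution=[1280, 720]))
rr.log("world/camera1/rgb", rr.Image(rgb).compress(jpeg_quality=90))

# Colored point cloud and a scalar time series.
rr.log("world/camera1/pointcloud", rr.Points3D(positions=pointcloud[:, :3], colors=pointcloud[:, 3:6]))
for i in range(100):
    rr.log("world/robot/force", rr.TimeSeriesScalar(i))

As the notebook notes, you can also start the viewer beforehand with `python -m rerun` and let `rr.init` connect to it.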
2 changes: 1 addition & 1 deletion airo-camera-toolkit/setup.py
@@ -14,7 +14,7 @@
         "opencv-contrib-python==4.8.1.78", # We need opencv contrib for the aruco marker detection, but when some packages install (a different version of) opencv-python-headless, this breaks the contrib version. So we install both here to make sure they are the same version.
         "opencv-python-headless==4.8.1.78", # Lock to match contrib version.
         "matplotlib",
-        "rerun-sdk==0.9.0",
+        "rerun-sdk>=0.11.0",
         "click==8.1.3", # 8.1.4 breaks mypy
         "loguru",
     ],
3 changes: 3 additions & 0 deletions airo-dataset-tools/README.md
@@ -13,5 +13,8 @@ They fall into two categories:
 * 3D poses
 * Camera instrinsics
 
+
+> [Pydantic](https://docs.pydantic.dev/latest/) is used heavily throughout this package.
+It allows you to easily create Python objects that can be saved and loaded to and from JSON files.
 
 [**CVAT labeling workflow & tools**](airo_dataset_tools/cvat_labeling/readme.md)
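As a rough illustration of the pattern that README note refers to, here is a minimal sketch using the Pydantic v2 API; the `Pose` model below is a made-up example, not the package's actual schema:

from pydantic import BaseModel


class Pose(BaseModel):
    # Hypothetical model; airo-dataset-tools defines its own (e.g. for COCO annotations and 3D poses).
    position_in_meters: list[float]
    rotation_quaternion_xyzw: list[float]


pose = Pose(position_in_meters=[0.0, 0.5, 0.7], rotation_quaternion_xyzw=[-0.707, 0.0, 0.0, 0.707])
json_string = pose.model_dump_json()              # Python object -> JSON string
restored = Pose.model_validate_json(json_string)  # JSON string -> validated Python object

The same objects also round-trip through plain dicts via `model_dump()` and `model_validate()`, which is what makes them convenient for dataset formats stored as JSON.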
64 changes: 50 additions & 14 deletions airo-dataset-tools/airo_dataset_tools/cli.py
@@ -5,10 +5,12 @@
 from typing import List, Optional
 
 import click
+from airo_dataset_tools.coco_tools.change_coco_images_prefix import change_coco_json_image_prefix
 from airo_dataset_tools.coco_tools.coco_instances_to_yolo import create_yolo_dataset_from_coco_instances_dataset
 from airo_dataset_tools.coco_tools.fiftyone_viewer import view_coco_dataset
+from airo_dataset_tools.coco_tools.merge_datasets import merge_coco_datasets
 from airo_dataset_tools.coco_tools.split_dataset import split_and_save_coco_dataset
-from airo_dataset_tools.coco_tools.transform_dataset import resize_coco_keypoints_dataset
+from airo_dataset_tools.coco_tools.transform_dataset import resize_coco_dataset
 from airo_dataset_tools.cvat_labeling.convert_cvat_to_coco import cvat_image_to_coco
 
 
@@ -41,9 +43,9 @@ def view_coco_dataset_cli(
 
 @cli.command(name="convert-cvat-to-coco-keypoints")
 @click.argument("cvat_xml_file", type=str, required=True)
-@click.argument("coco_categories_json_file", type=str, required=True)
-@click.option("--add_bbox", is_flag=True, default=False, help="include bounding box in coco annotations")
-@click.option("--add_segmentation", is_flag=True, default=False, help="include segmentation in coco annotations")
+@click.argument("coco-categories-json-file", type=str, required=True)
+@click.option("--add-bbox", is_flag=True, default=False, help="include bounding box in coco annotations")
+@click.option("--add-segmentation", is_flag=True, default=False, help="include segmentation in coco annotations")
 def convert_cvat_to_coco_cli(
     cvat_xml_file: str, coco_categories_json_file: str, add_bbox: bool, add_segmentation: bool
 ) -> None:
@@ -58,24 +60,27 @@ def convert_cvat_to_coco_cli(
         json.dump(coco, file)
 
 
-@cli.command(name="resize-coco-keypoints-dataset")
+@cli.command(name="resize-coco-dataset")
 @click.argument("annotations-json-path", type=click.Path(exists=True))
 @click.option("--width", type=int, required=True)
 @click.option("--height", type=int, required=True)
-def resize_coco_keypoints_dataset_cli(annotations_json_path: str, width: int, height: int) -> None:
-    """Resize a COCO dataset. Will create a new directory with the resized dataset on the same level as the original dataset.
+@click.option("--target-dataset-dir", type=str, required=False)
+def resize_coco_dataset_cli(
+    annotations_json_path: str, width: int, height: int, target_dataset_dir: Optional[str]
+) -> None:
+    """Resize a COCO dataset. Will create a new directory with the resized dataset at the specified target_dataset_dir.
     Dataset is assumed to be
     /dir
         annotations.json # contains relative paths w.r.t. /dir
         ...
     """
-    resize_coco_keypoints_dataset(annotations_json_path, width, height)
+    resize_coco_dataset(annotations_json_path, width, height, target_dataset_dir=target_dataset_dir)
 
 
 @cli.command(name="coco-instances-to-yolo")
-@click.option("--coco_json", type=str)
-@click.option("--target_dir", type=str)
-@click.option("--use_segmentation", is_flag=True)
+@click.option("--coco-json", type=str)
+@click.option("--target-dir", type=str)
+@click.option("--use-segmentation", is_flag=True)
 def coco_intances_to_yolo(coco_json: str, target_dir: str, use_segmentation: bool) -> None:
     """Create a YOLO detections/segmentations dataset from a coco instances dataset"""
     print(f"converting coco instances dataset {coco_json} to yolo dataset {target_dir}")
@@ -84,16 +89,47 @@ def coco_intances_to_yolo(coco_json: str, target_dir: str, use_segmentation: bool) -> None:
 
 @cli.command(name="split-coco-dataset")
 @click.argument("json-path", type=click.Path(exists=True))
-@click.option("--split-ratios", type=float, multiple=True, required=True)
+@click.option("--split-ratio", type=float, multiple=True, required=True)
 @click.option("--shuffle-before-splitting", is_flag=True, default=True)
-def split_coco_dataset_cli(json_path: str, split_ratios: List[float], shuffle_before_splitting: bool) -> None:
+def split_coco_dataset_cli(json_path: str, split_ratio: List[float], shuffle_before_splitting: bool) -> None:
     """Split a COCO dataset into subsets according to the specified relative ratios and save them to disk.
     Images are split with their corresponding annotations. No guarantees on class balance or annotation ratios.
     If two ratios are specified, the dataset will be split into two subsets. these will be called train/val by default.
     If three ratios are specified, the dataset will be split into three subsets. these will be called train/val/test by default.
+    e.g. split-coco-dataset <path> --split-ratio 0.8 --split-ratio 0.2
     """
-    split_and_save_coco_dataset(json_path, split_ratios, shuffle_before_splitting=shuffle_before_splitting)
+    split_and_save_coco_dataset(json_path, split_ratio, shuffle_before_splitting=shuffle_before_splitting)
+
+
+@cli.command(name="change-coco-images-prefix")
+@click.argument("coco-json", type=click.Path(exists=True))
+@click.option("--current-prefix", type=str, required=True)
+@click.option("--new-prefix", type=str, required=True)
+@click.option("--target-json-path", type=str)
+def change_coco_images_prefix_cli(
+    coco_json: str, current_prefix: str, new_prefix: str, target_json_path: Optional[str] = None
+) -> None:
+    """change the prefix of images in a coco dataset."""
+    return change_coco_json_image_prefix(coco_json, current_prefix, new_prefix, target_json_path)
+
+
+@cli.command(name="merge-coco-datasets")
+@click.argument("coco-json-1", type=click.Path(exists=True))
+@click.argument("coco-json-2", type=click.Path(exists=True))
+@click.option(
+    "--target-json-path",
+    type=str,
+    help="optional path to save the merged dataset to. If none is provided, a new directory will be created in the parent directory of coco-json-1 ",
+)
+def merge_coco_datasets_cli(coco_json_1: str, coco_json_2: str, target_json_path: Optional[str] = None) -> None:
+    """merge two coco datasets into a single dataset."""
+    if not target_json_path:
+        target_json_path = os.path.join(os.path.dirname(coco_json_1), "merged")
+        os.makedirs(target_json_path, exist_ok=True)
+        target_json_path = os.path.join(target_json_path, "annotations.json")
+    return merge_coco_datasets(coco_json_1, coco_json_2, target_json_path)
 
 
 if __name__ == "__main__":