feat(fastlabel_to_t4dataset): fix keyframe handling #187

Merged
4 changes: 3 additions & 1 deletion perception_dataset/deepen/deepen_to_t4_converter.py
@@ -21,7 +21,9 @@
)
from perception_dataset.rosbag2.rosbag2_converter import Rosbag2Converter
from perception_dataset.t4_dataset.annotation_files_generator import AnnotationFilesGenerator
from perception_dataset.t4_dataset.keyframe_consistency_resolver import KeyFrameConsistencyResolver
from perception_dataset.t4_dataset.resolver.keyframe_consistency_resolver import (
KeyFrameConsistencyResolver,
)
from perception_dataset.utils.logger import configure_logger
import perception_dataset.utils.misc as misc_utils

29 changes: 9 additions & 20 deletions perception_dataset/fastlabel_to_t4/fastlabel_2d_to_t4_updater.py
@@ -1,6 +1,5 @@
from __future__ import annotations

import json
import os.path as osp
from pathlib import Path
import shutil
@@ -40,10 +39,14 @@ def __init__(

def convert(self) -> None:
t4_datasets = sorted([d.name for d in self._input_base.iterdir() if d.is_dir()])
anno_jsons_dict = self._load_annotation_jsons(t4_datasets)
anno_jsons_dict = self._load_annotation_jsons(t4_datasets, "_CAM")
fl_annotations = self._format_fastlabel_annotation(anno_jsons_dict)

for t4dataset_name in t4_datasets:
# Check if annotation exists
if t4dataset_name not in fl_annotations.keys():
continue

# Check if input directory exists
input_dir = self._input_base / t4dataset_name
input_annotation_dir = input_dir / "annotation"
@@ -78,28 +81,14 @@ def convert(self) -> None:
else:
raise ValueError("If you want to overwrite files, use --overwrite option.")

if t4dataset_name not in fl_annotations.keys():
logger.warning(f"No annotation for {t4dataset_name}")
continue

# Start updating annotations
annotation_files_updater = AnnotationFilesUpdater(description=self._description)
annotation_files_updater = AnnotationFilesUpdater(
description=self._description, surface_categories=self._surface_categories
)
annotation_files_updater.convert_one_scene(
input_dir=input_dir,
output_dir=output_dir,
scene_anno_dict=fl_annotations[t4dataset_name],
dataset_name=t4dataset_name,
)

def _load_annotation_jsons(self, t4_datasets: list[str]) -> dict[str, list[dict[str, any]]]:
anno_dict = {}
for file in self._input_anno_files:
t4_dataset_name = file.name.split("_CAM")[0]
if t4_dataset_name not in t4_datasets:
continue
with open(file) as f:
one_label = json.load(f)
if t4_dataset_name not in anno_dict.keys():
anno_dict[t4_dataset_name] = []
anno_dict[t4_dataset_name].extend(one_label)
return anno_dict
logger.info(f"Finished updating annotations for {t4dataset_name}")
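
Note: the updater no longer keeps its own _load_annotation_jsons; it now calls the shared loader with a "_CAM" suffix, so the dataset name is presumably taken from the part of the annotation file name before that suffix, and datasets without annotations are skipped before the output directory is touched. A minimal sketch of such a suffix-based loader, reconstructed from the removed method above (the standalone function name and signature here are assumptions, not the repository's actual API):

    # Sketch (assumption): group FastLabel annotation JSON files by T4 dataset
    # name, where the dataset name is the file-name prefix before the suffix.
    import json
    from pathlib import Path

    def load_annotation_jsons(
        anno_files: list[Path], t4_datasets: list[str], dataset_name_suffix: str = "_CAM"
    ) -> dict[str, list[dict]]:
        anno_dict: dict[str, list[dict]] = {}
        for file in anno_files:
            # e.g. "scene_0001_CAM_FRONT.json" -> "scene_0001" (hypothetical file name)
            dataset_name = file.name.split(dataset_name_suffix)[0]
            if dataset_name not in t4_datasets:
                continue
            with open(file) as f:
                labels = json.load(f)
            anno_dict.setdefault(dataset_name, []).extend(labels)
        return anno_dict
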
11 changes: 5 additions & 6 deletions perception_dataset/t4_dataset/annotation_files_generator.py
@@ -87,10 +87,11 @@ def convert_one_scene(

nusc = NuScenes(version="annotation", dataroot=input_dir, verbose=False)
frame_index_to_sample_token: Dict[int, str] = {}
for frame_index, sample in enumerate(nusc.sample):
frame_index_to_sample_token[frame_index] = sample["token"]
for sample_data in nusc.sample_data:
frame_index = int((sample_data["filename"].split("/")[2]).split(".")[0])
frame_index_to_sample_token[frame_index] = sample_data["sample_token"]
try:
if "LIDAR_TOP" in sample["data"]:
if "LIDAR_TOP" in nusc.sample[0]["data"]:
lidar_sensor_channel = SENSOR_ENUM.LIDAR_TOP.value["channel"]
else:
lidar_sensor_channel = SENSOR_ENUM.LIDAR_CONCAT.value["channel"]
@@ -116,9 +117,7 @@
prev_wid_hgt: Tuple = (0, 0)
# NOTE: num_cameras is always 6, because it is hard coded above.
for frame_index_nuim, sample_nuim in enumerate(nuim.sample_data):
if (
sample_nuim["fileformat"] == "png" or sample_nuim["fileformat"] == "jpg"
) and sample_nuim["is_key_frame"]:
if sample_nuim["fileformat"] == "png" or sample_nuim["fileformat"] == "jpg":
cam = sample_nuim["filename"].split("/")[1]
cam_idx = self._camera2idx[cam]

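
Note: the generator previously assigned frame indices by enumerating nusc.sample, which only covers keyframes; it now derives the index from each sample_data filename and drops the is_key_frame filter, so non-keyframe camera images also receive a sample token. A sketch of the filename-based mapping, assuming filenames of the form "data/CAM_FRONT/00012.jpg" (the helper name and the example record are hypothetical):

    from typing import Dict, List

    def build_frame_index_map(sample_data_records: List[dict]) -> Dict[int, str]:
        # Assumes filenames like "data/CAM_FRONT/00012.jpg": the stem of the
        # third path component is the zero-padded frame index.
        frame_index_to_sample_token: Dict[int, str] = {}
        for sample_data in sample_data_records:
            frame_index = int(sample_data["filename"].split("/")[2].split(".")[0])
            frame_index_to_sample_token[frame_index] = sample_data["sample_token"]
        return frame_index_to_sample_token

    # Hypothetical record for illustration:
    records = [{"filename": "data/CAM_FRONT/00012.jpg", "sample_token": "abc123"}]
    assert build_frame_index_map(records) == {12: "abc123"}
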
18 changes: 15 additions & 3 deletions perception_dataset/t4_dataset/annotation_files_updater.py
@@ -1,6 +1,7 @@
import json
import os.path as osp
from typing import Any, Dict
from pathlib import Path
from typing import Any, Dict, List

from perception_dataset.t4_dataset.annotation_files_generator import AnnotationFilesGenerator
from perception_dataset.t4_dataset.classes import (
@@ -12,6 +13,9 @@
SurfaceAnnTable,
VisibilityTable,
)
from perception_dataset.t4_dataset.resolver.keyframe_consistency_resolver import (
KeyFrameConsistencyResolver,
)


def _load_json(filepath: str) -> Any:
@@ -21,8 +25,13 @@ def _load_json(filepath: str) -> Any:


class AnnotationFilesUpdater(AnnotationFilesGenerator):
def __init__(self, with_camera: bool = True, description: Dict[str, Dict[str, str]] = ...):
super().__init__(with_camera, description)
def __init__(
self,
with_camera: bool = True,
description: Dict[str, Dict[str, str]] = ...,
surface_categories: List[str] = [],
):
super().__init__(with_camera, description, surface_categories)
self.description = description

def convert_one_scene(
@@ -46,6 +55,9 @@ def convert_one_scene(
dataset_name=dataset_name,
)

modifier = KeyFrameConsistencyResolver()
modifier.inspect_and_fix_t4_segment(Path(output_dir))

def _init_table_from_json(self, anno_dir: str) -> None:
self._attribute_table = AttributeTable.from_json(
filepath=osp.join(anno_dir, AttributeTable.FILENAME),
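
Note: AnnotationFilesUpdater now forwards surface_categories to its parent and, after writing a scene, runs KeyFrameConsistencyResolver.inspect_and_fix_t4_segment on the output directory. A usage sketch of that flow under the constructor arguments shown in this diff; the paths, description dict, and category name are hypothetical placeholders:

    from perception_dataset.t4_dataset.annotation_files_updater import AnnotationFilesUpdater

    updater = AnnotationFilesUpdater(
        description={},                       # hypothetical: empty description
        surface_categories=["road_surface"],  # hypothetical category list
    )
    # convert_one_scene writes the updated annotation files and then runs
    # KeyFrameConsistencyResolver on output_dir, so no separate fix-up call is needed.
    updater.convert_one_scene(
        input_dir="data/t4_input/scene_0001",   # hypothetical paths
        output_dir="data/t4_output/scene_0001",
        scene_anno_dict={},                     # FastLabel annotations for this scene
        dataset_name="scene_0001",
    )
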
2 changes: 1 addition & 1 deletion perception_dataset/t4_dataset/classes/object_ann.py
@@ -96,7 +96,7 @@ def from_json(cls, filepath: str) -> ObjectAnnTable:
attribute_tokens=item["attribute_tokens"],
bbox=item["bbox"],
mask=item["mask"],
automatic_annotation=item["automatic_annotation"],
automatic_annotation=item.get("automatic_annotation", False),
)
record.token = item["token"]
table.set_record_to_table(record)
2 changes: 1 addition & 1 deletion perception_dataset/t4_dataset/classes/sample_annotation.py
@@ -201,7 +201,7 @@ def from_json(cls, filepath: str) -> SampleAnnotationTable:
},
num_lidar_pts=item["num_lidar_pts"],
num_radar_pts=item["num_radar_pts"],
automatic_annotation=item["automatic_annotation"],
automatic_annotation=item.get("automatic_annotation", False),
)
record.token = item["token"]
table.set_record_to_table(record)
2 changes: 1 addition & 1 deletion perception_dataset/t4_dataset/classes/surface_ann.py
@@ -67,7 +67,7 @@ def from_json(cls, filepath: str) -> SurfaceAnnTable:
category_token=item["category_token"],
mask=item["mask"],
sample_data_token=item["sample_data_token"],
automatic_annotation=item["automatic_annotation"],
automatic_annotation=item.get("automatic_annotation", False),
)
record.token = item["token"]
table.set_record_to_table(record)
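
Note: all three table classes now read automatic_annotation with dict.get and a False default, so annotation files written before the field was introduced still load instead of raising KeyError. A minimal illustration of the pattern (the records are hypothetical):

    import json

    # Record written before "automatic_annotation" existed vs. a current record.
    old_item = json.loads('{"token": "abc", "bbox": [0, 0, 10, 10]}')
    new_item = json.loads('{"token": "def", "bbox": [0, 0, 10, 10], "automatic_annotation": true}')

    assert old_item.get("automatic_annotation", False) is False  # missing -> default
    assert new_item.get("automatic_annotation", False) is True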