From 526e851c3a99ed6772d6598ffae0f60fa3a81eea Mon Sep 17 00:00:00 2001 From: Zhongqi Miao Date: Thu, 11 Apr 2024 22:35:07 -0700 Subject: [PATCH 1/3] fix bugs for empty detections in timelaps --- PytorchWildlife/utils/post_process.py | 101 ++++++++++++++++---------- 1 file changed, 64 insertions(+), 37 deletions(-) diff --git a/PytorchWildlife/utils/post_process.py b/PytorchWildlife/utils/post_process.py index ef6f028b..8e0875cf 100644 --- a/PytorchWildlife/utils/post_process.py +++ b/PytorchWildlife/utils/post_process.py @@ -83,7 +83,7 @@ def save_crop_images(results, output_dir): ) -def save_detection_json(results, output_dir, categories=None, exclude_category_ids=[]): +def save_detection_json(det_results, output_dir, categories=None, exclude_category_ids=[], exclude_file_path=None): """ Save detection results to a JSON file. @@ -96,32 +96,39 @@ def save_detection_json(results, output_dir, categories=None, exclude_category_i List of categories for detected objects. Defaults to None. exclude_category_ids (list, optional): List of category IDs to exclude from the output. Defaults to []. Category IDs can be found in the definition of each models. + exclude_file_path (str, optional): + We can exclude the some path sections from the image ID. Defaults to None. 
""" json_results = {"annotations": [], "categories": categories} - with open(output_dir, "w") as f: - for r in results: - - # Category filtering - img_id = r["img_id"] - category = r["detections"].class_id - - bbox = r["detections"].xyxy.astype(int)[~np.isin(category, exclude_category_ids)] - confidence = r["detections"].confidence[~np.isin(category, exclude_category_ids)] - category = category[~np.isin(category, exclude_category_ids)] - - if not all([x in exclude_category_ids for x in category]): - json_results["annotations"].append( - { - "img_id": img_id, - "bbox": bbox.tolist(), - "category": category.tolist(), - "confidence": confidence.tolist(), - } - ) + for det_r in det_results: + + # Category filtering + img_id = det_r["img_id"] + category = det_r["detections"].class_id + + bbox = det_r["detections"].xyxy.astype(int)[~np.isin(category, exclude_category_ids)] + confidence = det_r["detections"].confidence[~np.isin(category, exclude_category_ids)] + category = category[~np.isin(category, exclude_category_ids)] + + # if not all([x in exclude_category_ids for x in category]): + json_results["annotations"].append( + { + "img_id": img_id.replace(exclude_file_path + os.sep, '') if exclude_file_path else img_id, + "bbox": bbox.tolist(), + "category": category.tolist(), + "confidence": confidence.tolist(), + } + ) + + with open(output_dir, "w") as f: json.dump(json_results, f, indent=4) -def save_detection_timelapse_json(det_results, output_dir, categories=None): + +def save_detection_timelapse_json( + det_results, output_dir, categories=None, + exclude_category_ids=[], exclude_file_path=None, detector={"detector": "megadetector_v5"} + ): """ Save detection results to a JSON file. @@ -132,26 +139,42 @@ def save_detection_timelapse_json(det_results, output_dir, categories=None): Path to save the output JSON file. categories (list, optional): List of categories for detected objects. Defaults to None. 
+        exclude_category_ids (list, optional): +            List of category IDs to exclude from the output. Defaults to []. Category IDs can be found in the definition of each model. +        exclude_file_path (str, optional): +            Sometimes, Timelapse has path issues. We can exclude some path sections from the image ID. Defaults to None. +        info (dict, optional): +            Default Timelapse info. Defaults to {"detector": "megadetector_v5"}. """ + json_results = { -        "info": {"detector": "megadetector_v5"}, +        "info": info, "detection_categories": categories, "images": [] } for det_r in det_results: + + img_id = det_r["img_id"] + category_id_list = det_r["detections"].class_id + + bbox_list = det_r["detections"].xyxy.astype(int)[~np.isin(category_id_list, exclude_category_ids)] + confidence_list = det_r["detections"].confidence[~np.isin(category_id_list, exclude_category_ids)] + normalized_bbox_list = np.array(det_r["normalized_coords"])[~np.isin(category_id_list, exclude_category_ids)] + category_id_list = category_id_list[~np.isin(category_id_list, exclude_category_ids)] + + # if not all([x in exclude_category_ids for x in category_id_list]): image_annotations = { - "file": det_r["img_id"], - "max_detection_conf": max(det_r["detections"].confidence.tolist()), + "file": img_id.replace(exclude_file_path + os.sep, '') if exclude_file_path else img_id, + "max_detection_conf": float(max(confidence_list)) if len(confidence_list) > 0 else '', "detections": [] } - for i in range(len(det_r["detections"])): - det = det_r["detections"][i] - normalized_bbox = [float(y) for y in det_r["normalized_coords"][i]] + for i in range(len(bbox_list)): + normalized_bbox = [float(y) for y in normalized_bbox_list[i]] detection = { - "category": str(det.class_id[0]), - "conf": float(det.confidence[0]), + "category": str(category_id_list[i]), + "conf": float(confidence_list[i]), "bbox": [normalized_bbox[0], normalized_bbox[1], normalized_bbox[2]-normalized_bbox[0], normalized_bbox[3]-normalized_bbox[1]], 
"classifications": [] } @@ -165,7 +188,7 @@ def save_detection_timelapse_json(det_results, output_dir, categories=None): def save_detection_classification_json( - det_results, clf_results, output_path, det_categories=None, clf_categories=None + det_results, clf_results, output_path, det_categories=None, clf_categories=None, exclude_file_path=None ): """ Save classification results to a JSON file. @@ -181,6 +204,8 @@ def save_detection_classification_json( List of categories for detected objects. Defaults to None. clf_categories (list, optional): List of categories for classified objects. Defaults to None. + exclude_file_path (str, optional): + We can exclude the some path sections from the image ID. Defaults to None. """ json_results = { @@ -205,7 +230,7 @@ def save_detection_classification_json( json_results["annotations"].append( { - "img_id": str(det_r["img_id"]), + "img_id": str(det_r["img_id"]).replace(exclude_file_path + os.sep, '') if exclude_file_path else str(det_r["img_id"]), "bbox": [ [int(x) for x in sublist] for sublist in det_r["detections"].xyxy.astype(int).tolist() @@ -224,7 +249,8 @@ def save_detection_classification_json( def save_detection_classification_timelapse_json( - det_results, clf_results, output_path, det_categories=None, clf_categories=None + det_results, clf_results, output_path, det_categories=None, clf_categories=None, + exclude_file_path=None, info={"detector": "megadetector_v5"} ): """ Save detection and classification results to a JSON file in the specified format. @@ -240,10 +266,11 @@ def save_detection_classification_timelapse_json( Dictionary of categories for detected objects. Defaults to None. clf_categories (dict, optional): Dictionary of categories for classified objects. Defaults to None. - + exclude_file_path (str, optional): + We can exclude the some path sections from the image ID. Defaults to None. 
""" json_results = { - "info": {"detector": "megadetector_v5"}, + "info": info, "detection_categories": det_categories, "classification_categories": clf_categories, "images": [] @@ -251,8 +278,8 @@ def save_detection_classification_timelapse_json( for det_r in det_results: image_annotations = { - "file": det_r["img_id"], - "max_detection_conf": max(det_r["detections"].confidence.tolist()), + "file": str(det_r["img_id"]).replace(exclude_file_path + os.sep, '') if exclude_file_path else str(det_r["img_id"]), + "max_detection_conf": float(max(det_r["detections"].confidence)) if len(det_r["detections"].confidence) > 0 else '', "detections": [] } From 26c97d022e7c33a85583d69a190f8a5fc2c3369e Mon Sep 17 00:00:00 2001 From: Zhongqi Miao Date: Thu, 11 Apr 2024 22:38:10 -0700 Subject: [PATCH 2/3] add new argument in the timelapse output demos --- demo/image_demo.py | 8 ++++++-- demo/image_detection_colabdemo.ipynb | 11 ++++++++--- demo/image_detection_demo.ipynb | 8 ++++++-- 3 files changed, 20 insertions(+), 7 deletions(-) diff --git a/demo/image_demo.py b/demo/image_demo.py index 72251f71..0c097230 100644 --- a/demo/image_demo.py +++ b/demo/image_demo.py @@ -78,8 +78,12 @@ # Saving the detection results in JSON format pw_utils.save_detection_json(results, os.path.join(".","batch_output.json"), categories=detection_model.CLASS_NAMES, - exclude_category_ids=[]) + exclude_category_ids=[], # Category IDs can be found in the definition of each model. + exclude_file_path=None) # Saving the detection results in timelapse JSON format pw_utils.save_detection_timelapse_json(results, os.path.join(".","batch_output_timelapse.json"), - categories=detection_model.CLASS_NAMES) \ No newline at end of file + categories=detection_model.CLASS_NAMES, + exclude_category_ids=[], # Category IDs can be found in the definition of each model. 
+ exclude_file_path=tgt_folder_path, + info={"detector": "MegaDetectorV5"}) \ No newline at end of file diff --git a/demo/image_detection_colabdemo.ipynb b/demo/image_detection_colabdemo.ipynb index 1c2451ea..acc278e0 100644 --- a/demo/image_detection_colabdemo.ipynb +++ b/demo/image_detection_colabdemo.ipynb @@ -782,6 +782,7 @@ } ], "source": [ + "import os\n", "import numpy as np\n", "from PIL import Image\n", "import torch\n", @@ -1042,9 +1043,10 @@ }, "outputs": [], "source": [ - "pw_utils.save_detection_json(results, \"./batch_output.json\",\n", + "pw_utils.save_detection_json(results, os.path.join(\".\",\"batch_output.json\"),\n", " categories=detection_model.CLASS_NAMES,\n", - " exclude_category_ids=[]) # Category IDs can be found in the definition of each model." + " exclude_category_ids=[], # Category IDs can be found in the definition of each model.\n", + " exclude_file_path=None)" ] }, { @@ -1064,7 +1066,10 @@ "outputs": [], "source": [ "pw_utils.save_detection_timelapse_json(results, os.path.join(\".\",\"batch_output_timelapse.json\"),\n", - " categories=detection_model.CLASS_NAMES)" + " categories=detection_model.CLASS_NAMES,\n", + " exclude_category_ids=[], # Category IDs can be found in the definition of each model.\n", + " exclude_file_path=tgt_folder_path,\n", + " info={\"detector\": \"MegaDetectorV5\"})" ] }, { diff --git a/demo/image_detection_demo.ipynb b/demo/image_detection_demo.ipynb index 31457754..5af15e07 100644 --- a/demo/image_detection_demo.ipynb +++ b/demo/image_detection_demo.ipynb @@ -178,7 +178,8 @@ "source": [ "pw_utils.save_detection_json(results, os.path.join(\".\",\"batch_output.json\"),\n", " categories=detection_model.CLASS_NAMES,\n", - " exclude_category_ids=[]) # Category IDs can be found in the definition of each model." 
+ " exclude_category_ids=[], # Category IDs can be found in the definition of each model.\n", + " exclude_file_path=None)" ] }, { @@ -198,7 +199,10 @@ "outputs": [], "source": [ "pw_utils.save_detection_timelapse_json(results, os.path.join(\".\",\"batch_output_timelapse.json\"),\n", - " categories=detection_model.CLASS_NAMES)" + " categories=detection_model.CLASS_NAMES,\n", + " exclude_category_ids=[], # Category IDs can be found in the definition of each model.\n", + " exclude_file_path=tgt_folder_path,\n", + " info={\"detector\": \"MegaDetectorV5\"})" ] }, { From d855536f8171cb86c55b474930a1dc0089eaae5f Mon Sep 17 00:00:00 2001 From: Zhongqi Miao Date: Thu, 11 Apr 2024 22:38:27 -0700 Subject: [PATCH 3/3] bump version to 1.0.2.10 for bug fixes --- setup.py | 2 +- version.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index ae58b769..cb9f06f6 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ long_description = file.read() setup( name='PytorchWildlife', - version='1.0.2.9.1', + version='1.0.2.10', packages=find_packages(), url='https://github.com/microsoft/CameraTraps/', license='MIT', diff --git a/version.txt b/version.txt index c2a950f1..c12b9339 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -1.0.2.9 \ No newline at end of file +1.0.2.10 \ No newline at end of file