From 35083b0f64aebd177f70a0c6106459fb94260b73 Mon Sep 17 00:00:00 2001
From: Nicki Skafte
Date: Thu, 10 Oct 2024 11:15:20 +0200
Subject: [PATCH] fix remaining tests

---
 src/torchmetrics/detection/mean_ap.py | 109 +++++++++++++-------------
 tests/unittests/detection/test_map.py |   2 +-
 2 files changed, 57 insertions(+), 54 deletions(-)

diff --git a/src/torchmetrics/detection/mean_ap.py b/src/torchmetrics/detection/mean_ap.py
index 587f4a2d126..948fa554348 100644
--- a/src/torchmetrics/detection/mean_ap.py
+++ b/src/torchmetrics/detection/mean_ap.py
@@ -531,64 +531,67 @@ def compute(self) -> dict:
                     for anno in coco_preds.dataset["annotations"]:
                         anno["area"] = anno[f"area_{i_type}"]
 
-                coco_eval = self.cocoeval(coco_target, coco_preds, iouType=i_type)  # type: ignore[operator]
-                coco_eval.params.iouThrs = np.array(self.iou_thresholds, dtype=np.float64)
-                coco_eval.params.recThrs = np.array(self.rec_thresholds, dtype=np.float64)
-                coco_eval.params.maxDets = self.max_detection_thresholds
-
-                coco_eval.evaluate()
-                coco_eval.accumulate()
-                coco_eval.summarize()
-                stats = coco_eval.stats
-                result_dict.update(self._coco_stats_to_tensor_dict(stats, prefix=prefix))
-
-                summary = {}
-                if self.extended_summary:
-                    summary = {
-                        f"{prefix}ious": apply_to_collection(
-                            coco_eval.ious, np.ndarray, lambda x: torch.tensor(x, dtype=torch.float32)
-                        ),
-                        f"{prefix}precision": torch.tensor(coco_eval.eval["precision"]),
-                        f"{prefix}recall": torch.tensor(coco_eval.eval["recall"]),
-                        f"{prefix}scores": torch.tensor(coco_eval.eval["scores"]),
-                    }
-                    result_dict.update(summary)
-
-                # if class mode is enabled, evaluate metrics per class
-                if self.class_metrics:
-                    # regardless of average method, reinitialize dataset to get rid of internal state which can
-                    # lead to wrong results when evaluating per class
-                    coco_preds, coco_target = self._get_coco_datasets(average="macro")
+                if len(coco_preds.imgs) == 0 or len(coco_target.imgs) == 0:
+                    result_dict.update(self._coco_stats_to_tensor_dict(12 * [-1.0], prefix=prefix))
+                else:
                     coco_eval = self.cocoeval(coco_target, coco_preds, iouType=i_type)  # type: ignore[operator]
                     coco_eval.params.iouThrs = np.array(self.iou_thresholds, dtype=np.float64)
                     coco_eval.params.recThrs = np.array(self.rec_thresholds, dtype=np.float64)
                     coco_eval.params.maxDets = self.max_detection_thresholds
-                    map_per_class_list = []
-                    mar_per_class_list = []
-                    for class_id in self._get_classes():
-                        coco_eval.params.catIds = [class_id]
-                        with contextlib.redirect_stdout(io.StringIO()):
-                            coco_eval.evaluate()
-                            coco_eval.accumulate()
-                            coco_eval.summarize()
-                        class_stats = coco_eval.stats
-
-                        map_per_class_list.append(torch.tensor([class_stats[0]]))
-                        mar_per_class_list.append(torch.tensor([class_stats[8]]))
-
-                    map_per_class_values = torch.tensor(map_per_class_list, dtype=torch.float32)
-                    mar_per_class_values = torch.tensor(mar_per_class_list, dtype=torch.float32)
-                else:
-                    map_per_class_values = torch.tensor([-1], dtype=torch.float32)
-                    mar_per_class_values = torch.tensor([-1], dtype=torch.float32)
-                prefix = "" if len(self.iou_type) == 1 else f"{i_type}_"
-                result_dict.update(
-                    {
-                        f"{prefix}map_per_class": map_per_class_values,
-                        f"{prefix}mar_{self.max_detection_thresholds[-1]}_per_class": mar_per_class_values,
-                    },
-                )
+                    coco_eval.evaluate()
+                    coco_eval.accumulate()
+                    coco_eval.summarize()
+                    stats = coco_eval.stats
+                    result_dict.update(self._coco_stats_to_tensor_dict(stats, prefix=prefix))
+
+                    summary = {}
+                    if self.extended_summary:
+                        summary = {
+                            f"{prefix}ious": apply_to_collection(
+                                coco_eval.ious, np.ndarray, lambda x: torch.tensor(x, dtype=torch.float32)
+                            ),
+                            f"{prefix}precision": torch.tensor(coco_eval.eval["precision"]),
+                            f"{prefix}recall": torch.tensor(coco_eval.eval["recall"]),
+                            f"{prefix}scores": torch.tensor(coco_eval.eval["scores"]),
+                        }
+                        result_dict.update(summary)
+
+                    # if class mode is enabled, evaluate metrics per class
+                    if self.class_metrics:
+                        # regardless of average method, reinitialize dataset to get rid of internal state which can
+                        # lead to wrong results when evaluating per class
+                        coco_preds, coco_target = self._get_coco_datasets(average="macro")
+                        coco_eval = self.cocoeval(coco_target, coco_preds, iouType=i_type)  # type: ignore[operator]
+                        coco_eval.params.iouThrs = np.array(self.iou_thresholds, dtype=np.float64)
+                        coco_eval.params.recThrs = np.array(self.rec_thresholds, dtype=np.float64)
+                        coco_eval.params.maxDets = self.max_detection_thresholds
+
+                        map_per_class_list = []
+                        mar_per_class_list = []
+                        for class_id in self._get_classes():
+                            coco_eval.params.catIds = [class_id]
+                            with contextlib.redirect_stdout(io.StringIO()):
+                                coco_eval.evaluate()
+                                coco_eval.accumulate()
+                                coco_eval.summarize()
+                            class_stats = coco_eval.stats
+
+                            map_per_class_list.append(torch.tensor([class_stats[0]]))
+                            mar_per_class_list.append(torch.tensor([class_stats[8]]))
+
+                        map_per_class_values = torch.tensor(map_per_class_list, dtype=torch.float32)
+                        mar_per_class_values = torch.tensor(mar_per_class_list, dtype=torch.float32)
+                    else:
+                        map_per_class_values = torch.tensor([-1], dtype=torch.float32)
+                        mar_per_class_values = torch.tensor([-1], dtype=torch.float32)
+                    prefix = "" if len(self.iou_type) == 1 else f"{i_type}_"
+                    result_dict.update(
+                        {
+                            f"{prefix}map_per_class": map_per_class_values,
+                            f"{prefix}mar_{self.max_detection_thresholds[-1]}_per_class": mar_per_class_values,
+                        },
+                    )
 
         result_dict.update({"classes": torch.tensor(self._get_classes(), dtype=torch.int32)})
 
         return result_dict
diff --git a/tests/unittests/detection/test_map.py b/tests/unittests/detection/test_map.py
index 3c749864a32..5b869488131 100644
--- a/tests/unittests/detection/test_map.py
+++ b/tests/unittests/detection/test_map.py
@@ -410,7 +410,7 @@ def test_compare_both_same_time(tmpdir, backend):
             },
         ],
         [
-            {"boxes": Tensor([]), "scores": Tensor([]), "labels": Tensor([])},
+            {"boxes": Tensor([]), "scores": Tensor([]), "labels": IntTensor([])},
         ],
     ],
     "target": [
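
For context, a minimal sketch of the empty-input path the new guard covers. This assumes the public MeanAveragePrecision API; the trigger shown (calling compute() before any update()) is inferred from the guard rather than taken from this patch, and the printed values are illustrative.

    from torchmetrics.detection import MeanAveragePrecision

    # With no samples accumulated, the COCO datasets built inside compute()
    # contain no images, so the new len(...imgs) == 0 guard short-circuits
    # and fills all twelve COCO summary stats with the -1.0 sentinel instead
    # of running COCOeval on empty data.
    metric = MeanAveragePrecision(iou_type="bbox")
    print(metric.compute())  # e.g. map=tensor(-1.), map_50=tensor(-1.), ...

The accompanying test change pins empty "labels" tensors to an integer dtype (IntTensor), matching the category-id dtype the COCO backend expects.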