build(deps): update faster-coco-eval requirement from ==1.5.* to ==1.6.* in /requirements (#2750)

* build(deps): update faster-coco-eval requirement in /requirements

Updates the requirements on [faster-coco-eval](https://github.com/MiXaiLL76/faster_coco_eval) to permit the latest version.
- [Release notes](https://github.com/MiXaiLL76/faster_coco_eval/releases)
- [Changelog](https://github.com/MiXaiLL76/faster_coco_eval/blob/main/history.md)
- [Commits](MiXaiLL76/faster_coco_eval@1.5.2...1.6.0)

---
updated-dependencies:
- dependency-name: faster-coco-eval
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <[email protected]>

* fix implementation
* update tests
* fix remaining tests
* faster-coco-eval >=1.6.2, <1.7.0

---------

Signed-off-by: dependabot[bot] <[email protected]>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Nicki Skafte <[email protected]>
Co-authored-by: Jirka Borovec <[email protected]>
3 people authored Oct 11, 2024
1 parent 151fef1 commit c2c68b6
Showing 3 changed files with 75 additions and 62 deletions.
2 changes: 1 addition & 1 deletion requirements/detection_test.txt
@@ -1,4 +1,4 @@
 # NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package
 # in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment
 
-faster-coco-eval ==1.5.*
+faster-coco-eval >=1.6.3, <1.7.0
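
Note that the merged requirement pins `>=1.6.3, <1.7.0`, while the commit message bullet above mentions 1.6.2. A quick local sanity check against the new bound might look like the sketch below (the `packaging` helper is an assumption, not part of this PR):

```python
# Hypothetical check, not part of this PR: verify the installed backend
# falls inside the new test requirement ">=1.6.3, <1.7.0".
from importlib.metadata import version
from packaging.specifiers import SpecifierSet

installed = version("faster-coco-eval")
assert installed in SpecifierSet(">=1.6.3,<1.7.0"), f"unsupported version: {installed}"
```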
108 changes: 55 additions & 53 deletions src/torchmetrics/detection/mean_ap.py
@@ -531,65 +531,67 @@ def compute(self) -> dict:
                     for anno in coco_preds.dataset["annotations"]:
                         anno["area"] = anno[f"area_{i_type}"]
 
-                coco_eval = self.cocoeval(coco_target, coco_preds, iouType=i_type)  # type: ignore[operator]
-                coco_eval.params.iouThrs = np.array(self.iou_thresholds, dtype=np.float64)
-                coco_eval.params.recThrs = np.array(self.rec_thresholds, dtype=np.float64)
-                coco_eval.params.maxDets = self.max_detection_thresholds
-
-                coco_eval.evaluate()
-                coco_eval.accumulate()
-                coco_eval.summarize()
-                stats = coco_eval.stats
-                result_dict.update(self._coco_stats_to_tensor_dict(stats, prefix=prefix))
-
-                summary = {}
-                if self.extended_summary:
-                    summary = {
-                        f"{prefix}ious": apply_to_collection(
-                            coco_eval.ious, np.ndarray, lambda x: torch.tensor(x, dtype=torch.float32)
-                        ),
-                        f"{prefix}precision": torch.tensor(coco_eval.eval["precision"]),
-                        f"{prefix}recall": torch.tensor(coco_eval.eval["recall"]),
-                        f"{prefix}scores": torch.tensor(coco_eval.eval["scores"]),
-                    }
-                result_dict.update(summary)
-
-                # if class mode is enabled, evaluate metrics per class
-                if self.class_metrics:
-                    if self.average == "micro":
-                        # since micro averaging have all the data in one class, we need to reinitialize the coco_eval
-                        # object in macro mode to get the per class stats
+                if len(coco_preds.imgs) == 0 or len(coco_target.imgs) == 0:
+                    result_dict.update(self._coco_stats_to_tensor_dict(12 * [-1.0], prefix=prefix))
+                else:
+                    coco_eval = self.cocoeval(coco_target, coco_preds, iouType=i_type)  # type: ignore[operator]
+                    coco_eval.params.iouThrs = np.array(self.iou_thresholds, dtype=np.float64)
+                    coco_eval.params.recThrs = np.array(self.rec_thresholds, dtype=np.float64)
+                    coco_eval.params.maxDets = self.max_detection_thresholds
+
+                    coco_eval.evaluate()
+                    coco_eval.accumulate()
+                    coco_eval.summarize()
+                    stats = coco_eval.stats
+                    result_dict.update(self._coco_stats_to_tensor_dict(stats, prefix=prefix))
+
+                    summary = {}
+                    if self.extended_summary:
+                        summary = {
+                            f"{prefix}ious": apply_to_collection(
+                                coco_eval.ious, np.ndarray, lambda x: torch.tensor(x, dtype=torch.float32)
+                            ),
+                            f"{prefix}precision": torch.tensor(coco_eval.eval["precision"]),
+                            f"{prefix}recall": torch.tensor(coco_eval.eval["recall"]),
+                            f"{prefix}scores": torch.tensor(coco_eval.eval["scores"]),
+                        }
+                    result_dict.update(summary)
+
+                    # if class mode is enabled, evaluate metrics per class
+                    if self.class_metrics:
+                        # regardless of average method, reinitialize dataset to get rid of internal state which can
+                        # lead to wrong results when evaluating per class
                         coco_preds, coco_target = self._get_coco_datasets(average="macro")
                         coco_eval = self.cocoeval(coco_target, coco_preds, iouType=i_type)  # type: ignore[operator]
                         coco_eval.params.iouThrs = np.array(self.iou_thresholds, dtype=np.float64)
                         coco_eval.params.recThrs = np.array(self.rec_thresholds, dtype=np.float64)
                         coco_eval.params.maxDets = self.max_detection_thresholds
 
-                    map_per_class_list = []
-                    mar_per_class_list = []
-                    for class_id in self._get_classes():
-                        coco_eval.params.catIds = [class_id]
-                        with contextlib.redirect_stdout(io.StringIO()):
-                            coco_eval.evaluate()
-                            coco_eval.accumulate()
-                            coco_eval.summarize()
-                        class_stats = coco_eval.stats
-
-                        map_per_class_list.append(torch.tensor([class_stats[0]]))
-                        mar_per_class_list.append(torch.tensor([class_stats[8]]))
-
-                    map_per_class_values = torch.tensor(map_per_class_list, dtype=torch.float32)
-                    mar_per_class_values = torch.tensor(mar_per_class_list, dtype=torch.float32)
-                else:
-                    map_per_class_values = torch.tensor([-1], dtype=torch.float32)
-                    mar_per_class_values = torch.tensor([-1], dtype=torch.float32)
-                prefix = "" if len(self.iou_type) == 1 else f"{i_type}_"
-                result_dict.update(
-                    {
-                        f"{prefix}map_per_class": map_per_class_values,
-                        f"{prefix}mar_{self.max_detection_thresholds[-1]}_per_class": mar_per_class_values,
-                    },
-                )
+                        map_per_class_list = []
+                        mar_per_class_list = []
+                        for class_id in self._get_classes():
+                            coco_eval.params.catIds = [class_id]
+                            with contextlib.redirect_stdout(io.StringIO()):
+                                coco_eval.evaluate()
+                                coco_eval.accumulate()
+                                coco_eval.summarize()
+                            class_stats = coco_eval.stats
+
+                            map_per_class_list.append(torch.tensor([class_stats[0]]))
+                            mar_per_class_list.append(torch.tensor([class_stats[8]]))
+
+                        map_per_class_values = torch.tensor(map_per_class_list, dtype=torch.float32)
+                        mar_per_class_values = torch.tensor(mar_per_class_list, dtype=torch.float32)
+                    else:
+                        map_per_class_values = torch.tensor([-1], dtype=torch.float32)
+                        mar_per_class_values = torch.tensor([-1], dtype=torch.float32)
+                    prefix = "" if len(self.iou_type) == 1 else f"{i_type}_"
+                    result_dict.update(
+                        {
+                            f"{prefix}map_per_class": map_per_class_values,
+                            f"{prefix}mar_{self.max_detection_thresholds[-1]}_per_class": mar_per_class_values,
+                        },
+                    )
         result_dict.update({"classes": torch.tensor(self._get_classes(), dtype=torch.int32)})
 
         return result_dict
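
In short, `compute()` now returns the sentinel value -1 for every COCO statistic when either the predictions or the targets contain no images, instead of handing an empty dataset to the evaluation backend, and the per-class branch always rebuilds the datasets in macro mode. A rough usage sketch of the guarded behaviour, modelled on the updated unit tests below (inputs are illustrative):

```python
import torch
from torch import IntTensor, Tensor
from torchmetrics.detection import MeanAveragePrecision

# Mirrors test_segm_iou_empty_gt_mask below: a prediction mask against an
# empty ground-truth mask should yield the -1 sentinel for every statistic.
metric = MeanAveragePrecision(iou_type="segm", backend="faster_coco_eval")
metric.update(
    [{"masks": torch.randint(0, 1, (1, 10, 10)).bool(), "scores": Tensor([0.5]), "labels": IntTensor([4])}],
    [{"masks": Tensor([]), "labels": IntTensor([])}],
)
res = metric.compute()
print(res["map"])  # expected: tensor(-1.)
```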
27 changes: 19 additions & 8 deletions tests/unittests/detection/test_map.py
Expand Up @@ -410,7 +410,7 @@ def test_compare_both_same_time(tmpdir, backend):
},
],
[
{"boxes": Tensor([]), "scores": Tensor([]), "labels": Tensor([])},
{"boxes": Tensor([]), "scores": Tensor([]), "labels": IntTensor([])},
],
],
"target": [
@@ -632,7 +632,12 @@ def test_segm_iou_empty_gt_mask(self, backend):
             [{"masks": torch.randint(0, 1, (1, 10, 10)).bool(), "scores": Tensor([0.5]), "labels": IntTensor([4])}],
             [{"masks": Tensor([]), "labels": IntTensor([])}],
         )
-        metric.compute()
+        res = metric.compute()
+        for key, value in res.items():
+            if key == "classes":
+                continue
+            assert value.item() == -1, f"Expected -1 for {key}"
+        assert res["classes"] == 4
 
     def test_segm_iou_empty_pred_mask(self, backend):
         """Test empty predictions."""
@@ -641,7 +646,12 @@ def test_segm_iou_empty_pred_mask(self, backend):
             [{"masks": torch.BoolTensor([]), "scores": Tensor([]), "labels": IntTensor([])}],
             [{"masks": torch.randint(0, 1, (1, 10, 10)).bool(), "labels": IntTensor([4])}],
         )
-        metric.compute()
+        res = metric.compute()
+        for key, value in res.items():
+            if key == "classes":
+                continue
+            assert value.item() == -1, f"Expected -1 for {key}"
+        assert res["classes"] == 4
 
     def test_error_on_wrong_input(self, backend):
         """Test class input validation."""
@@ -862,17 +872,18 @@ def test_average_argument(self, class_metrics, backend):
             _preds = apply_to_collection(deepcopy(_inputs["preds"]), IntTensor, lambda x: torch.ones_like(x))
             _target = apply_to_collection(deepcopy(_inputs["target"]), IntTensor, lambda x: torch.ones_like(x))
 
-        metric_micro = MeanAveragePrecision(average="micro", class_metrics=class_metrics, backend=backend)
-        metric_micro.update(deepcopy(_inputs["preds"][0]), deepcopy(_inputs["target"][0]))
-        metric_micro.update(deepcopy(_inputs["preds"][1]), deepcopy(_inputs["target"][1]))
-        result_micro = metric_micro.compute()
-
         metric_macro = MeanAveragePrecision(average="macro", class_metrics=class_metrics, backend=backend)
         metric_macro.update(_preds[0], _target[0])
         metric_macro.update(_preds[1], _target[1])
        result_macro = metric_macro.compute()
 
+        metric_micro = MeanAveragePrecision(average="micro", class_metrics=class_metrics, backend=backend)
+        metric_micro.update(_inputs["preds"][0], _inputs["target"][0])
+        metric_micro.update(_inputs["preds"][1], _inputs["target"][1])
+        result_micro = metric_micro.compute()
+
         if class_metrics:
+            print(result_macro["map_per_class"], result_micro["map_per_class"])
             assert torch.allclose(result_macro["map_per_class"], result_micro["map_per_class"])
             assert torch.allclose(result_macro["mar_100_per_class"], result_micro["mar_100_per_class"])
         else:
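
The reordered `test_average_argument` now computes the macro result before the micro one and feeds the micro metric the raw `_inputs` rather than deep copies; the property it checks is that per-class values agree across averaging modes. A condensed sketch of that property with toy single-box inputs (illustrative only, not taken from the test suite):

```python
import torch
from torch import IntTensor, Tensor
from torchmetrics.detection import MeanAveragePrecision

# With class_metrics=True, the per-class results should not depend on `average`.
preds = [{"boxes": Tensor([[10.0, 20.0, 30.0, 40.0]]), "scores": Tensor([0.9]), "labels": IntTensor([1])}]
target = [{"boxes": Tensor([[10.0, 20.0, 30.0, 40.0]]), "labels": IntTensor([1])}]

metric_macro = MeanAveragePrecision(average="macro", class_metrics=True)
metric_micro = MeanAveragePrecision(average="micro", class_metrics=True)
metric_macro.update(preds, target)
metric_micro.update(preds, target)

assert torch.allclose(
    metric_macro.compute()["map_per_class"], metric_micro.compute()["map_per_class"]
)
```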
