""" |
|
|
This script is almost the same as
https://github.com/MIC-DKFZ/nnDetection/blob/main/nndet/evaluator/detection/matching.py

The changes include 1) code reformatting, 2) docstrings,
3) making the input argument ``gt_ignore`` optional (if not given, no ground truth boxes are ignored).
|
|
""" |
|
|
|
|
|
from __future__ import annotations |
|
|
|
|
|
from collections.abc import Callable, Sequence |
|
|
|
|
|
import numpy as np |
|
|
|
|
|
__all__ = ["matching_batch"] |
|
|
|
|
|
|
|
|
def matching_batch( |
|
|
iou_fn: Callable[[np.ndarray, np.ndarray], np.ndarray], |
|
|
iou_thresholds: Sequence[float], |
|
|
pred_boxes: Sequence[np.ndarray], |
|
|
pred_classes: Sequence[np.ndarray], |
|
|
pred_scores: Sequence[np.ndarray], |
|
|
gt_boxes: Sequence[np.ndarray], |
|
|
gt_classes: Sequence[np.ndarray], |
|
|
gt_ignore: Sequence[Sequence[bool]] | Sequence[np.ndarray] | None = None, |
|
|
max_detections: int = 100, |
|
|
) -> list[dict[int, dict[str, np.ndarray]]]: |
|
|
""" |
|
|
Match boxes of a batch to corresponding ground truth for each category |
|
|
independently. |
|
|
|
|
|
    Args:
        iou_fn: callable that computes the pairwise overlap (e.g. IoU) of two sets of boxes
        iou_thresholds: defines which IoU thresholds should be evaluated
        pred_boxes: predicted boxes from a single batch; List[[D, dim * 2]],
            D number of predictions
        pred_classes: predicted classes from a single batch; List[[D]],
            D number of predictions
        pred_scores: predicted score for each bounding box; List[[D]],
            D number of predictions
        gt_boxes: ground truth boxes; List[[G, dim * 2]], G number of ground
            truth
        gt_classes: ground truth classes; List[[G]], G number of ground truth
        gt_ignore: specifies which ground truth boxes are not counted as
            true positives. If not given, no ground truth boxes are ignored
            (detections which match these boxes are not counted as false
            positives either); List[[G]], G number of ground truth
        max_detections: maximum number of detections which should be evaluated
|
|
|
|
|
    Returns:
        List[Dict[int, Dict[str, np.ndarray]]], one dict per image, mapping each
        class index to a Dict[str, np.ndarray] with the following keys.

        - `dtMatches`: matched detections [T, D], where T = number of
          thresholds, D = number of detections
        - `gtMatches`: matched ground truth boxes [T, G], where T = number
          of thresholds, G = number of ground truth
        - `dtScores`: detection scores [D]
        - `gtIgnore`: whether each ground truth box should be ignored [G]
        - `dtIgnore`: whether each detection should be ignored [T, D]
|
|
|
|
|
Example: |
|
|
|
|
|
.. code-block:: python |
|
|
|
|
|
            import torch

            from monai.data.box_utils import box_iou
|
|
from monai.apps.detection.metrics.coco import COCOMetric |
|
|
from monai.apps.detection.metrics.matching import matching_batch |
|
|
# 3D example outputs of one image from detector |
|
|
val_outputs_all = [ |
|
|
{"boxes": torch.tensor([[1,1,1,3,4,5]],dtype=torch.float16), |
|
|
"labels": torch.randint(3,(1,)), |
|
|
"scores": torch.randn((1,)).absolute()}, |
|
|
] |
|
|
val_targets_all = [ |
|
|
{"boxes": torch.tensor([[1,1,1,2,6,4]],dtype=torch.float16), |
|
|
"labels": torch.randint(3,(1,))}, |
|
|
] |
|
|
|
|
|
coco_metric = COCOMetric( |
|
|
classes=['c0','c1','c2'], iou_list=[0.1], max_detection=[10] |
|
|
) |
|
|
results_metric = matching_batch( |
|
|
iou_fn=box_iou, |
|
|
iou_thresholds=coco_metric.iou_thresholds, |
|
|
pred_boxes=[val_data_i["boxes"].numpy() for val_data_i in val_outputs_all], |
|
|
pred_classes=[val_data_i["labels"].numpy() for val_data_i in val_outputs_all], |
|
|
pred_scores=[val_data_i["scores"].numpy() for val_data_i in val_outputs_all], |
|
|
gt_boxes=[val_data_i["boxes"].numpy() for val_data_i in val_targets_all], |
|
|
gt_classes=[val_data_i["labels"].numpy() for val_data_i in val_targets_all], |
|
|
) |
|
|
val_metric_dict = coco_metric(results_metric) |
|
|
print(val_metric_dict) |
|
|
""" |
|
|
results = [] |
|
|
if gt_ignore is None: |
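        # no gt_ignore given: treat every ground truth box as not ignored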
|
|
gt_ignore = [np.full_like(gt_c, False) for gt_c in gt_classes] |
|
|
|
|
|
for pboxes, pclasses, pscores, gboxes, gclasses, gignore in zip( |
|
|
pred_boxes, pred_classes, pred_scores, gt_boxes, gt_classes, gt_ignore |
|
|
): |
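        # evaluate each class that appears in either the predictions or the ground truth of this image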
|
|
|
|
|
img_classes = np.union1d(pclasses, gclasses) |
|
|
result = {} |
|
|
for c in img_classes: |
|
|
pred_mask = pclasses == c |
|
|
gt_mask = gclasses == c |
|
|
|
|
|
if not np.any(gt_mask): |
|
|
result[c] = _matching_no_gt( |
|
|
iou_thresholds=iou_thresholds, pred_scores=pscores[pred_mask], max_detections=max_detections |
|
|
) |
|
|
elif not np.any(pred_mask): |
|
|
result[c] = _matching_no_pred(iou_thresholds=iou_thresholds, gt_ignore=gignore[gt_mask]) |
|
|
else: |
|
|
result[c] = _matching_single_image_single_class( |
|
|
iou_fn=iou_fn, |
|
|
pred_boxes=pboxes[pred_mask], |
|
|
pred_scores=pscores[pred_mask], |
|
|
gt_boxes=gboxes[gt_mask], |
|
|
gt_ignore=gignore[gt_mask], |
|
|
max_detections=max_detections, |
|
|
iou_thresholds=iou_thresholds, |
|
|
) |
|
|
results.append(result) |
|
|
return results |
|
|
|
|
|
|
|
|
def _matching_no_gt( |
|
|
iou_thresholds: Sequence[float], pred_scores: np.ndarray, max_detections: int |
|
|
) -> dict[str, np.ndarray]: |
|
|
""" |
|
|
    Matching result when there is no ground truth in the image
|
|
|
|
|
    Args:
        iou_thresholds: defines which IoU thresholds should be evaluated
        pred_scores: predicted scores
        max_detections: maximum number of allowed detections per image.
            This function uses this parameter to stay consistent with
            the actual matching function which needs this limit.
|
|
|
|
|
    Returns:
        computed matching, a Dict[str, np.ndarray] with the following keys.

        - `dtMatches`: matched detections [T, D], where T = number of
          thresholds, D = number of detections
        - `gtMatches`: matched ground truth boxes [T, G], where T = number
          of thresholds, G = number of ground truth
        - `dtScores`: detection scores [D]
        - `gtIgnore`: whether each ground truth box should be ignored [G]
        - `dtIgnore`: whether each detection should be ignored [T, D]
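    Example:

        A minimal sketch with illustrative values:

        .. code-block:: python

            res = _matching_no_gt(
                iou_thresholds=[0.5, 0.75], pred_scores=np.array([0.9, 0.3, 0.6]), max_detections=100
            )
            # res["dtMatches"] has shape (2, 3) and is all zeros: with no ground
            # truth, nothing can be matched. res["gtIgnore"] has shape (0,).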
|
|
""" |
|
|
dt_ind = np.argsort(-pred_scores, kind="mergesort") |
|
|
dt_ind = dt_ind[:max_detections] |
|
|
dt_scores = pred_scores[dt_ind] |
|
|
|
|
|
num_preds = len(dt_scores) |
|
|
|
|
|
gt_match: np.ndarray = np.array([[]] * len(iou_thresholds)) |
|
|
dt_match: np.ndarray = np.zeros((len(iou_thresholds), num_preds)) |
|
|
dt_ignore: np.ndarray = np.zeros((len(iou_thresholds), num_preds)) |
|
|
|
|
|
return { |
|
|
"dtMatches": dt_match, |
|
|
"gtMatches": gt_match, |
|
|
"dtScores": dt_scores, |
|
|
"gtIgnore": np.array([]).reshape(-1), |
|
|
"dtIgnore": dt_ignore, |
|
|
} |
|
|
|
|
|
|
|
|
def _matching_no_pred(iou_thresholds: Sequence[float], gt_ignore: np.ndarray) -> dict[str, np.ndarray]: |
|
|
""" |
|
|
    Matching result when there are no predictions in the image
|
|
|
|
|
    Args:
        iou_thresholds: defines which IoU thresholds should be evaluated
        gt_ignore: specifies which ground truth boxes are not counted as
            true positives (detections which match these boxes are not
            counted as false positives either); [G], G number of ground truth
|
|
|
|
|
    Returns:
        dict: computed matching, with the following keys.

        - `dtMatches`: matched detections [T, D], where T = number of
          thresholds, D = number of detections
        - `gtMatches`: matched ground truth boxes [T, G], where T = number
          of thresholds, G = number of ground truth
        - `dtScores`: detection scores [D]
        - `gtIgnore`: whether each ground truth box should be ignored [G]
        - `dtIgnore`: whether each detection should be ignored [T, D]
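    Example:

        A minimal sketch with illustrative values:

        .. code-block:: python

            res = _matching_no_pred(iou_thresholds=[0.5, 0.75], gt_ignore=np.array([0, 1]))
            # res["gtMatches"] has shape (2, 2) and is all zeros: with no
            # predictions, nothing can be matched. res["dtScores"] has shape (0,).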
|
|
""" |
|
|
dt_scores: np.ndarray = np.array([]) |
|
|
dt_match: np.ndarray = np.array([[]] * len(iou_thresholds)) |
|
|
dt_ignore: np.ndarray = np.array([[]] * len(iou_thresholds)) |
|
|
|
|
|
n_gt = 0 if gt_ignore.size == 0 else gt_ignore.shape[0] |
|
|
gt_match = np.zeros((len(iou_thresholds), n_gt)) |
|
|
|
|
|
return { |
|
|
"dtMatches": dt_match, |
|
|
"gtMatches": gt_match, |
|
|
"dtScores": dt_scores, |
|
|
"gtIgnore": gt_ignore.reshape(-1), |
|
|
"dtIgnore": dt_ignore, |
|
|
} |
|
|
|
|
|
|
|
|
def _matching_single_image_single_class( |
|
|
iou_fn: Callable[[np.ndarray, np.ndarray], np.ndarray], |
|
|
pred_boxes: np.ndarray, |
|
|
pred_scores: np.ndarray, |
|
|
gt_boxes: np.ndarray, |
|
|
gt_ignore: np.ndarray, |
|
|
max_detections: int, |
|
|
iou_thresholds: Sequence[float], |
|
|
) -> dict[str, np.ndarray]: |
|
|
""" |
|
|
Adapted from https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/cocoeval.py |
|
|
|
|
|
    Args:
        iou_fn: callable that computes the pairwise overlap (e.g. IoU) of two sets of boxes
        iou_thresholds: defines which IoU thresholds should be evaluated
        pred_boxes: predicted boxes from a single image; [D, dim * 2], D number
            of predictions
        pred_scores: predicted score for each bounding box; [D], D number of
            predictions
        gt_boxes: ground truth boxes; [G, dim * 2], G number of ground truth
        gt_ignore: specifies which ground truth boxes are not counted as
            true positives (detections which match these boxes are not
            counted as false positives either); [G], G number of ground truth
        max_detections: maximum number of detections which should be evaluated
|
|
|
|
|
    Returns:
        dict: computed matching, with the following keys.

        - `dtMatches`: matched detections [T, D], where T = number of
          thresholds, D = number of detections
        - `gtMatches`: matched ground truth boxes [T, G], where T = number
          of thresholds, G = number of ground truth
        - `dtScores`: detection scores [D]
        - `gtIgnore`: whether each ground truth box should be ignored [G]
        - `dtIgnore`: whether each detection should be ignored [T, D]
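    Example:

        A minimal 2D sketch with illustrative values; ``my_iou_fn`` stands in for
        any callable that returns a pairwise IoU matrix of shape [D, G]:

        .. code-block:: python

            res = _matching_single_image_single_class(
                iou_fn=my_iou_fn,  # hypothetical IoU callable, e.g. a NumPy box IoU
                pred_boxes=np.array([[0.0, 0.0, 10.0, 10.0]]),
                pred_scores=np.array([0.8]),
                gt_boxes=np.array([[1.0, 1.0, 11.0, 11.0]]),
                gt_ignore=np.array([0]),
                max_detections=100,
                iou_thresholds=[0.5],
            )
            # res["dtMatches"] is [[1.0]] if the IoU of the two boxes reaches 0.5,
            # and [[0.0]] otherwise.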
|
|
""" |
|
|
|
|
|
dt_ind = np.argsort(-pred_scores, kind="mergesort") |
|
|
dt_ind = dt_ind[:max_detections] |
|
|
|
|
|
pred_boxes = pred_boxes[dt_ind] |
|
|
pred_scores = pred_scores[dt_ind] |
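    # sort ground truth so that boxes to be ignored come last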
|
|
|
|
|
|
|
|
gt_ind = np.argsort(gt_ignore, kind="mergesort") |
|
|
gt_boxes = gt_boxes[gt_ind] |
|
|
gt_ignore = gt_ignore[gt_ind] |
|
|
|
|
|
|
|
|
ious = iou_fn(pred_boxes, gt_boxes) |
|
|
|
|
|
num_preds, num_gts = ious.shape[0], ious.shape[1] |
|
|
gt_match = np.zeros((len(iou_thresholds), num_gts)) |
|
|
dt_match = np.zeros((len(iou_thresholds), num_preds)) |
|
|
dt_ignore = np.zeros((len(iou_thresholds), num_preds)) |
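    # greedy matching as in pycocotools: for each threshold, assign each detection,
    # in descending score order, to the best still-unmatched ground truth box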
|
|
|
|
|
for tind, t in enumerate(iou_thresholds): |
|
|
for dind, _d in enumerate(pred_boxes): |
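            # information about best match so far (m = -1 means no match yet)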
|
|
|
|
|
iou = min([t, 1 - 1e-10]) |
|
|
m = -1 |
|
|
|
|
|
for gind, _g in enumerate(gt_boxes): |
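                # this ground truth box is already matched, move on to the next one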
|
|
|
|
|
if gt_match[tind, gind] > 0: |
|
|
continue |
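                # since ground truth boxes are sorted with ignored ones last, stop searching
                # once this detection matches a non-ignored box and only ignored boxes remain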
|
|
|
|
|
|
|
|
if m > -1 and gt_ignore[m] == 0 and gt_ignore[gind] == 1: |
|
|
break |
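                # continue to the next ground truth box unless this one is a better match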
|
|
|
|
|
|
|
|
if ious[dind, gind] < iou: |
|
|
continue |
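                # best match so far: store its IoU and index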
|
|
|
|
|
|
|
|
iou = ious[dind, gind] |
|
|
m = gind |
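            # if a match was made, record it for both the detection and the ground
            # truth; the detection inherits the ignore flag of its matched box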
|
|
|
|
|
|
|
|
if m == -1: |
|
|
continue |
|
|
else: |
|
|
dt_ignore[tind, dind] = int(gt_ignore[m]) |
|
|
dt_match[tind, dind] = 1 |
|
|
gt_match[tind, m] = 1 |
|
|
|
|
|
|
|
|
return { |
|
|
"dtMatches": dt_match, |
|
|
"gtMatches": gt_match, |
|
|
"dtScores": pred_scores, |
|
|
"gtIgnore": gt_ignore.reshape(-1), |
|
|
"dtIgnore": dt_ignore, |
|
|
} |
|
|
|