| on | |
| maps or list of prediction result filenames. | |
| gt_seg_maps (list[ndarray] | list[str]): list of ground truth | |
| segmentation maps or list of label filenames. | |
| num_classes (int): Number of categories. | |
| ignore_index (int): Index that will be ignored in evaluation. | |
| nan_to_num (int, optional): If specified, NaN values will be replaced | |
| by the numbers defined by the user. Default: None. | |
| label_map (dict): Mapping old labels to new labels. Default: dict(). | |
| reduce_zero_label (bool): Whether to ignore zero label. Default: False. | |
| beta (int): Determines the weight of recall in the combined score. | |
| Default: 1. | |
| Returns: | |
| dict[str, float | ndarray]: Default metrics. | |
| <aAcc> float: Overall accuracy on all images. | |
| <Fscore> ndarray: Per category f-score, shape (num_classes, ). | |
| <Precision> ndarray: Per category precision, shape (num_classes, ). | |
| <Recall> ndarray: Per category recall, shape (num_classes, ). | |
| """ | |
| fscore_result = eval_metrics( | |
| results=results, | |
| gt_seg_maps=gt_seg_maps, | |
| num_classes=num_classes, | |
| ignore_index=ignore_index, | |
| metrics=['mFscore'], | |
| nan_to_num=nan_to_num, | |
| label_map=label_map, | |
| reduce_zero_label=reduce_zero_label, | |
| beta=beta) | |
| return fscore_result | |
| def eval_metrics(results, | |
| gt_seg_maps, | |
| num_classes, | |
| ignore_index, | |
| metrics=['mIoU'], | |
| nan_to_num=None, | |
| label_map=dict(), | |
| reduce_zero_label=False, | |
| beta=1): | |
| """Calculate evaluation metrics | |
| Args: | |
| results (list[ndarray] | list[str]): List of prediction segmentation | |
| maps or list of prediction result filenames. | |
| gt_seg_maps (list[ndarray] | list[str]): list of ground truth | |
| segmentation maps or list of label filenames. | |
| num_classes (int): Number of categories. | |
| ignore_index (int): Index that will be ignored in evaluation. | |
| metrics (list[str] | str): Metrics to be evaluated: 'mIoU', 'mDice' and 'mFscore'. | |
| nan_to_num (int, optional): If specified, NaN values will be replaced | |
| by the numbers defined by the user. Default: None. | |
| label_map (dict): Mapping old labels to new labels. Default: dict(). | |
| reduce_zero_label (bool): Whether to ignore zero label. Default: False. | |
| Returns: | |
| float: Overall accuracy on all images. | |
| ndarray: Per category accuracy, shape (num_classes, ). | |
| ndarray: Per category evaluation metrics, shape (num_classes, ). | |
| """ | |
| if isinstance(metrics, str): | |
| metrics = [metrics] | |
| allowed_metrics = ['mIoU', 'mDice', 'mFscore'] | |
| if not set(metrics).issubset(set(allowed_metrics)): | |
| raise KeyError('metrics {} is not supported'.format(metrics)) | |
| total_area_intersect, total_area_union, total_area_pred_label, \ | |
| total_area_label = total_intersect_and_union( | |
| results, gt_seg_maps, num_classes, ignore_index, label_map, | |
| reduce_zero_label) | |
| all_acc = total_area_intersect.sum() / total_area_label.sum() | |
| ret_metrics = OrderedDict({'aAcc': all_acc}) | |
| for metri |