| from sklearn.metrics import auc, roc_auc_score, average_precision_score, f1_score, precision_recall_curve, pairwise
|
| import numpy as np
|
| from skimage import measure
|
|
|
def cal_pro_score(masks, amaps, max_step=200, expect_fpr=0.3):
    """Compute the PRO (Per-Region Overlap) AUC score for anomaly maps.

    Sweeps ``max_step`` thresholds between the min and max of ``amaps``; at
    each threshold it records the mean per-region overlap (fraction of each
    ground-truth region covered by the binarized prediction) and the
    pixel-wise false-positive rate over normal pixels.  The score is the area
    under the PRO-vs-FPR curve restricted to FPR < ``expect_fpr``, with the
    FPR axis min-max rescaled to [0, 1].

    Args:
        masks: binary ground-truth masks, array of shape (N, H, W).
        amaps: real-valued anomaly maps, array of shape (N, H, W).
        max_step: number of thresholds to evaluate.
        expect_fpr: keep only curve points whose FPR is below this value.

    Returns:
        float: normalized PRO-AUC.
    """
    min_th, max_th = amaps.min(), amaps.max()
    delta = (max_th - min_th) / max_step

    pros, fprs = [], []
    for th in np.arange(min_th, max_th, delta):
        # Binarize the anomaly maps at the current threshold.
        binary_amaps = amaps > th

        # Per-region overlap for every connected GT region in every image.
        pro = []
        for binary_amap, mask in zip(binary_amaps, masks):
            for region in measure.regionprops(measure.label(mask)):
                tp_pixels = binary_amap[region.coords[:, 0], region.coords[:, 1]].sum()
                pro.append(tp_pixels / region.area)

        # Pixel-wise FPR over the normal (non-anomalous) pixels.
        inverse_masks = 1 - masks
        fp_pixels = np.logical_and(inverse_masks, binary_amaps).sum()
        fpr = fp_pixels / inverse_masks.sum()

        # Guard: np.mean([]) yields NaN (with a RuntimeWarning) when no GT
        # region exists, which would silently corrupt the final AUC.
        pros.append(np.mean(pro) if pro else 0.0)
        fprs.append(fpr)

    pros, fprs = np.array(pros), np.array(fprs)

    # Keep only the low-FPR portion of the curve and rescale it to [0, 1].
    # NOTE(review): if every retained FPR is identical (e.g. a perfect
    # detector), min-max rescaling divides by zero, as in the original code —
    # callers are expected to supply amaps with a non-degenerate FPR sweep.
    idxes = fprs < expect_fpr
    fprs = fprs[idxes]
    fprs = (fprs - fprs.min()) / (fprs.max() - fprs.min())
    pro_auc = auc(fprs, pros[idxes])
    return pro_auc
|
|
|
|
|
def image_level_metrics(results, obj, metric):
    """Compute an image-level anomaly detection metric for one object class.

    Args:
        results: dict mapping object name -> dict containing 'gt_sp'
            (image-level binary labels) and 'pr_sp' (image-level anomaly
            scores), each an array-like of equal length.
        obj: object/class name to look up in ``results``.
        metric: 'image-auroc' or 'image-ap'.

    Returns:
        float: the requested metric value.

    Raises:
        ValueError: if ``metric`` is not one of the supported names.
    """
    gt = np.array(results[obj]['gt_sp'])
    pr = np.array(results[obj]['pr_sp'])
    if metric == 'image-auroc':
        return roc_auc_score(gt, pr)
    if metric == 'image-ap':
        return average_precision_score(gt, pr)
    # Previously an unrecognized metric fell through both branches and raised
    # an obscure NameError on the unbound `performance`; fail fast instead.
    raise ValueError(f"Unknown image-level metric: {metric!r}")
|
|
|
|
|
|
|
def pixel_level_metrics(results, obj, metric):
    """Compute a pixel-level anomaly segmentation metric for one object class.

    Args:
        results: dict mapping object name -> dict containing 'imgs_masks'
            (binary ground-truth masks) and 'anomaly_maps' (real-valued
            anomaly maps), both array-like of shape (N, H, W) or (N, 1, H, W).
        obj: object/class name to look up in ``results``.
        metric: 'pixel-auroc' or 'pixel-aupro'.

    Returns:
        float: the requested metric value.

    Raises:
        ValueError: if ``metric`` is not one of the supported names.
    """
    gt = np.array(results[obj]['imgs_masks'])
    pr = np.array(results[obj]['anomaly_maps'])
    if metric == 'pixel-auroc':
        return roc_auc_score(gt.ravel(), pr.ravel())
    if metric == 'pixel-aupro':
        # Drop a singleton channel axis (N, 1, H, W) -> (N, H, W) if present.
        if gt.ndim == 4:
            gt = gt.squeeze(1)
        if pr.ndim == 4:
            pr = pr.squeeze(1)
        return cal_pro_score(gt, pr)
    # Previously an unrecognized metric fell through both branches and raised
    # an obscure NameError on the unbound `performance`; fail fast instead.
    raise ValueError(f"Unknown pixel-level metric: {metric!r}")
|
| |