Spaces:
Runtime error
Runtime error
Upload metrics.py
Browse files- metrics.py +42 -0
metrics.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
|
| 3 |
+
This module provides functions to compute segmentation evaluation metrics (Dice, AUC-ROC, accuracy).
|
| 4 |
+
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
from sklearn.metrics import roc_auc_score
|
| 9 |
+
|
| 10 |
+
# Function to compute Dice index score for a tissue region indicated by label
def compute_dice(pred, gt, label):
    """Compute the Dice coefficient for a single label.

    Both inputs are binarized against ``label`` (pixel == label -> 1, else 0)
    and the standard Dice score 2*|P ∩ G| / (|P| + |G|) is returned.

    Fix over the original: the old in-place relabeling
    (``pred[pred != label] = 0`` followed by ``pred[pred == label] = 1``)
    produced garbage for ``label == 0`` — the first pass zeroed everything,
    so the second pass marked every pixel as foreground. Boolean masks are
    order-independent and correct for any label value; results for
    ``label != 0`` are unchanged.

    Args:
        pred: predicted label map (array-like of ints).
        gt: ground-truth label map (array-like, same shape as pred).
        label: the label value whose region is scored.

    Returns:
        Dice score in [0, 1] as a float, or -1 when the ground truth
        contains no pixels of ``label`` (sentinel kept from the original).
    """
    pred_mask = np.asarray(pred).flatten() == label
    gt_mask = np.asarray(gt).flatten() == label

    gt_pixels = np.sum(gt_mask)
    if gt_pixels == 0:
        # Undefined: the label is absent from the ground truth.
        return -1

    denom = np.sum(pred_mask) + gt_pixels
    intersection = np.sum(pred_mask & gt_mask)
    return intersection * 2.0 / denom
|
| 24 |
+
|
| 25 |
+
# Function to compute AUC-ROC for a tissue region indicated by label
def compute_auc_roc(pred, gt, label):
    """Compute the AUC-ROC for a single label.

    Both maps are binarized against ``label`` and scored with
    ``sklearn.metrics.roc_auc_score`` on the flattened arrays.

    Sentinels (preserved from the original implementation):
        -1  when the ground truth has no positives, when the ground truth
            is all positives, or when the prediction is all positives
            (AUC is degenerate in these cases);
         0  when the prediction contains no positives at all.

    Args:
        pred: predicted label map.
        gt: ground-truth label map, same shape as pred.
        label: the label value whose region is scored.

    Returns:
        The AUC-ROC as a float, or one of the sentinel values above.
    """
    gt_binary = np.where(gt == label, 1, 0)
    pred_binary = np.where(pred == label, 1, 0)

    # Degenerate cases where the ROC curve is undefined or meaningless.
    degenerate = (
        np.sum(gt_binary) == 0
        or np.all(gt_binary == 1)
        or np.all(pred_binary == 1)
    )
    if degenerate:
        return -1

    # No positive predictions at all: score 0 by this module's convention.
    if not np.any(pred_binary):
        return 0

    return roc_auc_score(gt_binary.flatten(), pred_binary.flatten())
|
| 37 |
+
|
| 38 |
+
# Function to compute Accuracy for a tissue region indicated by label
def compute_accuracy_metrics(pred, gt):
    """Count element-wise agreement between a prediction and ground truth.

    Args:
        pred: predicted label map (numpy array).
        gt: ground-truth label map, same shape as pred.

    Returns:
        A ``(correct, total)`` pair: the number of positions where
        ``pred == gt`` and the total number of elements in ``pred``.
        The caller can divide the two to obtain the accuracy.
    """
    matches = np.count_nonzero(pred == gt)
    return matches, pred.size
|