Upload balanced_accuracy_multilabel.py
Browse files- balanced_accuracy_multilabel.py +127 -27
balanced_accuracy_multilabel.py
CHANGED
|
@@ -4,21 +4,29 @@ import datasets
|
|
| 4 |
|
| 5 |
_DESCRIPTION = """
|
| 6 |
Multilabel Balanced Accuracy:
|
| 7 |
-
|
| 8 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 9 |
"""
|
| 10 |
|
| 11 |
_KWARGS_DESCRIPTION = """
|
| 12 |
Args:
|
| 13 |
predictions: list[list[int or float]] of shape (N, L).
|
| 14 |
If from_probas=True, values are probabilities in [0,1]; otherwise 0/1 labels.
|
| 15 |
-
references: list[list[int]] of shape (N, L)
|
| 16 |
from_probas: bool, default False. If True, binarize predictions with `threshold`.
|
| 17 |
-
threshold: float in (0,1)
|
| 18 |
-
zero_division: float, default 0.0. Used when denominator is 0 (no positives/negatives
|
| 19 |
-
|
|
|
|
|
|
|
|
|
|
| 20 |
Returns:
|
| 21 |
-
dict: {"balanced_accuracy": float, optional "per_label_ba": list[float]}
|
| 22 |
"""
|
| 23 |
|
| 24 |
_CITATION = ""
|
|
@@ -33,6 +41,35 @@ def _safe_div(num, den, zero_div=0.0):
|
|
| 33 |
return out
|
| 34 |
|
| 35 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 36 |
class BalancedAccuracyMultilabel(evaluate.Metric):
|
| 37 |
def _info(self):
|
| 38 |
return evaluate.MetricInfo(
|
|
@@ -42,7 +79,7 @@ class BalancedAccuracyMultilabel(evaluate.Metric):
|
|
| 42 |
features=datasets.Features(
|
| 43 |
{
|
| 44 |
"predictions": datasets.Sequence(datasets.Value("float64")),
|
| 45 |
-
"references":
|
| 46 |
}
|
| 47 |
),
|
| 48 |
)
|
|
@@ -52,34 +89,97 @@ class BalancedAccuracyMultilabel(evaluate.Metric):
|
|
| 52 |
predictions,
|
| 53 |
references,
|
| 54 |
from_probas: bool = False,
|
| 55 |
-
threshold: float = 0.5,
|
| 56 |
zero_division: float = 0.0,
|
|
|
|
|
|
|
|
|
|
| 57 |
return_per_label: bool = False,
|
| 58 |
):
|
| 59 |
-
|
| 60 |
y_pred_in = np.asarray(predictions, dtype=float)
|
| 61 |
|
| 62 |
-
if
|
| 63 |
raise ValueError("Multilabel expects 2D arrays of shape (N, L).")
|
| 64 |
-
if
|
| 65 |
-
raise ValueError(f"Shape mismatch: references {
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 66 |
|
| 67 |
-
|
|
|
|
| 68 |
|
| 69 |
L = y_true.shape[1]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 70 |
per_label_ba = []
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
|
| 82 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 83 |
if return_per_label:
|
| 84 |
-
out["per_label_ba"] = per_label_ba
|
| 85 |
return out
|
|
|
|
| 4 |
|
| 5 |
# Metric card strings consumed by evaluate.MetricInfo (see _info below in the
# original file). Reconstructed as clean literals from the garbled diff dump.
_DESCRIPTION = """
Multilabel Balanced Accuracy:
Treat each label as a binary task and compute BA = (TPR + TNR)/2, then average over labels.

Extras:
- average={'macro','weighted','micro'} (default: macro)
- threshold='auto' (per-label Youden's J) or float in (0,1)
- class_mask=[...] (evaluate a subset of labels)
- ignore_index to skip unlabeled samples (e.g., -100)
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: list[list[int or float]] of shape (N, L).
        If from_probas=True, values are probabilities in [0,1]; otherwise 0/1 labels.
    references: list[list[int or float]] of shape (N, L). 0/1 labels; ignore_index is allowed.
    from_probas: bool, default False. If True, binarize predictions with `threshold`.
    threshold: float in (0,1) or 'auto' (per-label). Default 0.5 if from_probas=True.
    zero_division: float, default 0.0. Used when denominator is 0 (no positives/negatives).
    average: 'macro'|'weighted'|'micro', default 'macro'.
    class_mask: Optional[list[int]] — only average over these label indices.
    ignore_index: int | None, default None. If set, samples with reference==ignore_index are skipped.
    return_per_label: bool, default False — also return per-label BA list (after masking).
Returns:
    dict: {"balanced_accuracy": float, optional "per_label_ba": list[float], optional "per_label_thresholds": list[float]}
"""

_CITATION = ""
|
|
|
|
| 41 |
return out
|
| 42 |
|
| 43 |
|
| 44 |
+
def _binary_ba(tp, fn, tn, fp, zero_div):
    """Balanced accuracy of one binary confusion matrix: mean of TPR and TNR.

    `zero_div` is substituted for either rate when its denominator is zero
    (no actual positives / no actual negatives), via _safe_div.
    """
    sensitivity = _safe_div(tp, tp + fn, zero_div)  # TPR
    specificity = _safe_div(tn, tn + fp, zero_div)  # TNR
    return (sensitivity + specificity) / 2.0
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def _best_threshold_per_label(y_true_col, prob_col, zero_div):
    """Return (t_opt, ba_opt) for one label by maximizing balanced accuracy.

    Candidate thresholds are the midpoints between consecutive unique
    probability values, plus sentinels just below/above the extremes
    (a Youden's-J-style sweep). Near-ties (|ΔBA| < 1e-12) are broken in
    favor of the threshold closest to 0.5.

    Args:
        y_true_col: 1D array-like of 0/1 references for a single label.
        prob_col: 1D array-like of predicted probabilities for that label.
        zero_div: value used by _binary_ba when a rate's denominator is 0.
    """
    p = np.asarray(prob_col, dtype=float)
    uniq = np.unique(p)
    if uniq.size == 1:
        candidates = [float(uniq[0])]
    else:
        mids = (uniq[:-1] + uniq[1:]) / 2.0
        candidates = [uniq[0] - 1e-12] + mids.tolist() + [uniq[-1] + 1e-12]
    yt = (np.asarray(y_true_col) == 1).astype(int)
    best_t, best_ba = None, float("-inf")
    for t in candidates:
        yp = (p >= t).astype(int)
        tp = float(((yt == 1) & (yp == 1)).sum())
        fn = float(((yt == 1) & (yp == 0)).sum())
        tn = float(((yt == 0) & (yp == 0)).sum())
        fp = float(((yt == 0) & (yp == 1)).sum())
        ba = _binary_ba(tp, fn, tn, fp, zero_div)
        # BUG FIX: the old tie-break used `(best_t or 0.5)`, which misreads a
        # legitimate best_t of 0.0 as "unset" and compares against 0.5.
        # Handle the first candidate explicitly instead.
        if best_t is None or ba > best_ba:
            best_ba, best_t = ba, float(t)
        elif abs(ba - best_ba) < 1e-12 and abs(t - 0.5) < abs(best_t - 0.5):
            best_ba, best_t = ba, float(t)
    return best_t, best_ba
|
| 71 |
+
|
| 72 |
+
|
| 73 |
class BalancedAccuracyMultilabel(evaluate.Metric):
|
| 74 |
def _info(self):
|
| 75 |
return evaluate.MetricInfo(
|
|
|
|
| 79 |
features=datasets.Features(
|
| 80 |
{
|
| 81 |
"predictions": datasets.Sequence(datasets.Value("float64")),
|
| 82 |
+
"references": datasets.Sequence(datasets.Value("float64")),
|
| 83 |
}
|
| 84 |
),
|
| 85 |
)
|
|
|
|
| 89 |
predictions,
|
| 90 |
references,
|
| 91 |
from_probas: bool = False,
|
| 92 |
+
threshold: float | str = 0.5,
|
| 93 |
zero_division: float = 0.0,
|
| 94 |
+
average: str = "macro",
|
| 95 |
+
class_mask: list[int] | None = None,
|
| 96 |
+
ignore_index: int | None = None,
|
| 97 |
return_per_label: bool = False,
|
| 98 |
):
|
| 99 |
+
y_true_in = np.asarray(references, dtype=float)
|
| 100 |
y_pred_in = np.asarray(predictions, dtype=float)
|
| 101 |
|
| 102 |
+
if y_true_in.ndim != 2 or y_pred_in.ndim != 2:
|
| 103 |
raise ValueError("Multilabel expects 2D arrays of shape (N, L).")
|
| 104 |
+
if y_true_in.shape != y_pred_in.shape:
|
| 105 |
+
raise ValueError(f"Shape mismatch: references {y_true_in.shape} vs predictions {y_pred_in.shape}.")
|
| 106 |
+
|
| 107 |
+
if ignore_index is not None:
|
| 108 |
+
mask = (y_true_in != ignore_index)
|
| 109 |
+
valid = mask.astype(bool)
|
| 110 |
+
else:
|
| 111 |
+
valid = np.ones_like(y_true_in, dtype=bool)
|
| 112 |
|
| 113 |
+
y_true = (y_true_in == 1).astype(int)
|
| 114 |
+
probs = y_pred_in
|
| 115 |
|
| 116 |
L = y_true.shape[1]
|
| 117 |
+
labels = list(range(L))
|
| 118 |
+
if class_mask:
|
| 119 |
+
labels = [j for j in class_mask if 0 <= j < L]
|
| 120 |
+
if not labels:
|
| 121 |
+
return {"balanced_accuracy": float("nan")}
|
| 122 |
+
|
| 123 |
per_label_ba = []
|
| 124 |
+
per_label_thr = []
|
| 125 |
+
|
| 126 |
+
if from_probas and threshold == "auto":
|
| 127 |
+
for j in labels:
|
| 128 |
+
vmask = valid[:, j]
|
| 129 |
+
t_opt, ba_opt = _best_threshold_per_label(y_true[vmask, j], probs[vmask, j], zero_division)
|
| 130 |
+
per_label_ba.append(float(ba_opt))
|
| 131 |
+
per_label_thr.append(float(t_opt))
|
| 132 |
+
else:
|
| 133 |
+
if from_probas:
|
| 134 |
+
thr = 0.5 if threshold is None else float(threshold)
|
| 135 |
+
y_pred = (probs >= thr).astype(int)
|
| 136 |
+
else:
|
| 137 |
+
y_pred = probs.astype(int)
|
| 138 |
+
|
| 139 |
+
for j in labels:
|
| 140 |
+
vmask = valid[:, j]
|
| 141 |
+
yt, yp = y_true[vmask, j], y_pred[vmask, j]
|
| 142 |
+
tp = float(((yt == 1) & (yp == 1)).sum())
|
| 143 |
+
fn = float(((yt == 1) & (yp == 0)).sum())
|
| 144 |
+
tn = float(((yt == 0) & (yp == 0)).sum())
|
| 145 |
+
fp = float(((yt == 0) & (yp == 1)).sum())
|
| 146 |
+
per_label_ba.append(float(_binary_ba(tp, fn, tn, fp, zero_division)))
|
| 147 |
+
|
| 148 |
+
per_label_ba = np.asarray(per_label_ba, dtype=float)
|
| 149 |
+
|
| 150 |
+
if average == "macro":
|
| 151 |
+
score = float(np.mean(per_label_ba)) if per_label_ba.size else float("nan")
|
| 152 |
+
elif average == "weighted":
|
| 153 |
+
weights = []
|
| 154 |
+
for j in labels:
|
| 155 |
+
vmask = valid[:, j]
|
| 156 |
+
weights.append(int(y_true[vmask, j].sum()))
|
| 157 |
+
weights = np.asarray(weights, dtype=float)
|
| 158 |
+
if weights.sum() == 0:
|
| 159 |
+
score = float(np.mean(per_label_ba))
|
| 160 |
+
else:
|
| 161 |
+
score = float(np.average(per_label_ba, weights=weights))
|
| 162 |
+
elif average == "micro":
|
| 163 |
+
TP = FP = TN = FN = 0.0
|
| 164 |
+
for j in labels:
|
| 165 |
+
vmask = valid[:, j]
|
| 166 |
+
yt = y_true[vmask, j]
|
| 167 |
+
if from_probas and threshold == "auto":
|
| 168 |
+
t = per_label_thr[labels.index(j)]
|
| 169 |
+
yp = (probs[vmask, j] >= t).astype(int)
|
| 170 |
+
else:
|
| 171 |
+
yp = y_pred[vmask, j] if 'y_pred' in locals() else (probs[vmask, j] >= 0.5).astype(int)
|
| 172 |
+
TP += float(((yt == 1) & (yp == 1)).sum())
|
| 173 |
+
FN += float(((yt == 1) & (yp == 0)).sum())
|
| 174 |
+
TN += float(((yt == 0) & (yp == 0)).sum())
|
| 175 |
+
FP += float(((yt == 0) & (yp == 1)).sum())
|
| 176 |
+
score = float(_binary_ba(TP, FN, TN, FP, zero_division))
|
| 177 |
+
else:
|
| 178 |
+
raise ValueError("average must be one of {'macro','weighted','micro'}.")
|
| 179 |
+
|
| 180 |
+
out = {"balanced_accuracy": score}
|
| 181 |
+
if from_probas and threshold == "auto":
|
| 182 |
+
out["per_label_thresholds"] = [float(x) for x in per_label_thr]
|
| 183 |
if return_per_label:
|
| 184 |
+
out["per_label_ba"] = [float(x) for x in per_label_ba]
|
| 185 |
return out
|