# balanced-accuracy / balanced_accuracy_multilabel.py
# Source: Hugging Face Hub upload by OliverOnHF ("Upload 3 files", commit e99840d, verified).
import numpy as np
import evaluate
import datasets
# Metric-card text shown by `evaluate` when this metric is loaded.
_DESCRIPTION = """
Multilabel Balanced Accuracy:
Treat each label as a binary task and compute BA = (TPR + TNR)/2, then average over labels.
Extras:
- average={'macro','weighted','micro'} (default: macro)
- threshold='auto' (per-label Youden's J) or float in (0,1)
- class_mask=[...] (evaluate a subset of labels)
- ignore_index to skip unlabeled samples (e.g., -100)
- support_per_label: when return_per_label=True, also return true positive counts per label (after masking)
- sample_weight: per-sample weights; confusion counts become weighted sums for each label
"""
# Argument documentation surfaced through MetricInfo.inputs_description.
_KWARGS_DESCRIPTION = """
Args:
predictions: list[list[int or float]] of shape (N, L).
If from_probas=True, values are probabilities in [0,1]; otherwise 0/1 labels.
references: list[list[int or float]] of shape (N, L). 0/1 labels; ignore_index is allowed.
from_probas: bool, default False.
threshold: float in (0,1) or 'auto' (per-label). Default 0.5 if from_probas=True.
zero_division: float, default 0.0.
average: 'macro'|'weighted'|'micro', default 'macro'.
class_mask: Optional[list[int]] — only average over these label indices.
ignore_index: int | None, default None.
return_per_label: bool, default False.
sample_weight: Optional[list[float]] — per-sample weights.
"""
# No canonical paper for this utility metric, so no citation is provided.
_CITATION = ""
def _safe_div(num, den, zero_div=0.0):
num = np.asarray(num, dtype=float)
den = np.asarray(den, dtype=float)
out = np.full_like(num, float(zero_div))
mask = den != 0
out[mask] = num[mask] / den[mask]
return out
def _binary_ba(tp, fn, tn, fp, zero_div):
    """Balanced accuracy for one binary task: mean of sensitivity and specificity."""
    sensitivity = _safe_div(tp, tp + fn, zero_div)  # TPR
    specificity = _safe_div(tn, tn + fp, zero_div)  # TNR
    return (sensitivity + specificity) / 2.0
def _best_threshold_per_label(y_true_col, prob_col, zero_div):
p = np.asarray(prob_col, dtype=float)
uniq = np.unique(p)
if uniq.size == 1:
candidates = [float(uniq[0])]
else:
mids = (uniq[:-1] + uniq[1:]) / 2.0
candidates = [float(uniq[0] - 1e-12), *mids.tolist(), float(uniq[-1] + 1e-12)]
best_t, best_ba = None, -1.0
yt = (np.asarray(y_true_col) == 1).astype(int)
for t in candidates:
yp = (p >= t).astype(int)
tp = float(((yt == 1) & (yp == 1)).sum())
fn = float(((yt == 1) & (yp == 0)).sum())
tn = float(((yt == 0) & (yp == 0)).sum())
fp = float(((yt == 0) & (yp == 1)).sum())
ba = _binary_ba(tp, fn, tn, fp, zero_div)
if (ba > best_ba) or (abs(ba - best_ba) < 1e-12 and (best_t is None or abs(t - 0.5) < abs(best_t - 0.5))):
best_ba, best_t = float(ba), float(t)
return best_t, best_ba
class BalancedAccuracyMultilabel(evaluate.Metric):
    """Multilabel balanced accuracy: per-label (TPR + TNR)/2, averaged over labels."""

    def _info(self):
        # Metric metadata consumed by the `evaluate` library.
        return evaluate.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("float64")),
                    "references": datasets.Sequence(datasets.Value("float64")),
                }
            ),
        )

    def _compute(
        self,
        predictions,
        references,
        from_probas: bool = False,
        threshold: float | str = 0.5,
        zero_division: float = 0.0,
        average: str = "macro",
        class_mask: list[int] | None = None,
        ignore_index: int | None = None,
        return_per_label: bool = False,
        sample_weight: list[float] | None = None,
    ):
        """Compute multilabel balanced accuracy (see _KWARGS_DESCRIPTION for args).

        Returns a dict with 'balanced_accuracy' plus, depending on flags,
        'per_label_thresholds', 'per_label_ba' and 'support_per_label'.
        Raises ValueError on malformed shapes, thresholds, or value ranges.
        """
        y_true_in = np.asarray(references, dtype=float)
        y_pred_in = np.asarray(predictions, dtype=float)
        # --- basic shape/option validation -------------------------------
        if y_true_in.ndim != 2 or y_pred_in.ndim != 2:
            raise ValueError("Multilabel expects 2D arrays of shape (N, L).")
        if y_true_in.shape != y_pred_in.shape:
            raise ValueError(f"Shape mismatch: references {y_true_in.shape} vs predictions {y_pred_in.shape}.")
        if not np.all(np.isfinite(y_pred_in)):
            raise ValueError("`predictions` contains NaN/Inf.")
        if average not in {"macro", "weighted", "micro"}:
            raise ValueError("`average` must be one of {'macro','weighted','micro'}.")

        # --- validity mask: drop cells whose reference equals ignore_index
        if ignore_index is not None:
            valid = y_true_in != ignore_index
        else:
            valid = np.ones_like(y_true_in, dtype=bool)
        y_true = (y_true_in == 1).astype(int)
        probs = y_pred_in
        N, L = y_true.shape

        # --- per-sample weights ------------------------------------------
        if sample_weight is not None:
            w_in = np.asarray(sample_weight, dtype=float)
            if w_in.shape[0] != N:
                raise ValueError("`sample_weight` length must match number of samples.")
        else:
            w_in = None

        # --- label subset ------------------------------------------------
        labels = list(range(L))
        if class_mask:
            labels = [j for j in class_mask if 0 <= j < L]
        if not labels:
            return {"balanced_accuracy": float("nan"), "reason": "empty_class_mask_after_filtering"}

        # --- threshold / value-range validation --------------------------
        thr = None  # fixed binarization threshold; stays None for hard labels or 'auto'
        if from_probas:
            if threshold != "auto":
                thr = 0.5 if threshold is None else float(threshold)
                if not (0.0 < thr < 1.0):
                    raise ValueError("`threshold` must be in (0,1) or 'auto' when from_probas=True.")
            if np.any((probs < 0) | (probs > 1)):
                raise ValueError("When from_probas=True, `predictions` must be in [0,1].")
        else:
            uniq = np.unique(probs[valid])
            if not np.isin(uniq, [0.0, 1.0]).all():
                raise ValueError("When from_probas=False, `predictions` must be 0/1 labels.")

        # Everything masked out by ignore_index → nothing to score.
        if not np.any(valid):
            return {"balanced_accuracy": float("nan"), "reason": "empty_after_ignore_index"}

        # --- local helpers (keep confusion/BA logic in one place) --------
        def ba_from_counts(tp, fn, tn, fp):
            # (TPR + TNR) / 2 with zero_division fallback for an empty class.
            tpr = tp / (tp + fn) if (tp + fn) > 0 else float(zero_division)
            tnr = tn / (tn + fp) if (tn + fp) > 0 else float(zero_division)
            return 0.5 * (tpr + tnr)

        def counts(yt, yp, wv):
            # Confusion counts; weighted sums when sample weights are given.
            if wv is None:
                tp = float(((yt == 1) & (yp == 1)).sum())
                fn = float(((yt == 1) & (yp == 0)).sum())
                tn = float(((yt == 0) & (yp == 0)).sum())
                fp = float(((yt == 0) & (yp == 1)).sum())
            else:
                tp = float(wv[(yt == 1) & (yp == 1)].sum())
                fn = float(wv[(yt == 1) & (yp == 0)].sum())
                tn = float(wv[(yt == 0) & (yp == 0)].sum())
                fp = float(wv[(yt == 0) & (yp == 1)].sum())
            return tp, fn, tn, fp

        def best_threshold(yt, p, wv):
            # Youden-style sweep over cut points between sorted unique scores.
            uniq_p = np.unique(p)
            if uniq_p.size == 0:
                # FIX: a fully-masked column used to crash; report a neutral cut.
                return 0.5, float(zero_division)
            if uniq_p.size == 1:
                # FIX: also try cuts just below/above a constant score so the
                # all-negative prediction is reachable (original forced all-positive).
                candidates = [float(uniq_p[0] - 1e-12), float(uniq_p[0]), float(uniq_p[0] + 1e-12)]
            else:
                mids = (uniq_p[:-1] + uniq_p[1:]) / 2.0
                candidates = [float(uniq_p[0] - 1e-12), *mids.tolist(), float(uniq_p[-1] + 1e-12)]
            best_t, best_ba = None, -1.0
            for t in candidates:
                yp = (p >= t).astype(int)
                ba = ba_from_counts(*counts(yt, yp, wv))
                # On near-ties, prefer the threshold closest to 0.5.
                if (ba > best_ba) or (abs(ba - best_ba) < 1e-12 and (best_t is None or abs(t - 0.5) < abs(best_t - 0.5))):
                    best_ba, best_t = float(ba), float(t)
            return best_t, best_ba

        # --- per-label balanced accuracy ---------------------------------
        auto_mode = from_probas and threshold == "auto"
        per_label_ba, per_label_thr = [], []
        if auto_mode:
            for j in labels:
                vmask = valid[:, j]
                wv = None if w_in is None else w_in[vmask]
                # FIX: honor sample_weight in auto mode; the original ignored
                # weights here, contradicting the documented contract.
                t_opt, ba_opt = best_threshold(y_true[vmask, j], probs[vmask, j], wv)
                per_label_ba.append(float(ba_opt))
                per_label_thr.append(float(t_opt))
        else:
            # Binarize once: fixed threshold for probabilities, cast for hard labels.
            y_pred = (probs >= thr).astype(int) if from_probas else probs.astype(int)
            for j in labels:
                vmask = valid[:, j]
                wv = None if w_in is None else w_in[vmask]
                tp, fn, tn, fp = counts(y_true[vmask, j], y_pred[vmask, j], wv)
                per_label_ba.append(float(ba_from_counts(tp, fn, tn, fp)))
        per_label_ba = np.asarray(per_label_ba, dtype=float)

        # Unweighted positive counts per label (after masking); drives 'weighted'.
        # NOTE(review): support stays unweighted even with sample_weight, matching
        # the original behavior and the documented "true positive counts".
        support_per_label = [int(y_true[valid[:, j], j].sum()) for j in labels]

        # --- averaging ---------------------------------------------------
        if average == "macro":
            score = float(np.mean(per_label_ba)) if per_label_ba.size else float("nan")
        elif average == "weighted":
            weights = np.asarray(support_per_label, dtype=float)
            if weights.sum() > 0:
                score = float(np.average(per_label_ba, weights=weights))
            else:
                # No positives anywhere: fall back to the macro mean.
                score = float(np.mean(per_label_ba)) if per_label_ba.size else float("nan")
        else:  # micro: pool confusion counts across all selected labels
            TP = FN = TN = FP = 0.0
            for pos, j in enumerate(labels):
                vmask = valid[:, j]
                wv = None if w_in is None else w_in[vmask]
                if auto_mode:
                    # FIX: index by position (was labels.index(j), which is O(n)
                    # and wrong when class_mask contains duplicate indices).
                    yp = (probs[vmask, j] >= per_label_thr[pos]).astype(int)
                else:
                    # y_pred is always bound on this path; the original's
                    # "'y_pred' in locals()" fallback was dead code.
                    yp = y_pred[vmask, j]
                tp, fn, tn, fp = counts(y_true[vmask, j], yp, wv)
                TP += tp
                FN += fn
                TN += tn
                FP += fp
            score = float(ba_from_counts(TP, FN, TN, FP))

        # --- assemble output ---------------------------------------------
        out = {"balanced_accuracy": score}
        if auto_mode:
            out["per_label_thresholds"] = [float(x) for x in per_label_thr]
        if return_per_label:
            out["per_label_ba"] = [float(x) for x in per_label_ba]
            out["support_per_label"] = support_per_label
        return out