# balanced-accuracy / balanced_topk_accuracy.py
# Source: Hugging Face Hub, uploaded by OliverOnHF ("Upload 3 files", commit e99840d, verified)
import numpy as np
import evaluate
import datasets
# Metric-card strings consumed by BalancedTopKAccuracy._info(); they are
# user-facing runtime data (shown by the `evaluate` library), not docstrings.
_DESCRIPTION = """
Balanced (macro) Top-K Accuracy for multiclass classification.
For each class c, compute recall@k (fraction of samples of class c whose top-k predictions contain c),
then macro-average over classes. Accepts (N, K) score/prob arrays.
Supports sample_weight to compute weighted recalls per class.
"""
# Argument documentation surfaced through MetricInfo.inputs_description.
_KWARGS_DESCRIPTION = """
Args:
predictions: 2D array-like of shape (N, K) with class scores/probabilities.
references: 1D list/array of integer labels in [0, K-1].
k: int, top-k (default 1). If None, use k_list instead.
k_list: Optional[list[int]] to compute multiple ks at once (e.g., [1,5]).
class_mask: Optional[list[int]] — only average over these classes (e.g., tail classes).
sample_weight: Optional[list[float]] — per-sample weights.
zero_division: float, default 0.0.
return_per_class: bool, default False. If True, also return per-class recalls@k.
"""
# No citation is associated with this metric.
_CITATION = ""
def _div(a, b, zero_div=0.0):
return (a / b) if b != 0 else float(zero_div)
class BalancedTopKAccuracy(evaluate.Metric):
    """Balanced (macro-averaged) top-k accuracy for multiclass classification.

    For each class c, recall@k is the (optionally sample-weighted) fraction of
    samples of class c whose top-k scored predictions include c; the metric is
    the unweighted mean of these per-class recalls.
    """

    def _info(self):
        # Declare the expected input schema: one (K,) float score row per
        # sample in `predictions`, one integer label per sample in `references`.
        return evaluate.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {"predictions": datasets.Sequence(datasets.Value("float64")),
                 "references": datasets.Value("int64")}
            ),
        )

    def _compute(
        self,
        predictions,
        references,
        k: int | None = 1,
        k_list: list[int] | None = None,
        class_mask: list[int] | None = None,
        sample_weight: list[float] | None = None,
        zero_division: float = 0.0,
        return_per_class: bool = False,
    ):
        """Compute balanced top-k accuracy.

        Args:
            predictions: (N, K) array-like of per-class scores/probabilities.
            references: length-N array-like of integer labels in [0, K-1].
            k: top-k to evaluate (default 1); ignored when `k_list` is given.
            k_list: optional list of ks to evaluate in one pass.
            class_mask: optional subset of class indices to average over.
            sample_weight: optional length-N per-sample weights.
            zero_division: value recorded for classes with zero (weighted) support.
            return_per_class: if True, also return per-class recalls@k.

        Returns:
            dict with "balanced_topk_accuracy": a float when a single k is
            evaluated, else a {k: float} mapping; "per_class_recall" is added
            (same single/multi shape) when `return_per_class` is True.

        Raises:
            ValueError: on shape/length mismatch, non-finite scores,
                out-of-range labels, or when no valid positive k is provided.
        """
        y_true = np.asarray(references, dtype=int)
        scores = np.asarray(predictions, dtype=float)
        if scores.ndim != 2 or y_true.ndim != 1:
            raise ValueError("`predictions` must be (N, K) scores; `references` must be 1D labels.")
        N, K = scores.shape
        if y_true.shape[0] != N:
            raise ValueError(f"Length mismatch: references {y_true.shape[0]} vs predictions {N}.")
        if not np.all(np.isfinite(scores)):
            raise ValueError("`predictions` contains NaN/Inf.")
        if (y_true < 0).any() or (y_true >= K).any():
            raise ValueError(f"`references` must be within [0, {K-1}] for given predictions shape (N,{K}).")
        w = None
        if sample_weight is not None:
            w = np.asarray(sample_weight, dtype=float)
            if w.shape[0] != N:
                raise ValueError("`sample_weight` length must match number of samples.")
        ks_in = k_list if k_list is not None else [k if k is not None else 1]
        ks = sorted(set(int(x) for x in ks_in if x is not None and int(x) >= 1))
        if not ks:
            raise ValueError("Provide a positive integer `k` or a non-empty `k_list`.")
        # BUGFIX: clamp to K and dedupe AGAIN — clamping can collapse distinct
        # ks (e.g. [5, 10] with K=3 -> [3, 3]); the previous code kept the
        # duplicates, recomputing the same k and misreporting len(ks) when
        # deciding the single-k vs multi-k output shape below.
        ks = sorted(set(min(kk, K) for kk in ks))
        if class_mask is None or len(class_mask) == 0:
            classes = list(range(K))
        else:
            classes = [c for c in class_mask if 0 <= c < K]
            if not classes:
                return {"balanced_topk_accuracy": float("nan"), "reason": "empty_class_mask_after_filtering"}
        # Single descending argsort; each top-k set is a prefix of these rows.
        sorted_idx = np.argsort(-scores, axis=1)
        results = {}
        per_class = {}
        for kk in ks:
            topk = sorted_idx[:, :kk]
            recalls = []
            for c in classes:
                mask_c = (y_true == c)
                # (Weighted) support of class c; zero support -> zero_division.
                denom = float(w[mask_c].sum()) if w is not None else float(mask_c.sum())
                if denom == 0:
                    recalls.append(float(zero_division))
                    continue
                # A sample counts as a hit when its true class appears anywhere
                # in its top-kk predictions.
                hit_mask = np.any(topk[mask_c] == c, axis=1)
                hits = float(w[mask_c][hit_mask].sum()) if w is not None else float(hit_mask.sum())
                recalls.append(_div(hits, denom, zero_division))
            ba_k = float(np.mean(recalls)) if recalls else float("nan")
            results[kk] = ba_k
            if return_per_class:
                per_class[kk] = [float(x) for x in recalls]
        # Single k -> scalar result; multiple ks -> {k: value} mapping.
        out = {"balanced_topk_accuracy": results[ks[0]] if len(ks) == 1 else results}
        if return_per_class:
            out["per_class_recall"] = per_class if len(ks) > 1 else per_class[ks[0]]
        return out