Upload balanced_topk_accuracy.py
balanced_topk_accuracy.py (ADDED, +88 -0)
import numpy as np
import evaluate
import datasets

_DESCRIPTION = """
Balanced (macro) Top-K Accuracy for multiclass classification.
For each class c, compute recall@k (the fraction of samples of class c for which c appears
among the top-k predictions), then macro-average over classes. Accepts (N, K) score/probability arrays.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: 2D array-like of shape (N, K) with class scores/probabilities.
    references: 1D list/array of integer labels in [0, K-1].
    k: int, top-k (default 1). If None, use k_list instead.
    k_list: Optional[list[int]] to compute multiple ks at once (e.g., [1, 5]).
    zero_division: float, default 0.0. Used when a class has no positive samples.
    return_per_class: bool, default False. If True, also return per-class recall@k.
Returns:
    dict with {"balanced_topk_accuracy": float or dict[int, float]}.
    If k_list is provided, returns a dict mapping k -> score. Optionally adds "per_class_recall".
"""

_CITATION = ""


class BalancedTopKAccuracy(evaluate.Metric):
    def _info(self):
        return evaluate.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("float64")),
                    "references": datasets.Value("int64"),
                }
            ),
        )

    def _compute(
        self,
        predictions,
        references,
        k: int | None = 1,
        k_list: list[int] | None = None,
        zero_division: float = 0.0,
        return_per_class: bool = False,
    ):
        y_true = np.asarray(references, dtype=int)
        scores = np.asarray(predictions, dtype=float)

        if scores.ndim != 2 or y_true.ndim != 1:
            raise ValueError("predictions must be (N, K) scores; references must be 1D labels.")
        N, K = scores.shape
        if y_true.shape[0] != N:
            raise ValueError(f"Shape mismatch: references length {y_true.shape[0]} vs predictions {N}.")

        # Normalize the requested k values into a sorted, de-duplicated list of positive ints.
        ks = k_list if k_list is not None else [k if k is not None else 1]
        ks = sorted(set(int(x) for x in ks if x is not None and x >= 1))
        if not ks:
            raise ValueError("Provide a positive integer k or a non-empty k_list.")
        if ks[-1] > K:
            raise ValueError(f"k={ks[-1]} exceeds the number of classes K={K}.")

        # Rank classes once by descending score; every top-k set is a prefix of this ordering.
        sorted_idx = np.argsort(-scores, axis=1)
        results = {}
        per_class = {}

        for kk in ks:
            topk = sorted_idx[:, :kk]
            recalls = []
            for c in range(K):
                mask = y_true == c
                denom = int(mask.sum())
                if denom == 0:
                    # No positive samples for this class: fall back to zero_division.
                    recalls.append(float(zero_division))
                    continue
                # recall@k for class c: fraction of its samples with c among the top-k predictions.
                hits = int(np.any(topk[mask] == c, axis=1).sum())
                recalls.append(hits / denom)
            ba_k = float(np.mean(recalls))
            results[kk] = ba_k
            if return_per_class:
                per_class[kk] = recalls

        out = {"balanced_topk_accuracy": results[ks[0]] if len(ks) == 1 else results}
        if return_per_class:
            out["per_class_recall"] = per_class if len(ks) > 1 else per_class[ks[0]]
        return out
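
Usage sketch (a hypothetical example, assuming the script is saved locally and loaded by path via evaluate.load; the toy scores below are made up for illustration):

import evaluate

metric = evaluate.load("./balanced_topk_accuracy.py")  # hypothetical local path

# 4 samples, 3 classes; scores need not be normalized probabilities.
predictions = [
    [0.6, 0.3, 0.1],  # true class 0, ranked 1st
    [0.2, 0.5, 0.3],  # true class 2, ranked 2nd (a hit only for k >= 2)
    [0.1, 0.8, 0.1],  # true class 1, ranked 1st
    [0.4, 0.4, 0.2],  # true class 0, ranked 1st (argsort breaks the tie by index)
]
references = [0, 2, 1, 0]

print(metric.compute(predictions=predictions, references=references, k_list=[1, 2]))
# {'balanced_topk_accuracy': {1: 0.666..., 2: 1.0}}: class 2's only sample misses at k=1,
# so its recall is 0 and the macro average is 2/3; at k=2 every class reaches recall 1.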
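
Continuing from the sketch above: at k=1 the metric is macro-averaged recall over the arg-max predictions, i.e. balanced accuracy, which allows a quick cross-check against scikit-learn (assuming scikit-learn is installed; sklearn averages only over classes that occur in the references, so the check needs every class to appear, which is virtually certain here):

import numpy as np
from sklearn.metrics import balanced_accuracy_score

rng = np.random.default_rng(0)
scores = rng.random((200, 5))          # continuous random scores: ties are measure-zero
labels = rng.integers(0, 5, size=200)  # all 5 classes appear with near certainty

ours = metric.compute(predictions=scores.tolist(), references=labels.tolist(), k=1)
theirs = balanced_accuracy_score(labels, scores.argmax(axis=1))
assert np.isclose(ours["balanced_topk_accuracy"], theirs)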