Delete balanced-accuracy.py
Browse files- balanced-accuracy.py +0 -175
balanced-accuracy.py
DELETED
|
@@ -1,175 +0,0 @@
|
|
| 1 |
-
import numpy as np
|
| 2 |
-
import evaluate
|
| 3 |
-
import datasets
|
| 4 |
-
|
| 5 |
-
# Metric-card text shown by `evaluate` when this metric is loaded/inspected.
_DESCRIPTION = """
Balanced Accuracy for imbalanced classification.

Definitions
- Binary: (TPR + TNR) / 2
- Multiclass: macro-average of per-class recall

Extras
- threshold="auto": pick the best threshold for binary probabilities (Youden's J)
- ignore_index: skip unlabeled samples (e.g., -100)
- adjusted=True: sklearn-style chance correction
- return_per_class=True: also return per-class recalls (multiclass)
- class_mask=[...] (multiclass): average over a subset of classes
"""

# Argument reference surfaced via `evaluate.MetricInfo.inputs_description`.
_KWARGS_DESCRIPTION = """
Args:
    predictions: 1D list/array.
        Binary: integer labels {0,1}, or probabilities in [0,1] (if threshold given)
        Multiclass: integer labels {0..K-1}
    references: 1D list/array of integer labels.
    task: "binary" | "multiclass" (Default: "binary")
    num_classes: int, for multiclass; inferred if labels are 0..K-1.
    adjusted: bool, default False. (binary: 2*BA-1; multiclass: (BA-1/K)/(1-1/K))
    zero_division: float, default 0.0. Value used when a denominator is 0.
    threshold: float in (0,1) or "auto" (binary only).
        - float: binarize probs via (prob >= threshold)
        - "auto": choose threshold maximizing BA on given data (Youden's J)
        - if None: treat `predictions` as 0/1 labels (no binarization)
    ignore_index: int | None, default None. If set, samples with reference == ignore_index are skipped.
    return_per_class: bool, default False (multiclass) — also return per-class recalls list.
    class_mask: Optional[list[int]] — only average over these classes (multiclass).
Returns:
    dict with at least {"balanced_accuracy": float}. If threshold="auto" (binary) adds {"optimal_threshold": float}.
    If return_per_class=True (multiclass) adds {"per_class_recall": list[float]}.
"""

# No formal citation for this metric implementation.
_CITATION = ""
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
def _safe_div(num, den, zero_div=0.0):
|
| 46 |
-
num = np.asarray(num, dtype=float)
|
| 47 |
-
den = np.asarray(den, dtype=float)
|
| 48 |
-
out = np.full_like(num, float(zero_div))
|
| 49 |
-
mask = den != 0
|
| 50 |
-
out[mask] = num[mask] / den[mask]
|
| 51 |
-
return out
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
def _binary_ba_from_labels(y_true_i, y_pred_i, zero_div):
    """Balanced accuracy of hard 0/1 predictions: mean of TPR and TNR.

    `zero_div` is used for a rate whose denominator is zero (no positives
    or no negatives in `y_true_i`).
    """
    actual_pos = y_true_i == 1
    actual_neg = y_true_i == 0
    # Note: predictions are tested against 0 and 1 explicitly, so any other
    # value counts toward neither cell of the confusion matrix.
    pred_pos = y_pred_i == 1
    pred_neg = y_pred_i == 0
    tp = float(np.sum(actual_pos & pred_pos))
    fn = float(np.sum(actual_pos & pred_neg))
    tn = float(np.sum(actual_neg & pred_neg))
    fp = float(np.sum(actual_neg & pred_pos))
    sensitivity = _safe_div(tp, tp + fn, zero_div)  # TPR
    specificity = _safe_div(tn, tn + fp, zero_div)  # TNR
    return 0.5 * (sensitivity + specificity)
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
def _binary_find_best_threshold(y_true, probs, zero_div):
    """Search a binarization threshold for `probs` that maximizes balanced accuracy.

    Candidates are the midpoints between consecutive unique probabilities, plus
    one value just below the minimum and one just above the maximum (so the
    "predict all 1" and "predict all 0" extremes are both reachable). Ties on
    BA prefer the threshold closest to 0.5.

    Returns:
        (best_threshold, best_balanced_accuracy) as floats.
    """
    p = np.asarray(probs, dtype=float)
    uniq = np.unique(p)
    if uniq.size == 1:
        candidates = [uniq[0]]
    else:
        mids = (uniq[:-1] + uniq[1:]) / 2.0
        candidates = [uniq[0] - 1e-12] + mids.tolist() + [uniq[-1] + 1e-12]

    # Seed the search with the first candidate instead of a (None, -1.0)
    # sentinel: with the sentinel, a first-candidate tie could evaluate
    # abs(None - 0.5) and raise TypeError, and the -1.0 floor could mask
    # legitimately negative BA values when zero_div < -1.
    best_t = candidates[0]
    best_ba = _binary_ba_from_labels(y_true, (p >= best_t).astype(int), zero_div)
    for t in candidates[1:]:
        y_pred = (p >= t).astype(int)
        ba = _binary_ba_from_labels(y_true, y_pred, zero_div)
        # Strictly better BA wins; on a (near-)tie keep the t closest to 0.5.
        if ba > best_ba or (abs(ba - best_ba) < 1e-12 and abs(t - 0.5) < abs(best_t - 0.5)):
            best_ba, best_t = ba, t
    return float(best_t), float(best_ba)
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
class BalancedAccuracy(evaluate.Metric):
    """`evaluate` metric computing balanced accuracy for binary or multiclass labels."""

    def _info(self):
        """Return the metric card: description, usage text, and feature schema."""
        return evaluate.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            # float64 accommodates both integer class labels and binary
            # probabilities in the same `predictions` column.
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float64"),
                    "references": datasets.Value("float64"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.balanced_accuracy_score.html"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        task: str = "binary",  # "binary" | "multiclass"
        num_classes: int | None = None,
        adjusted: bool = False,
        zero_division: float = 0.0,
        threshold: float | str | None = None,  # binary only: float or "auto" or None
        ignore_index: int | None = None,
        return_per_class: bool = False,
        class_mask: list[int] | None = None,
    ):
        """Compute balanced accuracy; see `_KWARGS_DESCRIPTION` for full argument semantics.

        Returns a dict with "balanced_accuracy", plus "optimal_threshold"
        (binary + threshold="auto") or "per_class_recall"
        (multiclass + return_per_class=True) when applicable.
        """
        y_true = np.asarray(references).astype(int)
        y_pred_in = np.asarray(predictions)

        # Drop samples whose reference equals ignore_index (e.g., -100 padding).
        # NOTE(review): this masking runs before the ndim checks below, so it
        # assumes 1D inputs already; multi-dim inputs would be flattened here.
        if ignore_index is not None:
            mask = y_true != ignore_index
            y_true = y_true[mask]
            y_pred_in = y_pred_in[mask]
            # Nothing left to score after masking.
            if y_true.size == 0:
                return {"balanced_accuracy": float("nan")}

        if y_true.ndim != 1:
            raise ValueError("`references` must be 1D integer labels, e.g., [0,1,1,0].")
        if y_pred_in.ndim != 1:
            raise ValueError("`predictions` must be 1D labels or probabilities (for binary).")

        # ---- Binary ----
        if task == "binary":
            # Heuristic: any value other than exactly 0.0/1.0 means probabilities.
            # NOTE(review): probabilities that all happen to be exactly 0.0/1.0
            # are treated as hard labels, and `threshold` is then ignored.
            is_prob_like = not np.isin(np.unique(y_pred_in), [0.0, 1.0]).all()

            if is_prob_like:
                if threshold == "auto":
                    # Search candidate thresholds, maximizing BA (Youden's J).
                    t_opt, ba = _binary_find_best_threshold(y_true, y_pred_in, zero_division)
                    if adjusted:
                        # Binary chance level is 0.5; rescale BA to [-1, 1].
                        ba = 2 * ba - 1
                    return {"balanced_accuracy": float(ba), "optimal_threshold": float(t_opt)}
                else:
                    # Fixed-threshold binarization; default cut at 0.5.
                    t = 0.5 if (threshold is None) else float(threshold)
                    y_pred = (y_pred_in >= t).astype(int)
            else:
                # Predictions already look like hard 0/1 labels.
                y_pred = y_pred_in.astype(int)

            ba = _binary_ba_from_labels(y_true, y_pred, zero_division)
            if adjusted:
                ba = 2 * ba - 1
            return {"balanced_accuracy": float(ba)}

        # ---- Multiclass ----
        if task != "multiclass":
            raise ValueError("`task` must be 'binary' or 'multiclass'.")

        y_pred = y_pred_in.astype(int)
        if num_classes is None:
            # Infer K from the largest label in either array (labels assumed 0..K-1).
            num_classes = int(max(y_true.max() if y_true.size else 0, y_pred.max() if y_pred.size else 0)) + 1

        classes = list(range(num_classes))
        if class_mask is not None and len(class_mask) > 0:
            # Restrict averaging to the requested classes, silently dropping
            # any mask entries outside [0, num_classes).
            classes = [c for c in class_mask if 0 <= c < num_classes]
            if len(classes) == 0:
                return {"balanced_accuracy": float("nan")}

        # Per-class recall: TP / (TP + FN) over each class of interest.
        tp = np.array([(y_pred[y_true == c] == c).sum() for c in classes], dtype=float)
        fn = np.array([(y_pred[y_true == c] != c).sum() for c in classes], dtype=float)
        recall_c = _safe_div(tp, tp + fn, zero_division)
        ba = float(recall_c.mean())

        if adjusted:
            # sklearn-style chance correction: (BA - 1/K) / (1 - 1/K),
            # where K is the number of classes actually averaged over.
            chance = 1.0 / float(len(classes))
            ba = float((ba - chance) / (1.0 - chance))

        out = {"balanced_accuracy": ba}
        if return_per_class:
            out["per_class_recall"] = recall_c.tolist()
        return out
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|