Upload 2 files
Browse files- balanced_accuracy.py +178 -0
- balanced_accuracy_multilabel.py +21 -15
balanced_accuracy.py
ADDED
|
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import evaluate
|
| 3 |
+
import datasets
|
| 4 |
+
|
| 5 |
+
_DESCRIPTION = """
|
| 6 |
+
Balanced Accuracy for imbalanced classification.
|
| 7 |
+
|
| 8 |
+
Definitions
|
| 9 |
+
- Binary: (TPR + TNR) / 2
|
| 10 |
+
- Multiclass: macro-average of per-class recall
|
| 11 |
+
|
| 12 |
+
Extras
|
| 13 |
+
- threshold="auto": pick the best threshold for binary probabilities (Youden's J)
|
| 14 |
+
- ignore_index: skip unlabeled samples (e.g., -100)
|
| 15 |
+
- adjusted=True: sklearn-style chance correction
|
| 16 |
+
- return_per_class=True: also return per-class recalls (multiclass)
|
| 17 |
+
- class_mask=[...] (multiclass): average over a subset of classes
|
| 18 |
+
- support_per_class: when return_per_class=True (multiclass), also return true sample counts per class
|
| 19 |
+
"""
|
| 20 |
+
|
| 21 |
+
_KWARGS_DESCRIPTION = """
|
| 22 |
+
Args:
|
| 23 |
+
predictions: 1D list/array.
|
| 24 |
+
Binary: integer labels {0,1}, or probabilities in [0,1] (if threshold given)
|
| 25 |
+
Multiclass: integer labels {0..K-1}
|
| 26 |
+
references: 1D list/array of integer labels.
|
| 27 |
+
task: "binary" | "multiclass" (Default: "binary")
|
| 28 |
+
num_classes: int, for multiclass; inferred if labels are 0..K-1.
|
| 29 |
+
adjusted: bool, default False. (binary: 2*BA-1; multiclass: (BA-1/K)/(1-1/K))
|
| 30 |
+
zero_division: float, default 0.0.
|
| 31 |
+
threshold: float in (0,1) or "auto" (binary only).
|
| 32 |
+
- float: binarize probs via (prob >= threshold)
|
| 33 |
+
- "auto": choose threshold maximizing BA on given data (Youden's J)
|
| 34 |
+
- if None: treat `predictions` as 0/1 labels (no binarization)
|
| 35 |
+
ignore_index: int | None. If set, samples with reference == ignore_index are skipped.
|
| 36 |
+
return_per_class: bool, default False — also return per-class recalls list (multiclass).
|
| 37 |
+
class_mask: Optional[list[int]] — only average over these classes (multiclass).
|
| 38 |
+
Returns:
|
| 39 |
+
{"balanced_accuracy": float}
|
| 40 |
+
+ (binary, threshold="auto"): {"optimal_threshold": float}
|
| 41 |
+
+ (multiclass, return_per_class=True):
|
| 42 |
+
{"per_class_recall": list[float], "support_per_class": list[int]}
|
| 43 |
+
"""
|
| 44 |
+
|
| 45 |
+
_CITATION = ""
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def _safe_div(num, den, zero_div=0.0):
|
| 49 |
+
num = np.asarray(num, dtype=float)
|
| 50 |
+
den = np.asarray(den, dtype=float)
|
| 51 |
+
out = np.full_like(num, float(zero_div))
|
| 52 |
+
mask = den != 0
|
| 53 |
+
out[mask] = num[mask] / den[mask]
|
| 54 |
+
return out
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def _binary_ba_from_labels(y_true_i, y_pred_i, zero_div):
|
| 58 |
+
tp = float(((y_true_i == 1) & (y_pred_i == 1)).sum())
|
| 59 |
+
fn = float(((y_true_i == 1) & (y_pred_i == 0)).sum())
|
| 60 |
+
tn = float(((y_true_i == 0) & (y_pred_i == 0)).sum())
|
| 61 |
+
fp = float(((y_true_i == 0) & (y_pred_i == 1)).sum())
|
| 62 |
+
tpr = _safe_div(tp, tp + fn, zero_div)
|
| 63 |
+
tnr = _safe_div(tn, tn + fp, zero_div)
|
| 64 |
+
return 0.5 * (tpr + tnr)
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def _binary_find_best_threshold(y_true, probs, zero_div):
    """Exhaustively pick the probability threshold that maximizes balanced accuracy.

    Candidate thresholds are the midpoints between consecutive unique
    probability values, plus sentinels just below the minimum and just above
    the maximum (so the all-positive and all-negative labelings are both
    reachable). Ties on BA are broken toward the candidate closest to 0.5.

    Returns:
        (best_threshold, best_balanced_accuracy) — both floats.
    """
    p = np.asarray(probs, dtype=float)
    uniq = np.unique(p)
    if uniq.size == 1:
        # Only one distinct probability: the single meaningful cut is at that value.
        candidates = [float(uniq[0])]
    else:
        mids = (uniq[:-1] + uniq[1:]) / 2.0
        candidates = [float(uniq[0] - 1e-12), *mids.tolist(), float(uniq[-1] + 1e-12)]

    best_t, best_ba = None, -1.0
    # Any non-1 reference label is treated as the negative class.
    yt = (np.asarray(y_true) == 1).astype(int)
    for t in candidates:
        yp = (p >= t).astype(int)
        ba = _binary_ba_from_labels(yt, yp, zero_div)
        # Accept strictly better BA; on (near-)ties, prefer the threshold nearer 0.5.
        if (ba > best_ba) or (abs(ba - best_ba) < 1e-12 and (best_t is None or abs(t - 0.5) < abs(best_t - 0.5))):
            best_ba, best_t = float(ba), float(t)
    return best_t, best_ba
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
class BalancedAccuracy(evaluate.Metric):
    """`evaluate` metric computing balanced accuracy for binary or multiclass tasks.

    Binary: (TPR + TNR) / 2, with optional probability binarization
    (fixed threshold or "auto" via exhaustive search).
    Multiclass: mean of per-class recalls, optionally restricted to
    `class_mask` and optionally chance-adjusted.
    """

    def _info(self):
        # Declares flat float64 columns; integer labels are accepted because
        # `_compute` casts inputs itself.
        return evaluate.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {"predictions": datasets.Value("float64"),
                 "references": datasets.Value("float64")}
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.balanced_accuracy_score.html"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        task: str = "binary",
        num_classes: int | None = None,
        adjusted: bool = False,
        zero_division: float = 0.0,
        threshold: float | str | None = None,
        ignore_index: int | None = None,
        return_per_class: bool = False,
        class_mask: list[int] | None = None,
    ):
        """Compute balanced accuracy; see module docstring for argument semantics.

        Returns a dict with "balanced_accuracy", plus "optimal_threshold"
        (binary + threshold="auto") or "per_class_recall"/"support_per_class"
        (multiclass + return_per_class=True).
        """
        y_true = np.asarray(references).astype(int)
        y_pred_in = np.asarray(predictions)

        if ignore_index is not None:
            # Drop samples whose reference equals the sentinel (e.g. -100).
            mask = y_true != ignore_index
            y_true = y_true[mask]
            y_pred_in = y_pred_in[mask]
            if y_true.size == 0:
                # Everything was ignored: the metric is undefined.
                return {"balanced_accuracy": float("nan")}

        if y_true.ndim != 1 or y_pred_in.ndim != 1:
            raise ValueError("`references`/`predictions` must be 1D.")

        # ---- Binary ----
        if task == "binary":
            # Any value outside {0.0, 1.0} marks the predictions as probabilities.
            is_prob_like = not np.isin(np.unique(y_pred_in), [0.0, 1.0]).all()
            if is_prob_like:
                if threshold == "auto":
                    t_opt, ba = _binary_find_best_threshold(y_true, y_pred_in, zero_division)
                    if adjusted:
                        # Chance correction for binary: maps 0.5 -> 0, 1.0 -> 1.
                        ba = 2 * ba - 1
                    return {"balanced_accuracy": float(ba), "optimal_threshold": float(t_opt)}
                else:
                    # NOTE(review): threshold=None with probability-like inputs
                    # binarizes at 0.5, while _KWARGS_DESCRIPTION says None means
                    # "treat predictions as 0/1 labels" — confirm intended behavior.
                    t = 0.5 if (threshold is None) else float(threshold)
                    if not (0.0 < t < 1.0):
                        raise ValueError("`threshold` must be in (0,1) or 'auto'.")
                    y_pred = (y_pred_in >= t).astype(int)
            else:
                # Already hard 0/1 labels; any `threshold` setting is ignored here.
                y_pred = y_pred_in.astype(int)

            ba = _binary_ba_from_labels(y_true, y_pred, zero_division)
            if adjusted:
                ba = 2 * ba - 1
            return {"balanced_accuracy": float(ba)}

        # ---- Multiclass ----
        if task != "multiclass":
            raise ValueError("`task` must be 'binary' or 'multiclass'.")

        y_pred = y_pred_in.astype(int)
        if num_classes is None:
            # Infer K from the largest label seen in either array (0-based labels).
            num_classes = int(max(y_true.max() if y_true.size else 0,
                                  y_pred.max() if y_pred.size else 0)) + 1
        if (y_pred < 0).any() or (y_pred >= num_classes).any():
            raise ValueError(f"`predictions` must be in [0,{num_classes-1}] for multiclass.")

        classes = list(range(num_classes))
        if class_mask:
            # Keep only in-range requested classes, preserving class_mask order.
            # (An empty class_mask list is falsy and behaves like "no mask".)
            classes = [c for c in class_mask if 0 <= c < num_classes]
            if not classes:
                return {"balanced_accuracy": float("nan")}

        # Per-class true positives / false negatives over the selected classes.
        tp = np.array([(y_pred[y_true == c] == c).sum() for c in classes], dtype=float)
        fn = np.array([(y_pred[y_true == c] != c).sum() for c in classes], dtype=float)
        recall_c = _safe_div(tp, tp + fn, zero_division)
        ba = float(recall_c.mean())
        if adjusted:
            # sklearn-style chance correction over the averaged classes:
            # (BA - 1/K) / (1 - 1/K), where K = len(classes).
            chance = 1.0 / float(len(classes))
            ba = float((ba - chance) / (1.0 - chance))

        out = {"balanced_accuracy": ba}
        if return_per_class:
            out["per_class_recall"] = recall_c.tolist()
            # True-sample counts per selected class (after ignore_index filtering).
            out["support_per_class"] = [int((y_true == c).sum()) for c in classes]
        return out
|
balanced_accuracy_multilabel.py
CHANGED
|
@@ -11,6 +11,7 @@ Extras:
|
|
| 11 |
- threshold='auto' (per-label Youden's J) or float in (0,1)
|
| 12 |
- class_mask=[...] (evaluate a subset of labels)
|
| 13 |
- ignore_index to skip unlabeled samples (e.g., -100)
|
|
|
|
| 14 |
"""
|
| 15 |
|
| 16 |
_KWARGS_DESCRIPTION = """
|
|
@@ -20,13 +21,15 @@ Args:
|
|
| 20 |
references: list[list[int or float]] of shape (N, L). 0/1 labels; ignore_index is allowed.
|
| 21 |
from_probas: bool, default False. If True, binarize predictions with `threshold`.
|
| 22 |
threshold: float in (0,1) or 'auto' (per-label). Default 0.5 if from_probas=True.
|
| 23 |
-
zero_division: float, default 0.0.
|
| 24 |
average: 'macro'|'weighted'|'micro', default 'macro'.
|
| 25 |
class_mask: Optional[list[int]] — only average over these label indices.
|
| 26 |
-
ignore_index: int | None, default None.
|
| 27 |
return_per_label: bool, default False — also return per-label BA list (after masking).
|
| 28 |
Returns:
|
| 29 |
-
|
|
|
|
|
|
|
| 30 |
"""
|
| 31 |
|
| 32 |
_CITATION = ""
|
|
@@ -55,7 +58,7 @@ def _best_threshold_per_label(y_true_col, prob_col, zero_div):
|
|
| 55 |
candidates = [float(uniq[0])]
|
| 56 |
else:
|
| 57 |
mids = (uniq[:-1] + uniq[1:]) / 2.0
|
| 58 |
-
candidates = [uniq[0] - 1e-12
|
| 59 |
best_t, best_ba = None, -1.0
|
| 60 |
yt = (np.asarray(y_true_col) == 1).astype(int)
|
| 61 |
for t in candidates:
|
|
@@ -65,8 +68,8 @@ def _best_threshold_per_label(y_true_col, prob_col, zero_div):
|
|
| 65 |
tn = float(((yt == 0) & (yp == 0)).sum())
|
| 66 |
fp = float(((yt == 0) & (yp == 1)).sum())
|
| 67 |
ba = _binary_ba(tp, fn, tn, fp, zero_div)
|
| 68 |
-
if ba > best_ba or (abs(ba - best_ba) < 1e-12 and abs(t - 0.5) < abs(
|
| 69 |
-
best_ba, best_t = ba, float(t)
|
| 70 |
return best_t, best_ba
|
| 71 |
|
| 72 |
|
|
@@ -105,8 +108,7 @@ class BalancedAccuracyMultilabel(evaluate.Metric):
|
|
| 105 |
raise ValueError(f"Shape mismatch: references {y_true_in.shape} vs predictions {y_pred_in.shape}.")
|
| 106 |
|
| 107 |
if ignore_index is not None:
|
| 108 |
-
|
| 109 |
-
valid = mask.astype(bool)
|
| 110 |
else:
|
| 111 |
valid = np.ones_like(y_true_in, dtype=bool)
|
| 112 |
|
|
@@ -122,7 +124,7 @@ class BalancedAccuracyMultilabel(evaluate.Metric):
|
|
| 122 |
|
| 123 |
per_label_ba = []
|
| 124 |
per_label_thr = []
|
| 125 |
-
|
| 126 |
if from_probas and threshold == "auto":
|
| 127 |
for j in labels:
|
| 128 |
vmask = valid[:, j]
|
|
@@ -147,16 +149,19 @@ class BalancedAccuracyMultilabel(evaluate.Metric):
|
|
| 147 |
|
| 148 |
per_label_ba = np.asarray(per_label_ba, dtype=float)
|
| 149 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 150 |
if average == "macro":
|
| 151 |
score = float(np.mean(per_label_ba)) if per_label_ba.size else float("nan")
|
| 152 |
elif average == "weighted":
|
| 153 |
-
weights =
|
| 154 |
-
for j in labels:
|
| 155 |
-
vmask = valid[:, j]
|
| 156 |
-
weights.append(int(y_true[vmask, j].sum()))
|
| 157 |
-
weights = np.asarray(weights, dtype=float)
|
| 158 |
if weights.sum() == 0:
|
| 159 |
-
score = float(np.mean(per_label_ba))
|
| 160 |
else:
|
| 161 |
score = float(np.average(per_label_ba, weights=weights))
|
| 162 |
elif average == "micro":
|
|
@@ -182,4 +187,5 @@ class BalancedAccuracyMultilabel(evaluate.Metric):
|
|
| 182 |
out["per_label_thresholds"] = [float(x) for x in per_label_thr]
|
| 183 |
if return_per_label:
|
| 184 |
out["per_label_ba"] = [float(x) for x in per_label_ba]
|
|
|
|
| 185 |
return out
|
|
|
|
| 11 |
- threshold='auto' (per-label Youden's J) or float in (0,1)
|
| 12 |
- class_mask=[...] (evaluate a subset of labels)
|
| 13 |
- ignore_index to skip unlabeled samples (e.g., -100)
|
| 14 |
+
- support_per_label: when return_per_label=True, also return true positive counts per label (after masking)
|
| 15 |
"""
|
| 16 |
|
| 17 |
_KWARGS_DESCRIPTION = """
|
|
|
|
| 21 |
references: list[list[int or float]] of shape (N, L). 0/1 labels; ignore_index is allowed.
|
| 22 |
from_probas: bool, default False. If True, binarize predictions with `threshold`.
|
| 23 |
threshold: float in (0,1) or 'auto' (per-label). Default 0.5 if from_probas=True.
|
| 24 |
+
zero_division: float, default 0.0.
|
| 25 |
average: 'macro'|'weighted'|'micro', default 'macro'.
|
| 26 |
class_mask: Optional[list[int]] — only average over these label indices.
|
| 27 |
+
ignore_index: int | None, default None.
|
| 28 |
return_per_label: bool, default False — also return per-label BA list (after masking).
|
| 29 |
Returns:
|
| 30 |
+
{"balanced_accuracy": float}
|
| 31 |
+
+ (from_probas & threshold='auto'): {"per_label_thresholds": list[float]}
|
| 32 |
+
+ (return_per_label): {"per_label_ba": list[float], "support_per_label": list[int]}
|
| 33 |
"""
|
| 34 |
|
| 35 |
_CITATION = ""
|
|
|
|
| 58 |
candidates = [float(uniq[0])]
|
| 59 |
else:
|
| 60 |
mids = (uniq[:-1] + uniq[1:]) / 2.0
|
| 61 |
+
candidates = [float(uniq[0] - 1e-12), *mids.tolist(), float(uniq[-1] + 1e-12)]
|
| 62 |
best_t, best_ba = None, -1.0
|
| 63 |
yt = (np.asarray(y_true_col) == 1).astype(int)
|
| 64 |
for t in candidates:
|
|
|
|
| 68 |
tn = float(((yt == 0) & (yp == 0)).sum())
|
| 69 |
fp = float(((yt == 0) & (yp == 1)).sum())
|
| 70 |
ba = _binary_ba(tp, fn, tn, fp, zero_div)
|
| 71 |
+
if (ba > best_ba) or (abs(ba - best_ba) < 1e-12 and (best_t is None or abs(t - 0.5) < abs(best_t - 0.5))):
|
| 72 |
+
best_ba, best_t = float(ba), float(t)
|
| 73 |
return best_t, best_ba
|
| 74 |
|
| 75 |
|
|
|
|
| 108 |
raise ValueError(f"Shape mismatch: references {y_true_in.shape} vs predictions {y_pred_in.shape}.")
|
| 109 |
|
| 110 |
if ignore_index is not None:
|
| 111 |
+
valid = (y_true_in != ignore_index)
|
|
|
|
| 112 |
else:
|
| 113 |
valid = np.ones_like(y_true_in, dtype=bool)
|
| 114 |
|
|
|
|
| 124 |
|
| 125 |
per_label_ba = []
|
| 126 |
per_label_thr = []
|
| 127 |
+
# ---- compute per-label BA ----
|
| 128 |
if from_probas and threshold == "auto":
|
| 129 |
for j in labels:
|
| 130 |
vmask = valid[:, j]
|
|
|
|
| 149 |
|
| 150 |
per_label_ba = np.asarray(per_label_ba, dtype=float)
|
| 151 |
|
| 152 |
+
# New: per-label support (count of positive samples; respects ignore_index, and follows class_mask order when given)
|
| 153 |
+
support_per_label = []
|
| 154 |
+
for j in labels:
|
| 155 |
+
vmask = valid[:, j]
|
| 156 |
+
support_per_label.append(int(y_true[vmask, j].sum()))
|
| 157 |
+
|
| 158 |
+
# ---- aggregate ----
|
| 159 |
if average == "macro":
|
| 160 |
score = float(np.mean(per_label_ba)) if per_label_ba.size else float("nan")
|
| 161 |
elif average == "weighted":
|
| 162 |
+
weights = np.asarray(support_per_label, dtype=float)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 163 |
if weights.sum() == 0:
|
| 164 |
+
score = float(np.mean(per_label_ba)) if per_label_ba.size else float("nan")
|
| 165 |
else:
|
| 166 |
score = float(np.average(per_label_ba, weights=weights))
|
| 167 |
elif average == "micro":
|
|
|
|
| 187 |
out["per_label_thresholds"] = [float(x) for x in per_label_thr]
|
| 188 |
if return_per_label:
|
| 189 |
out["per_label_ba"] = [float(x) for x in per_label_ba]
|
| 190 |
+
out["support_per_label"] = support_per_label
|
| 191 |
return out
|