OliverOnHF committed on
Commit
6a9f203
·
verified ·
1 Parent(s): 8fc021a

Upload balanced_accuracy.py

Browse files
Files changed (1) hide show
  1. balanced_accuracy.py +93 -65
balanced_accuracy.py CHANGED
@@ -9,28 +9,34 @@ Definitions
9
  - Binary: (TPR + TNR) / 2
10
  - Multiclass: macro-average of per-class recall
11
 
12
- Quality-of-life
13
- - from_probas=True: accept probabilities/logits directly (binary threshold or multiclass argmax)
14
  - ignore_index: skip unlabeled samples (e.g., -100)
15
  - adjusted=True: sklearn-style chance correction
 
 
16
  """
17
 
18
  _KWARGS_DESCRIPTION = """
19
  Args:
20
- predictions: 1D list/array. Integer labels by default; if from_probas=True, pass probabilities/logits.
21
- - binary: list[0/1] or list[float] (prob for positive class)
22
- - multiclass: list[int class_id] or 2D array-like of shape (N, K) with class scores/probs
23
  references: 1D list/array of integer labels.
24
- task: "binary" | "multiclass". (Default: "binary")
25
- num_classes: int, required for multiclass if labels are not 0..K-1 (optional if from_probas=True and scores provided).
26
  adjusted: bool, default False. (binary: 2*BA-1; multiclass: (BA-1/K)/(1-1/K))
27
- zero_division: float, default 0.0. Value used when a denominator is 0 (e.g., no positives for a class).
28
- from_probas: bool, default False. If True, `predictions` will be interpreted as probabilities/logits.
29
- threshold: float, default 0.5. Binary threshold when from_probas=True and predictions are 1D.
30
- proba_pos_index: int, default 1. If binary and predictions are 2D scores with shape (N, 2), which column is positive.
31
- ignore_index: int | None, default None. If set, samples whose reference == ignore_index will be skipped.
 
 
 
32
  Returns:
33
- dict: {"balanced_accuracy": float}
 
34
  """
35
 
36
  _CITATION = ""
@@ -45,26 +51,33 @@ def _safe_div(num, den, zero_div=0.0):
45
  return out
46
 
47
 
48
- def _to_int_labels_binary(y_pred, from_probas: bool, threshold: float, proba_pos_index: int):
49
- y_pred = np.asarray(y_pred)
50
- if from_probas:
51
- if y_pred.ndim == 1:
52
- return (y_pred >= threshold).astype(int)
53
- if y_pred.ndim == 2:
54
- if y_pred.shape[1] == 2:
55
- return (y_pred[:, proba_pos_index] >= threshold).astype(int)
56
- return np.argmax(y_pred, axis=1).astype(int)
57
- raise ValueError("Binary from_probas expects 1D (prob) or 2D (scores) predictions.")
58
- return y_pred.astype(int)
59
 
60
 
61
- def _to_int_labels_multiclass(y_pred, from_probas: bool):
62
- y_pred = np.asarray(y_pred)
63
- if from_probas:
64
- if y_pred.ndim != 2:
65
- raise ValueError("Multiclass with from_probas=True expects 2D scores/probs of shape (N, K).")
66
- return np.argmax(y_pred, axis=1).astype(int)
67
- return y_pred.astype(int)
 
 
 
 
 
 
 
 
 
 
68
 
69
 
70
  class BalancedAccuracy(evaluate.Metric):
@@ -92,56 +105,71 @@ class BalancedAccuracy(evaluate.Metric):
92
  num_classes: int | None = None,
93
  adjusted: bool = False,
94
  zero_division: float = 0.0,
95
- from_probas: bool = False,
96
- threshold: float = 0.5,
97
- proba_pos_index: int = 1,
98
  ignore_index: int | None = None,
 
 
99
  ):
100
- y_true = np.asarray(references)
101
- y_pred_raw = np.asarray(predictions)
102
 
103
  if ignore_index is not None:
104
  mask = y_true != ignore_index
105
  y_true = y_true[mask]
106
- y_pred_raw = y_pred_raw[mask]
107
  if y_true.size == 0:
108
  return {"balanced_accuracy": float("nan")}
109
 
110
  if y_true.ndim != 1:
111
  raise ValueError("`references` must be 1D integer labels, e.g., [0,1,1,0].")
 
 
112
 
 
113
  if task == "binary":
114
- y_pred = _to_int_labels_binary(y_pred_raw, from_probas, threshold, proba_pos_index)
115
- y_true = y_true.astype(int)
116
-
117
- tp = float(((y_true == 1) & (y_pred == 1)).sum())
118
- fn = float(((y_true == 1) & (y_pred == 0)).sum())
119
- tn = float(((y_true == 0) & (y_pred == 0)).sum())
120
- fp = float(((y_true == 0) & (y_pred == 1)).sum())
121
- tpr = _safe_div(tp, tp + fn, zero_division)
122
- tnr = _safe_div(tn, tn + fp, zero_division)
123
- ba = 0.5 * (tpr + tnr)
 
 
 
 
 
124
  if adjusted:
125
- ba = 2 * ba - 1 # (BA - 0.5)/0.5
126
  return {"balanced_accuracy": float(ba)}
127
 
128
- if task == "multiclass":
129
- y_pred = _to_int_labels_multiclass(y_pred_raw, from_probas)
130
- y_true = y_true.astype(int)
131
 
132
- if num_classes is None:
133
- if from_probas and y_pred_raw.ndim == 2:
134
- num_classes = y_pred_raw.shape[1]
135
- else:
136
- num_classes = int(max(y_true.max(), y_pred.max())) + 1
137
 
138
- tp = np.array([(y_pred[y_true == c] == c).sum() for c in range(num_classes)], dtype=float)
139
- fn = np.array([(y_pred[y_true == c] != c).sum() for c in range(num_classes)], dtype=float)
140
- recall_c = _safe_div(tp, tp + fn, zero_division)
141
- ba = float(recall_c.mean())
142
- if adjusted:
143
- chance = 1.0 / float(num_classes)
144
- ba = float((ba - chance) / (1.0 - chance))
145
- return {"balanced_accuracy": ba}
 
 
 
 
 
 
146
 
147
- raise ValueError("`task` must be 'binary' or 'multiclass'.")
 
 
 
 
9
  - Binary: (TPR + TNR) / 2
10
  - Multiclass: macro-average of per-class recall
11
 
12
+ Extras
13
+ - threshold="auto": pick the best threshold for binary probabilities (Youden's J)
14
  - ignore_index: skip unlabeled samples (e.g., -100)
15
  - adjusted=True: sklearn-style chance correction
16
+ - return_per_class=True: also return per-class recalls (multiclass)
17
+ - class_mask=[...] (multiclass): average over a subset of classes
18
  """
19
 
20
  _KWARGS_DESCRIPTION = """
21
  Args:
22
+ predictions: 1D list/array.
23
+ Binary: integer labels {0,1}, or probabilities in [0,1] (if threshold given)
24
+ Multiclass: integer labels {0..K-1}
25
  references: 1D list/array of integer labels.
26
+ task: "binary" | "multiclass" (Default: "binary")
27
+ num_classes: int, for multiclass; inferred if labels are 0..K-1.
28
  adjusted: bool, default False. (binary: 2*BA-1; multiclass: (BA-1/K)/(1-1/K))
29
+ zero_division: float, default 0.0. Value used when a denominator is 0.
30
+ threshold: float in (0,1) or "auto" (binary only).
31
+ - float: binarize probs via (prob >= threshold)
32
+ - "auto": choose threshold maximizing BA on given data (Youden's J)
33
+ - if None: treat `predictions` as 0/1 labels (no binarization)
34
+ ignore_index: int | None, default None. If set, samples with reference == ignore_index are skipped.
35
+ return_per_class: bool, default False (multiclass) — also return per-class recalls list.
36
+ class_mask: Optional[list[int]] — only average over these classes (multiclass).
37
  Returns:
38
+ dict with at least {"balanced_accuracy": float}. If threshold="auto" (binary) adds {"optimal_threshold": float}.
39
+ If return_per_class=True (multiclass) adds {"per_class_recall": list[float]}.
40
  """
41
 
42
  _CITATION = ""
 
51
  return out
52
 
53
 
54
def _binary_ba_from_labels(y_true_i, y_pred_i, zero_div):
    """Balanced accuracy for hard 0/1 labels: the mean of TPR and TNR.

    ``zero_div`` is substituted (via ``_safe_div``) whenever a rate's
    denominator is zero, i.e. no positives or no negatives are present.
    """
    actual_pos = y_true_i == 1
    actual_neg = y_true_i == 0
    pred_pos = y_pred_i == 1
    pred_neg = y_pred_i == 0
    true_pos = float((actual_pos & pred_pos).sum())
    false_neg = float((actual_pos & pred_neg).sum())
    true_neg = float((actual_neg & pred_neg).sum())
    false_pos = float((actual_neg & pred_pos).sum())
    sensitivity = _safe_div(true_pos, true_pos + false_neg, zero_div)
    specificity = _safe_div(true_neg, true_neg + false_pos, zero_div)
    return 0.5 * (sensitivity + specificity)
 
 
 
62
 
63
 
64
+ def _binary_find_best_threshold(y_true, probs, zero_div):
65
+ p = np.asarray(probs, dtype=float)
66
+ uniq = np.unique(p)
67
+ candidates = []
68
+ if uniq.size == 1:
69
+ candidates = [uniq[0]]
70
+ else:
71
+ mids = (uniq[:-1] + uniq[1:]) / 2.0
72
+ candidates = [uniq[0] - 1e-12] + mids.tolist() + [uniq[-1] + 1e-12]
73
+
74
+ best_t, best_ba = None, -1.0
75
+ for t in candidates:
76
+ y_pred = (p >= t).astype(int)
77
+ ba = _binary_ba_from_labels(y_true, y_pred, zero_div)
78
+ if ba > best_ba or (abs(ba - best_ba) < 1e-12 and abs(t - 0.5) < abs(best_t - 0.5)):
79
+ best_ba, best_t = ba, t
80
+ return float(best_t), float(best_ba)
81
 
82
 
83
  class BalancedAccuracy(evaluate.Metric):
 
105
  num_classes: int | None = None,
106
  adjusted: bool = False,
107
  zero_division: float = 0.0,
108
+ threshold: float | str | None = None, # binary only: float or "auto" or None
 
 
109
  ignore_index: int | None = None,
110
+ return_per_class: bool = False,
111
+ class_mask: list[int] | None = None,
112
  ):
113
+ y_true = np.asarray(references).astype(int)
114
+ y_pred_in = np.asarray(predictions)
115
 
116
  if ignore_index is not None:
117
  mask = y_true != ignore_index
118
  y_true = y_true[mask]
119
+ y_pred_in = y_pred_in[mask]
120
  if y_true.size == 0:
121
  return {"balanced_accuracy": float("nan")}
122
 
123
  if y_true.ndim != 1:
124
  raise ValueError("`references` must be 1D integer labels, e.g., [0,1,1,0].")
125
+ if y_pred_in.ndim != 1:
126
+ raise ValueError("`predictions` must be 1D labels or probabilities (for binary).")
127
 
128
+ # ---- Binary ----
129
  if task == "binary":
130
+ is_prob_like = not np.isin(np.unique(y_pred_in), [0.0, 1.0]).all()
131
+
132
+ if is_prob_like:
133
+ if threshold == "auto":
134
+ t_opt, ba = _binary_find_best_threshold(y_true, y_pred_in, zero_division)
135
+ if adjusted:
136
+ ba = 2 * ba - 1
137
+ return {"balanced_accuracy": float(ba), "optimal_threshold": float(t_opt)}
138
+ else:
139
+ t = 0.5 if (threshold is None) else float(threshold)
140
+ y_pred = (y_pred_in >= t).astype(int)
141
+ else:
142
+ y_pred = y_pred_in.astype(int)
143
+
144
+ ba = _binary_ba_from_labels(y_true, y_pred, zero_division)
145
  if adjusted:
146
+ ba = 2 * ba - 1
147
  return {"balanced_accuracy": float(ba)}
148
 
149
+ # ---- Multiclass ----
150
+ if task != "multiclass":
151
+ raise ValueError("`task` must be 'binary' or 'multiclass'.")
152
 
153
+ y_pred = y_pred_in.astype(int)
154
+ if num_classes is None:
155
+ num_classes = int(max(y_true.max() if y_true.size else 0, y_pred.max() if y_pred.size else 0)) + 1
 
 
156
 
157
+ classes = list(range(num_classes))
158
+ if class_mask is not None and len(class_mask) > 0:
159
+ classes = [c for c in class_mask if 0 <= c < num_classes]
160
+ if len(classes) == 0:
161
+ return {"balanced_accuracy": float("nan")}
162
+
163
+ tp = np.array([(y_pred[y_true == c] == c).sum() for c in classes], dtype=float)
164
+ fn = np.array([(y_pred[y_true == c] != c).sum() for c in classes], dtype=float)
165
+ recall_c = _safe_div(tp, tp + fn, zero_division)
166
+ ba = float(recall_c.mean())
167
+
168
+ if adjusted:
169
+ chance = 1.0 / float(len(classes))
170
+ ba = float((ba - chance) / (1.0 - chance))
171
 
172
+ out = {"balanced_accuracy": ba}
173
+ if return_per_class:
174
+ out["per_class_recall"] = recall_c.tolist()
175
+ return out