OliverOnHF committed
Commit e99840d · verified · 1 Parent(s): 09dda22

Upload 3 files

balanced_accuracy.py CHANGED
@@ -30,9 +30,6 @@ Args:
     adjusted: bool, default False. (binary: 2*BA-1; multiclass: (BA-1/K)/(1-1/K))
     zero_division: float, default 0.0.
     threshold: float in (0,1) or "auto" (binary only).
-        - float: binarize probs via (prob >= threshold)
-        - "auto": choose threshold maximizing BA on given data (Youden's J)
-        - if None: treat `predictions` as 0/1 labels (no binarization)
     ignore_index: int | None. If set, samples with reference == ignore_index are skipped.
     return_per_class: bool, default False — also return per-class recalls list (multiclass).
     class_mask: Optional[list[int]] — only average over these classes (multiclass).
@@ -56,6 +53,15 @@ def _safe_div(num, den, zero_div=0.0):
     return out
 
 
+def _check_1d_same_len(y_true, y_pred, name_true="references", name_pred="predictions"):
+    if y_true.ndim != 1 or y_pred.ndim != 1:
+        raise ValueError(f"`{name_true}` and `{name_pred}` must be 1D.")
+    if y_true.shape[0] != y_pred.shape[0]:
+        raise ValueError(f"Length mismatch: `{name_true}`={y_true.shape[0]} vs `{name_pred}`={y_pred.shape[0]}.")
+    if not np.all(np.isfinite(y_pred)):
+        raise ValueError("`predictions` contains NaN/Inf.")
+
+
 def _binary_ba_from_labels(y_true_i, y_pred_i, zero_div):
     tp = float(((y_true_i == 1) & (y_pred_i == 1)).sum())
     fn = float(((y_true_i == 1) & (y_pred_i == 0)).sum())
@@ -114,33 +120,39 @@ class BalancedAccuracy(evaluate.Metric):
         class_mask: list[int] | None = None,
         sample_weight: list[float] | None = None,
     ):
-        y_true = np.asarray(references).astype(int)
-        y_pred_in = np.asarray(predictions)
+        y_true_all = np.asarray(references).astype(int)
+        y_pred_all = np.asarray(predictions)
 
+        # ignore_index mask
         if ignore_index is not None:
-            mask = y_true != ignore_index
+            mask = y_true_all != ignore_index
         else:
-            mask = np.ones_like(y_true, dtype=bool)
+            mask = np.ones_like(y_true_all, dtype=bool)
+
+        y_true = y_true_all[mask]
+        y_pred_in = y_pred_all[mask]
 
-        y_true = y_true[mask]
-        y_pred_in = y_pred_in[mask]
         if y_true.size == 0:
-            return {"balanced_accuracy": float("nan")}
+            return {"balanced_accuracy": float("nan"), "reason": "empty_after_ignore_index"}
 
+        # weights
         w = None
         if sample_weight is not None:
             w_in = np.asarray(sample_weight, dtype=float)
-            if w_in.shape[0] != mask.shape[0]:
-                raise ValueError("sample_weight length must match number of samples.")
+            if w_in.shape[0] != y_true_all.shape[0]:
+                raise ValueError("`sample_weight` length must match number of samples.")
             w = w_in[mask]
 
-        if y_true.ndim != 1 or y_pred_in.ndim != 1:
-            raise ValueError("`references`/`predictions` must be 1D.")
+        _check_1d_same_len(y_true, y_pred_in)
 
         # ---- Binary ----
         if task == "binary":
-            is_prob_like = not np.isin(np.unique(y_pred_in), [0.0, 1.0]).all()
+            # if labels given, must be {0,1}
+            uniq_pred = np.unique(y_pred_in)
+            is_prob_like = not np.isin(uniq_pred, [0.0, 1.0]).all()
             if is_prob_like:
+                if np.any((y_pred_in < 0) | (y_pred_in > 1)):
+                    raise ValueError("For binary with probabilities, `predictions` must be in [0,1].")
                 if threshold == "auto":
                     t_opt, ba = _binary_find_best_threshold(y_true, y_pred_in, zero_division)
                     if adjusted:
@@ -152,8 +164,12 @@ class BalancedAccuracy(evaluate.Metric):
                     raise ValueError("`threshold` must be in (0,1) or 'auto'.")
                 y_pred = (y_pred_in >= t).astype(int)
             else:
+                # labels mode: enforce {0,1}
+                if not np.isin(uniq_pred, [0, 1]).all():
+                    raise ValueError("For binary with label predictions, values must be 0/1.")
                 y_pred = y_pred_in.astype(int)
 
+            # weighted confusion
             if w is None:
                 tp = float(((y_true == 1) & (y_pred == 1)).sum())
                 fn = float(((y_true == 1) & (y_pred == 0)).sum())
@@ -177,17 +193,22 @@ class BalancedAccuracy(evaluate.Metric):
             raise ValueError("`task` must be 'binary' or 'multiclass'.")
 
         y_pred = y_pred_in.astype(int)
+
         if num_classes is None:
             num_classes = int(max(y_true.max() if y_true.size else 0,
                                   y_pred.max() if y_pred.size else 0)) + 1
+        if num_classes <= 0:
+            raise ValueError("`num_classes` must be positive.")
         if (y_pred < 0).any() or (y_pred >= num_classes).any():
             raise ValueError(f"`predictions` must be in [0,{num_classes-1}] for multiclass.")
+        if (y_true < 0).any() or (y_true >= num_classes).any():
+            raise ValueError(f"`references` must be in [0,{num_classes-1}] for multiclass.")
 
         classes = list(range(num_classes))
-        if class_mask:
+        if class_mask is not None and len(class_mask) > 0:
             classes = [c for c in class_mask if 0 <= c < num_classes]
-        if not classes:
-            return {"balanced_accuracy": float("nan")}
+        if len(classes) == 0:
+            return {"balanced_accuracy": float("nan"), "reason": "empty_class_mask_after_filtering"}
 
         recalls, supports = [], []
         for c in classes:
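For orientation, here is a minimal usage sketch of the patched binary and multiclass paths. It assumes the metric script from this commit is loaded as a local evaluate module; the local path and the example values are illustrative, not part of the commit.

# Minimal sketch, assuming balanced_accuracy.py from this commit is available locally.
import evaluate

ba = evaluate.load("./balanced_accuracy.py")  # local path is an assumption

# Binary with probability predictions: probabilities must now lie in [0,1];
# threshold="auto" picks the cutoff that maximizes balanced accuracy (Youden's J).
print(ba.compute(
    predictions=[0.1, 0.8, 0.7, 0.2],
    references=[0, 1, 1, 0],
    task="binary",
    threshold="auto",
))

# Multiclass with ignore_index: samples whose reference equals -100 are skipped,
# and an all-ignored batch now returns NaN together with a "reason" field.
print(ba.compute(
    predictions=[0, 2, 1, 1],
    references=[0, 2, -100, 1],
    task="multiclass",
    ignore_index=-100,
))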
balanced_accuracy_multilabel.py CHANGED
@@ -20,18 +20,14 @@ Args:
     predictions: list[list[int or float]] of shape (N, L).
         If from_probas=True, values are probabilities in [0,1]; otherwise 0/1 labels.
     references: list[list[int or float]] of shape (N, L). 0/1 labels; ignore_index is allowed.
-    from_probas: bool, default False. If True, binarize predictions with `threshold`.
+    from_probas: bool, default False.
     threshold: float in (0,1) or 'auto' (per-label). Default 0.5 if from_probas=True.
     zero_division: float, default 0.0.
     average: 'macro'|'weighted'|'micro', default 'macro'.
     class_mask: Optional[list[int]] — only average over these label indices.
     ignore_index: int | None, default None.
-    return_per_label: bool, default False — also return per-label BA list (after masking).
+    return_per_label: bool, default False.
     sample_weight: Optional[list[float]] — per-sample weights.
-Returns:
-    {"balanced_accuracy": float}
-    + (from_probas & threshold='auto'): {"per_label_thresholds": list[float]}
-    + (return_per_label): {"per_label_ba": list[float], "support_per_label": list[int]}
 """
 
 _CITATION = ""
@@ -53,7 +49,6 @@ def _binary_ba(tp, fn, tn, fp, zero_div):
 
 
 def _best_threshold_per_label(y_true_col, prob_col, zero_div):
-    """Return (t_opt, ba_opt) using Youden's J for a single label."""
     p = np.asarray(prob_col, dtype=float)
     uniq = np.unique(p)
     if uniq.size == 1:
@@ -105,11 +100,17 @@ class BalancedAccuracyMultilabel(evaluate.Metric):
         y_true_in = np.asarray(references, dtype=float)
         y_pred_in = np.asarray(predictions, dtype=float)
 
+        # basic checks
         if y_true_in.ndim != 2 or y_pred_in.ndim != 2:
             raise ValueError("Multilabel expects 2D arrays of shape (N, L).")
         if y_true_in.shape != y_pred_in.shape:
             raise ValueError(f"Shape mismatch: references {y_true_in.shape} vs predictions {y_pred_in.shape}.")
+        if not np.all(np.isfinite(y_pred_in)):
+            raise ValueError("`predictions` contains NaN/Inf.")
+        if average not in {"macro", "weighted", "micro"}:
+            raise ValueError("`average` must be one of {'macro','weighted','micro'}.")
 
+        # validity mask
         if ignore_index is not None:
             valid = (y_true_in != ignore_index)
         else:
@@ -119,10 +120,11 @@ class BalancedAccuracyMultilabel(evaluate.Metric):
             probs = y_pred_in
 
         N, L = y_true.shape
+        # weights
         if sample_weight is not None:
             w_in = np.asarray(sample_weight, dtype=float)
             if w_in.shape[0] != N:
-                raise ValueError("sample_weight length must match number of samples.")
+                raise ValueError("`sample_weight` length must match number of samples.")
         else:
             w_in = None
 
@@ -130,10 +132,26 @@ class BalancedAccuracyMultilabel(evaluate.Metric):
         if class_mask:
             labels = [j for j in class_mask if 0 <= j < L]
             if not labels:
-                return {"balanced_accuracy": float("nan")}
+                return {"balanced_accuracy": float("nan"), "reason": "empty_class_mask_after_filtering"}
 
-        per_label_ba = []
-        per_label_thr = []
+        # input value checks
+        if from_probas:
+            if threshold != "auto":
+                thr = 0.5 if threshold is None else float(threshold)
+                if not (0.0 < thr < 1.0):
+                    raise ValueError("`threshold` must be in (0,1) or 'auto' when from_probas=True.")
+            if np.any((probs < 0) | (probs > 1)):
+                raise ValueError("When from_probas=True, `predictions` must be in [0,1].")
+        else:
+            uniq = np.unique(probs[valid])
+            if not np.isin(uniq, [0.0, 1.0]).all():
+                raise ValueError("When from_probas=False, `predictions` must be 0/1 labels.")
+
+        # if everything invalid after ignore_index:
+        if not np.any(valid):
+            return {"balanced_accuracy": float("nan"), "reason": "empty_after_ignore_index"}
+
+        per_label_ba, per_label_thr = [], []
 
         if from_probas and threshold == "auto":
             for j in labels:
@@ -142,12 +160,7 @@ class BalancedAccuracyMultilabel(evaluate.Metric):
                 per_label_ba.append(float(ba_opt))
                 per_label_thr.append(float(t_opt))
         else:
-            if from_probas:
-                thr = 0.5 if threshold is None else float(threshold)
-                y_pred = (probs >= thr).astype(int)
-            else:
-                y_pred = probs.astype(int)
-
+            y_pred = (probs >= thr).astype(int) if from_probas else probs.astype(int)
             for j in labels:
                 vmask = valid[:, j]
                 yt, yp = y_true[vmask, j], y_pred[vmask, j]
@@ -178,7 +191,7 @@ class BalancedAccuracyMultilabel(evaluate.Metric):
             score = float(np.average(per_label_ba, weights=weights)) if weights.sum() > 0 else (
                 float(np.mean(per_label_ba)) if per_label_ba.size else float("nan")
             )
-        elif average == "micro":
+        else:  # micro
             TP = FP = TN = FN = 0.0
             for j in labels:
                 vmask = valid[:, j]
@@ -200,8 +213,6 @@ class BalancedAccuracyMultilabel(evaluate.Metric):
                 TN += float(wv[((yt == 0) & (yp == 0))].sum())
                 FP += float(wv[((yt == 0) & (yp == 1))].sum())
             score = float(_binary_ba(TP, FN, TN, FP, zero_division))
-        else:
-            raise ValueError("average must be one of {'macro','weighted','micro'}.")
 
         out = {"balanced_accuracy": score}
         if from_probas and threshold == "auto":
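A corresponding sketch for the multilabel script, again assuming local loading (path and values are illustrative). It exercises the newly validated from_probas path with per-label "auto" thresholds; per the docstring above, that combination also returns the chosen per_label_thresholds.

# Minimal sketch, assuming balanced_accuracy_multilabel.py from this commit is local.
import evaluate

ml_ba = evaluate.load("./balanced_accuracy_multilabel.py")  # local path is an assumption

preds = [[0.9, 0.2, 0.7],
         [0.1, 0.8, 0.4]]   # probabilities, so from_probas=True
refs = [[1, 0, 1],
        [0, 1, 0]]

res = ml_ba.compute(
    predictions=preds,
    references=refs,
    from_probas=True,
    threshold="auto",   # a per-label cutoff is chosen to maximize each label's BA
    average="macro",    # now validated up front against {'macro','weighted','micro'}
)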
balanced_topk_accuracy.py CHANGED
@@ -18,11 +18,8 @@ Args:
     k_list: Optional[list[int]] to compute multiple ks at once (e.g., [1,5]).
     class_mask: Optional[list[int]] — only average over these classes (e.g., tail classes).
     sample_weight: Optional[list[float]] — per-sample weights.
-    zero_division: float, default 0.0. Used when a class has no positive samples.
+    zero_division: float, default 0.0.
     return_per_class: bool, default False. If True, also return per-class recalls@k.
-Returns:
-    dict with {"balanced_topk_accuracy": float or dict[int,float]}.
-    If k_list provided, returns a dict mapping k -> score. Optionally adds "per_class_recall".
 """
 
 _CITATION = ""
@@ -59,23 +56,25 @@ class BalancedTopKAccuracy(evaluate.Metric):
         scores = np.asarray(predictions, dtype=float)
 
         if scores.ndim != 2 or y_true.ndim != 1:
-            raise ValueError("predictions must be (N, K) scores; references must be 1D labels.")
+            raise ValueError("`predictions` must be (N, K) scores; `references` must be 1D labels.")
         N, K = scores.shape
         if y_true.shape[0] != N:
-            raise ValueError(f"Shape mismatch: references length {y_true.shape[0]} vs predictions {N}.")
+            raise ValueError(f"Length mismatch: references {y_true.shape[0]} vs predictions {N}.")
+        if not np.all(np.isfinite(scores)):
+            raise ValueError("`predictions` contains NaN/Inf.")
         if (y_true < 0).any() or (y_true >= K).any():
-            raise ValueError(f"references must be within [0, {K-1}] for given predictions shape (N, {K}).")
+            raise ValueError(f"`references` must be within [0, {K-1}] for given predictions shape (N,{K}).")
 
         w = None
         if sample_weight is not None:
             w = np.asarray(sample_weight, dtype=float)
             if w.shape[0] != N:
-                raise ValueError("sample_weight length must match number of samples.")
+                raise ValueError("`sample_weight` length must match number of samples.")
 
         ks_in = k_list if k_list is not None else [k if k is not None else 1]
         ks = sorted(set(int(x) for x in ks_in if x is not None and int(x) >= 1))
         if not ks:
-            raise ValueError("Provide a positive integer k or a non-empty k_list.")
+            raise ValueError("Provide a positive integer `k` or a non-empty `k_list`.")
         ks = [min(kk, K) for kk in ks]
 
         if class_mask is None or len(class_mask) == 0:
@@ -83,7 +82,7 @@
         else:
             classes = [c for c in class_mask if 0 <= c < K]
             if not classes:
-                return {"balanced_topk_accuracy": float("nan")}
+                return {"balanced_topk_accuracy": float("nan"), "reason": "empty_class_mask_after_filtering"}
 
         sorted_idx = np.argsort(-scores, axis=1)
 
@@ -103,7 +102,7 @@
                 hits = float(w[mask_c][hit_mask].sum()) if w is not None else float(hit_mask.sum())
                 recalls.append(_div(hits, denom, zero_division))
 
-            ba_k = float(np.mean(recalls))
+            ba_k = float(np.mean(recalls)) if recalls else float("nan")
            results[kk] = ba_k
            if return_per_class:
                per_class[kk] = [float(x) for x in recalls]
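Finally, a sketch of the top-k variant under the same local-loading assumption (path and values are illustrative). Each class contributes its recall@k over samples of that class, and the macro average over the (optionally masked) classes is the balanced top-k accuracy.

# Minimal sketch, assuming balanced_topk_accuracy.py from this commit is local.
import evaluate

topk = evaluate.load("./balanced_topk_accuracy.py")  # local path is an assumption

scores = [[0.6, 0.3, 0.1],   # (N, K) class scores per sample
          [0.2, 0.5, 0.3],
          [0.1, 0.2, 0.7]]
refs = [0, 2, 2]

res = topk.compute(
    predictions=scores,
    references=refs,
    k_list=[1, 2],          # ks larger than K are clipped to K
    return_per_class=True,
)
# With k_list, "balanced_topk_accuracy" maps each k to its macro-averaged recall@k.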