OliverOnHF committed on
Commit
09dda22
·
verified ·
1 Parent(s): d6b86cf

Upload 3 files

Browse files
balanced_accuracy.py CHANGED
@@ -16,6 +16,7 @@ Extras
16
  - return_per_class=True: also return per-class recalls (multiclass)
17
  - class_mask=[...] (multiclass): average over a subset of classes
18
  - support_per_class: when return_per_class=True (multiclass), also return true sample counts per class
 
19
  """
20
 
21
  _KWARGS_DESCRIPTION = """
@@ -35,11 +36,12 @@ Args:
35
  ignore_index: int | None. If set, samples with reference == ignore_index are skipped.
36
  return_per_class: bool, default False — also return per-class recalls list (multiclass).
37
  class_mask: Optional[list[int]] — only average over these classes (multiclass).
 
38
  Returns:
39
  {"balanced_accuracy": float}
40
  + (binary, threshold="auto"): {"optimal_threshold": float}
41
  + (multiclass, return_per_class=True):
42
- {"per_class_recall": list[float], "support_per_class": list[int]}
43
  """
44
 
45
  _CITATION = ""
@@ -110,16 +112,27 @@ class BalancedAccuracy(evaluate.Metric):
110
  ignore_index: int | None = None,
111
  return_per_class: bool = False,
112
  class_mask: list[int] | None = None,
 
113
  ):
114
  y_true = np.asarray(references).astype(int)
115
  y_pred_in = np.asarray(predictions)
116
 
117
  if ignore_index is not None:
118
  mask = y_true != ignore_index
119
- y_true = y_true[mask]
120
- y_pred_in = y_pred_in[mask]
121
- if y_true.size == 0:
122
- return {"balanced_accuracy": float("nan")}
 
 
 
 
 
 
 
 
 
 
123
 
124
  if y_true.ndim != 1 or y_pred_in.ndim != 1:
125
  raise ValueError("`references`/`predictions` must be 1D.")
@@ -141,7 +154,20 @@ class BalancedAccuracy(evaluate.Metric):
141
  else:
142
  y_pred = y_pred_in.astype(int)
143
 
144
- ba = _binary_ba_from_labels(y_true, y_pred, zero_division)
 
 
 
 
 
 
 
 
 
 
 
 
 
145
  if adjusted:
146
  ba = 2 * ba - 1
147
  return {"balanced_accuracy": float(ba)}
@@ -163,9 +189,20 @@ class BalancedAccuracy(evaluate.Metric):
163
  if not classes:
164
  return {"balanced_accuracy": float("nan")}
165
 
166
- tp = np.array([(y_pred[y_true == c] == c).sum() for c in classes], dtype=float)
167
- fn = np.array([(y_pred[y_true == c] != c).sum() for c in classes], dtype=float)
168
- recall_c = _safe_div(tp, tp + fn, zero_division)
 
 
 
 
 
 
 
 
 
 
 
169
  ba = float(recall_c.mean())
170
  if adjusted:
171
  chance = 1.0 / float(len(classes))
@@ -174,5 +211,5 @@ class BalancedAccuracy(evaluate.Metric):
174
  out = {"balanced_accuracy": ba}
175
  if return_per_class:
176
  out["per_class_recall"] = recall_c.tolist()
177
- out["support_per_class"] = [int((y_true == c).sum()) for c in classes]
178
  return out
 
16
  - return_per_class=True: also return per-class recalls (multiclass)
17
  - class_mask=[...] (multiclass): average over a subset of classes
18
  - support_per_class: when return_per_class=True (multiclass), also return true sample counts per class
19
+ - sample_weight: per-sample weights for binary/multiclass; replaces counts with weighted sums
20
  """
21
 
22
  _KWARGS_DESCRIPTION = """
 
36
  ignore_index: int | None. If set, samples with reference == ignore_index are skipped.
37
  return_per_class: bool, default False — also return per-class recalls list (multiclass).
38
  class_mask: Optional[list[int]] — only average over these classes (multiclass).
39
+ sample_weight: Optional[list[float]] — per-sample weights.
40
  Returns:
41
  {"balanced_accuracy": float}
42
  + (binary, threshold="auto"): {"optimal_threshold": float}
43
  + (multiclass, return_per_class=True):
44
+ {"per_class_recall": list[float], "support_per_class": list[int or float]}
45
  """
46
 
47
  _CITATION = ""
 
112
  ignore_index: int | None = None,
113
  return_per_class: bool = False,
114
  class_mask: list[int] | None = None,
115
+ sample_weight: list[float] | None = None,
116
  ):
117
  y_true = np.asarray(references).astype(int)
118
  y_pred_in = np.asarray(predictions)
119
 
120
  if ignore_index is not None:
121
  mask = y_true != ignore_index
122
+ else:
123
+ mask = np.ones_like(y_true, dtype=bool)
124
+
125
+ y_true = y_true[mask]
126
+ y_pred_in = y_pred_in[mask]
127
+ if y_true.size == 0:
128
+ return {"balanced_accuracy": float("nan")}
129
+
130
+ w = None
131
+ if sample_weight is not None:
132
+ w_in = np.asarray(sample_weight, dtype=float)
133
+ if w_in.shape[0] != mask.shape[0]:
134
+ raise ValueError("sample_weight length must match number of samples.")
135
+ w = w_in[mask]
136
 
137
  if y_true.ndim != 1 or y_pred_in.ndim != 1:
138
  raise ValueError("`references`/`predictions` must be 1D.")
 
154
  else:
155
  y_pred = y_pred_in.astype(int)
156
 
157
+ if w is None:
158
+ tp = float(((y_true == 1) & (y_pred == 1)).sum())
159
+ fn = float(((y_true == 1) & (y_pred == 0)).sum())
160
+ tn = float(((y_true == 0) & (y_pred == 0)).sum())
161
+ fp = float(((y_true == 0) & (y_pred == 1)).sum())
162
+ else:
163
+ tp = float(w[((y_true == 1) & (y_pred == 1))].sum())
164
+ fn = float(w[((y_true == 1) & (y_pred == 0))].sum())
165
+ tn = float(w[((y_true == 0) & (y_pred == 0))].sum())
166
+ fp = float(w[((y_true == 0) & (y_pred == 1))].sum())
167
+
168
+ tpr = _safe_div(tp, tp + fn, zero_division)
169
+ tnr = _safe_div(tn, tn + fp, zero_division)
170
+ ba = 0.5 * (tpr + tnr)
171
  if adjusted:
172
  ba = 2 * ba - 1
173
  return {"balanced_accuracy": float(ba)}
 
189
  if not classes:
190
  return {"balanced_accuracy": float("nan")}
191
 
192
+ recalls, supports = [], []
193
+ for c in classes:
194
+ mask_c = (y_true == c)
195
+ if w is None:
196
+ denom = float(mask_c.sum())
197
+ num = float((mask_c & (y_pred == c)).sum())
198
+ supports.append(int(denom))
199
+ else:
200
+ denom = float((w[mask_c]).sum())
201
+ num = float((w[mask_c & (y_pred == c)]).sum())
202
+ supports.append(float(denom))
203
+ recalls.append(float(_safe_div(num, denom, zero_division)))
204
+
205
+ recall_c = np.asarray(recalls, dtype=float)
206
  ba = float(recall_c.mean())
207
  if adjusted:
208
  chance = 1.0 / float(len(classes))
 
211
  out = {"balanced_accuracy": ba}
212
  if return_per_class:
213
  out["per_class_recall"] = recall_c.tolist()
214
+ out["support_per_class"] = supports
215
  return out
balanced_accuracy_multilabel.py CHANGED
@@ -12,6 +12,7 @@ Extras:
12
  - class_mask=[...] (evaluate a subset of labels)
13
  - ignore_index to skip unlabeled samples (e.g., -100)
14
  - support_per_label: when return_per_label=True, also return true positive counts per label (after masking)
 
15
  """
16
 
17
  _KWARGS_DESCRIPTION = """
@@ -26,6 +27,7 @@ Args:
26
  class_mask: Optional[list[int]] — only average over these label indices.
27
  ignore_index: int | None, default None.
28
  return_per_label: bool, default False — also return per-label BA list (after masking).
 
29
  Returns:
30
  {"balanced_accuracy": float}
31
  + (from_probas & threshold='auto'): {"per_label_thresholds": list[float]}
@@ -98,6 +100,7 @@ class BalancedAccuracyMultilabel(evaluate.Metric):
98
  class_mask: list[int] | None = None,
99
  ignore_index: int | None = None,
100
  return_per_label: bool = False,
 
101
  ):
102
  y_true_in = np.asarray(references, dtype=float)
103
  y_pred_in = np.asarray(predictions, dtype=float)
@@ -115,7 +118,14 @@ class BalancedAccuracyMultilabel(evaluate.Metric):
115
  y_true = (y_true_in == 1).astype(int)
116
  probs = y_pred_in
117
 
118
- L = y_true.shape[1]
 
 
 
 
 
 
 
119
  labels = list(range(L))
120
  if class_mask:
121
  labels = [j for j in class_mask if 0 <= j < L]
@@ -124,7 +134,7 @@ class BalancedAccuracyMultilabel(evaluate.Metric):
124
 
125
  per_label_ba = []
126
  per_label_thr = []
127
- # ---- compute per-label BA ----
128
  if from_probas and threshold == "auto":
129
  for j in labels:
130
  vmask = valid[:, j]
@@ -141,29 +151,33 @@ class BalancedAccuracyMultilabel(evaluate.Metric):
141
  for j in labels:
142
  vmask = valid[:, j]
143
  yt, yp = y_true[vmask, j], y_pred[vmask, j]
144
- tp = float(((yt == 1) & (yp == 1)).sum())
145
- fn = float(((yt == 1) & (yp == 0)).sum())
146
- tn = float(((yt == 0) & (yp == 0)).sum())
147
- fp = float(((yt == 0) & (yp == 1)).sum())
 
 
 
 
 
 
 
148
  per_label_ba.append(float(_binary_ba(tp, fn, tn, fp, zero_division)))
149
 
150
  per_label_ba = np.asarray(per_label_ba, dtype=float)
151
 
152
- # 新增:每标签支持度(正样本数;考虑 ignore_index,有 class_mask 时按其顺序)
153
  support_per_label = []
154
  for j in labels:
155
  vmask = valid[:, j]
156
  support_per_label.append(int(y_true[vmask, j].sum()))
157
 
158
- # ---- aggregate ----
159
  if average == "macro":
160
  score = float(np.mean(per_label_ba)) if per_label_ba.size else float("nan")
161
  elif average == "weighted":
162
  weights = np.asarray(support_per_label, dtype=float)
163
- if weights.sum() == 0:
164
- score = float(np.mean(per_label_ba)) if per_label_ba.size else float("nan")
165
- else:
166
- score = float(np.average(per_label_ba, weights=weights))
167
  elif average == "micro":
168
  TP = FP = TN = FN = 0.0
169
  for j in labels:
@@ -174,10 +188,17 @@ class BalancedAccuracyMultilabel(evaluate.Metric):
174
  yp = (probs[vmask, j] >= t).astype(int)
175
  else:
176
  yp = y_pred[vmask, j] if 'y_pred' in locals() else (probs[vmask, j] >= 0.5).astype(int)
177
- TP += float(((yt == 1) & (yp == 1)).sum())
178
- FN += float(((yt == 1) & (yp == 0)).sum())
179
- TN += float(((yt == 0) & (yp == 0)).sum())
180
- FP += float(((yt == 0) & (yp == 1)).sum())
 
 
 
 
 
 
 
181
  score = float(_binary_ba(TP, FN, TN, FP, zero_division))
182
  else:
183
  raise ValueError("average must be one of {'macro','weighted','micro'}.")
 
12
  - class_mask=[...] (evaluate a subset of labels)
13
  - ignore_index to skip unlabeled samples (e.g., -100)
14
  - support_per_label: when return_per_label=True, also return true positive counts per label (after masking)
15
+ - sample_weight: per-sample weights; confusion counts become weighted sums for each label
16
  """
17
 
18
  _KWARGS_DESCRIPTION = """
 
27
  class_mask: Optional[list[int]] — only average over these label indices.
28
  ignore_index: int | None, default None.
29
  return_per_label: bool, default False — also return per-label BA list (after masking).
30
+ sample_weight: Optional[list[float]] — per-sample weights.
31
  Returns:
32
  {"balanced_accuracy": float}
33
  + (from_probas & threshold='auto'): {"per_label_thresholds": list[float]}
 
100
  class_mask: list[int] | None = None,
101
  ignore_index: int | None = None,
102
  return_per_label: bool = False,
103
+ sample_weight: list[float] | None = None,
104
  ):
105
  y_true_in = np.asarray(references, dtype=float)
106
  y_pred_in = np.asarray(predictions, dtype=float)
 
118
  y_true = (y_true_in == 1).astype(int)
119
  probs = y_pred_in
120
 
121
+ N, L = y_true.shape
122
+ if sample_weight is not None:
123
+ w_in = np.asarray(sample_weight, dtype=float)
124
+ if w_in.shape[0] != N:
125
+ raise ValueError("sample_weight length must match number of samples.")
126
+ else:
127
+ w_in = None
128
+
129
  labels = list(range(L))
130
  if class_mask:
131
  labels = [j for j in class_mask if 0 <= j < L]
 
134
 
135
  per_label_ba = []
136
  per_label_thr = []
137
+
138
  if from_probas and threshold == "auto":
139
  for j in labels:
140
  vmask = valid[:, j]
 
151
  for j in labels:
152
  vmask = valid[:, j]
153
  yt, yp = y_true[vmask, j], y_pred[vmask, j]
154
+ if w_in is None:
155
+ tp = float(((yt == 1) & (yp == 1)).sum())
156
+ fn = float(((yt == 1) & (yp == 0)).sum())
157
+ tn = float(((yt == 0) & (yp == 0)).sum())
158
+ fp = float(((yt == 0) & (yp == 1)).sum())
159
+ else:
160
+ wv = w_in[vmask]
161
+ tp = float(wv[((yt == 1) & (yp == 1))].sum())
162
+ fn = float(wv[((yt == 1) & (yp == 0))].sum())
163
+ tn = float(wv[((yt == 0) & (yp == 0))].sum())
164
+ fp = float(wv[((yt == 0) & (yp == 1))].sum())
165
  per_label_ba.append(float(_binary_ba(tp, fn, tn, fp, zero_division)))
166
 
167
  per_label_ba = np.asarray(per_label_ba, dtype=float)
168
 
 
169
  support_per_label = []
170
  for j in labels:
171
  vmask = valid[:, j]
172
  support_per_label.append(int(y_true[vmask, j].sum()))
173
 
 
174
  if average == "macro":
175
  score = float(np.mean(per_label_ba)) if per_label_ba.size else float("nan")
176
  elif average == "weighted":
177
  weights = np.asarray(support_per_label, dtype=float)
178
+ score = float(np.average(per_label_ba, weights=weights)) if weights.sum() > 0 else (
179
+ float(np.mean(per_label_ba)) if per_label_ba.size else float("nan")
180
+ )
 
181
  elif average == "micro":
182
  TP = FP = TN = FN = 0.0
183
  for j in labels:
 
188
  yp = (probs[vmask, j] >= t).astype(int)
189
  else:
190
  yp = y_pred[vmask, j] if 'y_pred' in locals() else (probs[vmask, j] >= 0.5).astype(int)
191
+ if w_in is None:
192
+ TP += float(((yt == 1) & (yp == 1)).sum())
193
+ FN += float(((yt == 1) & (yp == 0)).sum())
194
+ TN += float(((yt == 0) & (yp == 0)).sum())
195
+ FP += float(((yt == 0) & (yp == 1)).sum())
196
+ else:
197
+ wv = w_in[vmask]
198
+ TP += float(wv[((yt == 1) & (yp == 1))].sum())
199
+ FN += float(wv[((yt == 1) & (yp == 0))].sum())
200
+ TN += float(wv[((yt == 0) & (yp == 0))].sum())
201
+ FP += float(wv[((yt == 0) & (yp == 1))].sum())
202
  score = float(_binary_ba(TP, FN, TN, FP, zero_division))
203
  else:
204
  raise ValueError("average must be one of {'macro','weighted','micro'}.")
balanced_topk_accuracy.py CHANGED
@@ -6,6 +6,8 @@ _DESCRIPTION = """
6
  Balanced (macro) Top-K Accuracy for multiclass classification.
7
  For each class c, compute recall@k (fraction of samples of class c whose top-k predictions contain c),
8
  then macro-average over classes. Accepts (N, K) score/prob arrays.
 
 
9
  """
10
 
11
  _KWARGS_DESCRIPTION = """
@@ -15,6 +17,7 @@ Args:
15
  k: int, top-k (default 1). If None, use k_list instead.
16
  k_list: Optional[list[int]] to compute multiple ks at once (e.g., [1,5]).
17
  class_mask: Optional[list[int]] — only average over these classes (e.g., tail classes).
 
18
  zero_division: float, default 0.0. Used when a class has no positive samples.
19
  return_per_class: bool, default False. If True, also return per-class recalls@k.
20
  Returns:
@@ -25,6 +28,10 @@ Returns:
25
  _CITATION = ""
26
 
27
 
 
 
 
 
28
  class BalancedTopKAccuracy(evaluate.Metric):
29
  def _info(self):
30
  return evaluate.MetricInfo(
@@ -32,10 +39,8 @@ class BalancedTopKAccuracy(evaluate.Metric):
32
  citation=_CITATION,
33
  inputs_description=_KWARGS_DESCRIPTION,
34
  features=datasets.Features(
35
- {
36
- "predictions": datasets.Sequence(datasets.Value("float64")),
37
- "references": datasets.Value("int64"),
38
- }
39
  ),
40
  )
41
 
@@ -46,6 +51,7 @@ class BalancedTopKAccuracy(evaluate.Metric):
46
  k: int | None = 1,
47
  k_list: list[int] | None = None,
48
  class_mask: list[int] | None = None,
 
49
  zero_division: float = 0.0,
50
  return_per_class: bool = False,
51
  ):
@@ -60,6 +66,12 @@ class BalancedTopKAccuracy(evaluate.Metric):
60
  if (y_true < 0).any() or (y_true >= K).any():
61
  raise ValueError(f"references must be within [0, {K-1}] for given predictions shape (N, {K}).")
62
 
 
 
 
 
 
 
63
  ks_in = k_list if k_list is not None else [k if k is not None else 1]
64
  ks = sorted(set(int(x) for x in ks_in if x is not None and int(x) >= 1))
65
  if not ks:
@@ -82,13 +94,14 @@ class BalancedTopKAccuracy(evaluate.Metric):
82
  topk = sorted_idx[:, :kk]
83
  recalls = []
84
  for c in classes:
85
- mask = (y_true == c)
86
- denom = int(mask.sum())
87
  if denom == 0:
88
  recalls.append(float(zero_division))
89
  continue
90
- hits = int(np.any(topk[mask] == c, axis=1).sum())
91
- recalls.append(hits / denom)
 
92
 
93
  ba_k = float(np.mean(recalls))
94
  results[kk] = ba_k
 
6
  Balanced (macro) Top-K Accuracy for multiclass classification.
7
  For each class c, compute recall@k (fraction of samples of class c whose top-k predictions contain c),
8
  then macro-average over classes. Accepts (N, K) score/prob arrays.
9
+
10
+ Supports sample_weight to compute weighted recalls per class.
11
  """
12
 
13
  _KWARGS_DESCRIPTION = """
 
17
  k: int, top-k (default 1). If None, use k_list instead.
18
  k_list: Optional[list[int]] to compute multiple ks at once (e.g., [1,5]).
19
  class_mask: Optional[list[int]] — only average over these classes (e.g., tail classes).
20
+ sample_weight: Optional[list[float]] — per-sample weights.
21
  zero_division: float, default 0.0. Used when a class has no positive samples.
22
  return_per_class: bool, default False. If True, also return per-class recalls@k.
23
  Returns:
 
28
  _CITATION = ""
29
 
30
 
31
+ def _div(a, b, zero_div=0.0):
32
+ return (a / b) if b != 0 else float(zero_div)
33
+
34
+
35
  class BalancedTopKAccuracy(evaluate.Metric):
36
  def _info(self):
37
  return evaluate.MetricInfo(
 
39
  citation=_CITATION,
40
  inputs_description=_KWARGS_DESCRIPTION,
41
  features=datasets.Features(
42
+ {"predictions": datasets.Sequence(datasets.Value("float64")),
43
+ "references": datasets.Value("int64")}
 
 
44
  ),
45
  )
46
 
 
51
  k: int | None = 1,
52
  k_list: list[int] | None = None,
53
  class_mask: list[int] | None = None,
54
+ sample_weight: list[float] | None = None,
55
  zero_division: float = 0.0,
56
  return_per_class: bool = False,
57
  ):
 
66
  if (y_true < 0).any() or (y_true >= K).any():
67
  raise ValueError(f"references must be within [0, {K-1}] for given predictions shape (N, {K}).")
68
 
69
+ w = None
70
+ if sample_weight is not None:
71
+ w = np.asarray(sample_weight, dtype=float)
72
+ if w.shape[0] != N:
73
+ raise ValueError("sample_weight length must match number of samples.")
74
+
75
  ks_in = k_list if k_list is not None else [k if k is not None else 1]
76
  ks = sorted(set(int(x) for x in ks_in if x is not None and int(x) >= 1))
77
  if not ks:
 
94
  topk = sorted_idx[:, :kk]
95
  recalls = []
96
  for c in classes:
97
+ mask_c = (y_true == c)
98
+ denom = float(w[mask_c].sum()) if w is not None else float(mask_c.sum())
99
  if denom == 0:
100
  recalls.append(float(zero_division))
101
  continue
102
+ hit_mask = np.any(topk[mask_c] == c, axis=1)
103
+ hits = float(w[mask_c][hit_mask].sum()) if w is not None else float(hit_mask.sum())
104
+ recalls.append(_div(hits, denom, zero_division))
105
 
106
  ba_k = float(np.mean(recalls))
107
  results[kk] = ba_k