Upload balanced_accuracy.py
Browse files- balanced_accuracy.py +93 -65
balanced_accuracy.py
CHANGED
|
@@ -9,28 +9,34 @@ Definitions
|
|
| 9 |
- Binary: (TPR + TNR) / 2
|
| 10 |
- Multiclass: macro-average of per-class recall
|
| 11 |
|
| 12 |
-
|
| 13 |
-
-
|
| 14 |
- ignore_index: skip unlabeled samples (e.g., -100)
|
| 15 |
- adjusted=True: sklearn-style chance correction
|
|
|
|
|
|
|
| 16 |
"""
|
| 17 |
|
| 18 |
_KWARGS_DESCRIPTION = """
|
| 19 |
Args:
|
| 20 |
-
predictions: 1D list/array.
|
| 21 |
-
|
| 22 |
-
|
| 23 |
references: 1D list/array of integer labels.
|
| 24 |
-
task: "binary" | "multiclass"
|
| 25 |
-
num_classes: int,
|
| 26 |
adjusted: bool, default False. (binary: 2*BA-1; multiclass: (BA-1/K)/(1-1/K))
|
| 27 |
-
zero_division: float, default 0.0. Value used when a denominator is 0
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
|
|
|
|
|
|
|
|
|
| 32 |
Returns:
|
| 33 |
-
dict
|
|
|
|
| 34 |
"""
|
| 35 |
|
| 36 |
_CITATION = ""
|
|
@@ -45,26 +51,33 @@ def _safe_div(num, den, zero_div=0.0):
|
|
| 45 |
return out
|
| 46 |
|
| 47 |
|
| 48 |
-
def
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
return np.argmax(y_pred, axis=1).astype(int)
|
| 57 |
-
raise ValueError("Binary from_probas expects 1D (prob) or 2D (scores) predictions.")
|
| 58 |
-
return y_pred.astype(int)
|
| 59 |
|
| 60 |
|
| 61 |
-
def
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 68 |
|
| 69 |
|
| 70 |
class BalancedAccuracy(evaluate.Metric):
|
|
@@ -92,56 +105,71 @@ class BalancedAccuracy(evaluate.Metric):
|
|
| 92 |
num_classes: int | None = None,
|
| 93 |
adjusted: bool = False,
|
| 94 |
zero_division: float = 0.0,
|
| 95 |
-
|
| 96 |
-
threshold: float = 0.5,
|
| 97 |
-
proba_pos_index: int = 1,
|
| 98 |
ignore_index: int | None = None,
|
|
|
|
|
|
|
| 99 |
):
|
| 100 |
-
y_true = np.asarray(references)
|
| 101 |
-
|
| 102 |
|
| 103 |
if ignore_index is not None:
|
| 104 |
mask = y_true != ignore_index
|
| 105 |
y_true = y_true[mask]
|
| 106 |
-
|
| 107 |
if y_true.size == 0:
|
| 108 |
return {"balanced_accuracy": float("nan")}
|
| 109 |
|
| 110 |
if y_true.ndim != 1:
|
| 111 |
raise ValueError("`references` must be 1D integer labels, e.g., [0,1,1,0].")
|
|
|
|
|
|
|
| 112 |
|
|
|
|
| 113 |
if task == "binary":
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 124 |
if adjusted:
|
| 125 |
-
ba = 2 * ba - 1
|
| 126 |
return {"balanced_accuracy": float(ba)}
|
| 127 |
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
else:
|
| 136 |
-
num_classes = int(max(y_true.max(), y_pred.max())) + 1
|
| 137 |
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 146 |
|
| 147 |
-
|
|
|
|
|
|
|
|
|
|
|
|
| 9 |
- Binary: (TPR + TNR) / 2
|
| 10 |
- Multiclass: macro-average of per-class recall
|
| 11 |
|
| 12 |
+
Extras
|
| 13 |
+
- threshold="auto": pick the best threshold for binary probabilities (Youden's J)
|
| 14 |
- ignore_index: skip unlabeled samples (e.g., -100)
|
| 15 |
- adjusted=True: sklearn-style chance correction
|
| 16 |
+
- return_per_class=True: also return per-class recalls (multiclass)
|
| 17 |
+
- class_mask=[...] (multiclass): average over a subset of classes
|
| 18 |
"""
|
| 19 |
|
| 20 |
_KWARGS_DESCRIPTION = """
|
| 21 |
Args:
|
| 22 |
+
predictions: 1D list/array.
|
| 23 |
+
Binary: integer labels {0,1}, or probabilities in [0,1] (if threshold given)
|
| 24 |
+
Multiclass: integer labels {0..K-1}
|
| 25 |
references: 1D list/array of integer labels.
|
| 26 |
+
task: "binary" | "multiclass" (Default: "binary")
|
| 27 |
+
num_classes: int, for multiclass; inferred if labels are 0..K-1.
|
| 28 |
adjusted: bool, default False. (binary: 2*BA-1; multiclass: (BA-1/K)/(1-1/K))
|
| 29 |
+
zero_division: float, default 0.0. Value used when a denominator is 0.
|
| 30 |
+
threshold: float in (0,1) or "auto" (binary only).
|
| 31 |
+
- float: binarize probs via (prob >= threshold)
|
| 32 |
+
- "auto": choose threshold maximizing BA on given data (Youden's J)
|
| 33 |
+
- if None: treat `predictions` as 0/1 labels (no binarization)
|
| 34 |
+
ignore_index: int | None, default None. If set, samples with reference == ignore_index are skipped.
|
| 35 |
+
return_per_class: bool, default False (multiclass) — also return per-class recalls list.
|
| 36 |
+
class_mask: Optional[list[int]] — only average over these classes (multiclass).
|
| 37 |
Returns:
|
| 38 |
+
dict with at least {"balanced_accuracy": float}. If threshold="auto" (binary) adds {"optimal_threshold": float}.
|
| 39 |
+
If return_per_class=True (multiclass) adds {"per_class_recall": list[float]}.
|
| 40 |
"""
|
| 41 |
|
| 42 |
_CITATION = ""
|
|
|
|
| 51 |
return out
|
| 52 |
|
| 53 |
|
| 54 |
+
def _binary_ba_from_labels(y_true_i, y_pred_i, zero_div):
    """Balanced accuracy for hard 0/1 labels: the mean of TPR and TNR.

    Confusion-matrix cells are counted directly from boolean masks; when a
    denominator is zero (no positive or no negative samples present) the
    corresponding rate falls back to ``zero_div`` via ``_safe_div``.
    """
    actual_pos = (y_true_i == 1)
    actual_neg = (y_true_i == 0)
    pred_pos = (y_pred_i == 1)
    pred_neg = (y_pred_i == 0)

    true_pos = float((actual_pos & pred_pos).sum())
    false_neg = float((actual_pos & pred_neg).sum())
    true_neg = float((actual_neg & pred_neg).sum())
    false_pos = float((actual_neg & pred_pos).sum())

    sensitivity = _safe_div(true_pos, true_pos + false_neg, zero_div)
    specificity = _safe_div(true_neg, true_neg + false_pos, zero_div)
    return (sensitivity + specificity) / 2.0
|
|
|
|
|
|
|
|
|
|
| 62 |
|
| 63 |
|
| 64 |
+
def _binary_find_best_threshold(y_true, probs, zero_div):
    """Exhaustively scan thresholds and return ``(best_threshold, best_ba)``.

    Candidate thresholds are the midpoints between consecutive unique
    probability values, bracketed by sentinels just below the minimum and
    just above the maximum, so every achievable binarisation of ``probs``
    is evaluated (Youden's-J style selection). Exact ties in balanced
    accuracy are broken toward the candidate closest to 0.5.
    """
    scores = np.asarray(probs, dtype=float)
    unique_vals = np.unique(scores)

    if unique_vals.size == 1:
        candidates = [unique_vals[0]]
    else:
        midpoints = (unique_vals[:-1] + unique_vals[1:]) / 2.0
        candidates = [unique_vals[0] - 1e-12, *midpoints.tolist(), unique_vals[-1] + 1e-12]

    best_t = None
    best_ba = -1.0
    for cand in candidates:
        hard_labels = (scores >= cand).astype(int)
        ba = _binary_ba_from_labels(y_true, hard_labels, zero_div)
        # NOTE: the tie branch only evaluates after best_t is set (the very
        # first candidate always beats the -1.0 sentinel), so best_t is never
        # None when compared here.
        if ba > best_ba or (abs(ba - best_ba) < 1e-12 and abs(cand - 0.5) < abs(best_t - 0.5)):
            best_t = cand
            best_ba = ba
    return float(best_t), float(best_ba)
|
| 81 |
|
| 82 |
|
| 83 |
class BalancedAccuracy(evaluate.Metric):
|
|
|
|
| 105 |
num_classes: int | None = None,
|
| 106 |
adjusted: bool = False,
|
| 107 |
zero_division: float = 0.0,
|
| 108 |
+
threshold: float | str | None = None, # binary only: float or "auto" or None
|
|
|
|
|
|
|
| 109 |
ignore_index: int | None = None,
|
| 110 |
+
return_per_class: bool = False,
|
| 111 |
+
class_mask: list[int] | None = None,
|
| 112 |
):
|
| 113 |
+
y_true = np.asarray(references).astype(int)
|
| 114 |
+
y_pred_in = np.asarray(predictions)
|
| 115 |
|
| 116 |
if ignore_index is not None:
|
| 117 |
mask = y_true != ignore_index
|
| 118 |
y_true = y_true[mask]
|
| 119 |
+
y_pred_in = y_pred_in[mask]
|
| 120 |
if y_true.size == 0:
|
| 121 |
return {"balanced_accuracy": float("nan")}
|
| 122 |
|
| 123 |
if y_true.ndim != 1:
|
| 124 |
raise ValueError("`references` must be 1D integer labels, e.g., [0,1,1,0].")
|
| 125 |
+
if y_pred_in.ndim != 1:
|
| 126 |
+
raise ValueError("`predictions` must be 1D labels or probabilities (for binary).")
|
| 127 |
|
| 128 |
+
# ---- Binary ----
|
| 129 |
if task == "binary":
|
| 130 |
+
is_prob_like = not np.isin(np.unique(y_pred_in), [0.0, 1.0]).all()
|
| 131 |
+
|
| 132 |
+
if is_prob_like:
|
| 133 |
+
if threshold == "auto":
|
| 134 |
+
t_opt, ba = _binary_find_best_threshold(y_true, y_pred_in, zero_division)
|
| 135 |
+
if adjusted:
|
| 136 |
+
ba = 2 * ba - 1
|
| 137 |
+
return {"balanced_accuracy": float(ba), "optimal_threshold": float(t_opt)}
|
| 138 |
+
else:
|
| 139 |
+
t = 0.5 if (threshold is None) else float(threshold)
|
| 140 |
+
y_pred = (y_pred_in >= t).astype(int)
|
| 141 |
+
else:
|
| 142 |
+
y_pred = y_pred_in.astype(int)
|
| 143 |
+
|
| 144 |
+
ba = _binary_ba_from_labels(y_true, y_pred, zero_division)
|
| 145 |
if adjusted:
|
| 146 |
+
ba = 2 * ba - 1
|
| 147 |
return {"balanced_accuracy": float(ba)}
|
| 148 |
|
| 149 |
+
# ---- Multiclass ----
|
| 150 |
+
if task != "multiclass":
|
| 151 |
+
raise ValueError("`task` must be 'binary' or 'multiclass'.")
|
| 152 |
|
| 153 |
+
y_pred = y_pred_in.astype(int)
|
| 154 |
+
if num_classes is None:
|
| 155 |
+
num_classes = int(max(y_true.max() if y_true.size else 0, y_pred.max() if y_pred.size else 0)) + 1
|
|
|
|
|
|
|
| 156 |
|
| 157 |
+
classes = list(range(num_classes))
|
| 158 |
+
if class_mask is not None and len(class_mask) > 0:
|
| 159 |
+
classes = [c for c in class_mask if 0 <= c < num_classes]
|
| 160 |
+
if len(classes) == 0:
|
| 161 |
+
return {"balanced_accuracy": float("nan")}
|
| 162 |
+
|
| 163 |
+
tp = np.array([(y_pred[y_true == c] == c).sum() for c in classes], dtype=float)
|
| 164 |
+
fn = np.array([(y_pred[y_true == c] != c).sum() for c in classes], dtype=float)
|
| 165 |
+
recall_c = _safe_div(tp, tp + fn, zero_division)
|
| 166 |
+
ba = float(recall_c.mean())
|
| 167 |
+
|
| 168 |
+
if adjusted:
|
| 169 |
+
chance = 1.0 / float(len(classes))
|
| 170 |
+
ba = float((ba - chance) / (1.0 - chance))
|
| 171 |
|
| 172 |
+
out = {"balanced_accuracy": ba}
|
| 173 |
+
if return_per_class:
|
| 174 |
+
out["per_class_recall"] = recall_c.tolist()
|
| 175 |
+
return out
|