# balanced-accuracy / balanced_accuracy.py
# Uploaded by OliverOnHF (commit 831915f, verified)
import numpy as np
import evaluate
import datasets
# Long-form metric-card text; surfaced through MetricInfo(description=...) in _info().
_DESCRIPTION = """
Balanced Accuracy for imbalanced classification.
Definitions
- Binary: (TPR + TNR) / 2
- Multiclass: macro-average of per-class recall
Extras
- threshold="auto": pick the best threshold for binary probabilities (Youden's J)
- ignore_index: skip unlabeled samples (e.g., -100)
- adjusted=True: sklearn-style chance correction
- return_per_class=True: also return per-class recalls (multiclass)
- class_mask=[...] (multiclass): average over a subset of classes
- support_per_class: when return_per_class=True (multiclass), also return true sample counts per class
- sample_weight: per-sample weights for binary/multiclass; replaces counts with weighted sums
"""
# Argument/return contract; surfaced through MetricInfo(inputs_description=...) in _info().
_KWARGS_DESCRIPTION = """
Args:
predictions: 1D list/array.
Binary: integer labels {0,1}, or probabilities in [0,1] (if threshold given)
Multiclass: integer labels {0..K-1}
references: 1D list/array of integer labels.
task: "binary" | "multiclass" (Default: "binary")
num_classes: int, for multiclass; inferred if labels are 0..K-1.
adjusted: bool, default False. (binary: 2*BA-1; multiclass: (BA-1/K)/(1-1/K))
zero_division: float, default 0.0.
threshold: float in (0,1) or "auto" (binary only).
ignore_index: int | None. If set, samples with reference == ignore_index are skipped.
return_per_class: bool, default False — also return per-class recalls list (multiclass).
class_mask: Optional[list[int]] — only average over these classes (multiclass).
sample_weight: Optional[list[float]] — per-sample weights.
Returns:
{"balanced_accuracy": float}
+ (binary, threshold="auto"): {"optimal_threshold": float}
+ (multiclass, return_per_class=True):
{"per_class_recall": list[float], "support_per_class": list[int or float]}
"""
# No canonical citation for this metric; kept as an empty string.
_CITATION = ""
def _safe_div(num, den, zero_div=0.0):
num = np.asarray(num, dtype=float)
den = np.asarray(den, dtype=float)
out = np.full_like(num, float(zero_div))
mask = den != 0
out[mask] = num[mask] / den[mask]
return out
def _is_integer_like(arr, atol=1e-12):
"""Return True if all values are finite and very close to integers."""
arr = np.asarray(arr, dtype=float)
if not np.all(np.isfinite(arr)):
return False
return np.all(np.abs(arr - np.round(arr)) <= atol)
def _check_1d_same_len(y_true, y_pred, name_true="references", name_pred="predictions"):
if y_true.ndim != 1 or y_pred.ndim != 1:
raise ValueError(f"`{name_true}` and `{name_pred}` must be 1D.")
if y_true.shape[0] != y_pred.shape[0]:
raise ValueError(f"Length mismatch: `{name_true}`={y_true.shape[0]} vs `{name_pred}`={y_pred.shape[0]}.")
if not np.all(np.isfinite(y_pred)):
raise ValueError("`predictions` contains NaN/Inf.")
def _binary_ba_from_labels(y_true_i, y_pred_i, zero_div):
    """Balanced accuracy for hard 0/1 labels: mean of positive and negative recall."""
    actual_pos = y_true_i == 1
    actual_neg = y_true_i == 0
    predicted_pos = y_pred_i == 1
    predicted_neg = y_pred_i == 0
    tp = float((actual_pos & predicted_pos).sum())
    fn = float((actual_pos & predicted_neg).sum())
    tn = float((actual_neg & predicted_neg).sum())
    fp = float((actual_neg & predicted_pos).sum())
    # Per-class recall; `zero_div` is substituted when a class has no samples.
    tpr = _safe_div(tp, tp + fn, zero_div)
    tnr = _safe_div(tn, tn + fp, zero_div)
    return 0.5 * (tpr + tnr)
def _binary_find_best_threshold(y_true, probs, zero_div):
    """Scan decision thresholds and return (threshold, BA) maximizing balanced accuracy.

    Candidates are midpoints between consecutive unique scores plus one point
    just below the minimum and just above the maximum (i.e. predict-all-1 and
    predict-all-0). Among ties, the threshold closest to 0.5 wins.
    """
    scores = np.asarray(probs, dtype=float)
    levels = np.unique(scores)
    if levels.size == 1:
        candidates = [float(levels[0])]
    else:
        midpoints = ((levels[:-1] + levels[1:]) / 2.0).tolist()
        candidates = [float(levels[0] - 1e-12), *midpoints, float(levels[-1] + 1e-12)]
    truth = (np.asarray(y_true) == 1).astype(int)
    best_t, best_ba = None, -1.0
    for cand in candidates:
        hard = (scores >= cand).astype(int)
        ba = _binary_ba_from_labels(truth, hard, zero_div)
        is_better = ba > best_ba
        is_closer_tie = abs(ba - best_ba) < 1e-12 and (best_t is None or abs(cand - 0.5) < abs(best_t - 0.5))
        if is_better or is_closer_tie:
            best_ba, best_t = float(ba), float(cand)
    return best_t, best_ba
class BalancedAccuracy(evaluate.Metric):
    """Balanced accuracy for binary/multiclass classification (see _DESCRIPTION)."""

    def _info(self):
        # Inputs are declared float64 so the one schema accepts both integer
        # labels and binary probabilities.
        return evaluate.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {"predictions": datasets.Value("float64"),
                 "references": datasets.Value("float64")}
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.balanced_accuracy_score.html"
            ],
        )

    @staticmethod
    def _binary_counts(y_true, y_pred, w):
        """Confusion cells (tp, fn, tn, fp) as float sums; weighted when `w` given."""
        cells = []
        for true_val, pred_val in ((1, 1), (1, 0), (0, 0), (0, 1)):
            hit = (y_true == true_val) & (y_pred == pred_val)
            cells.append(float(hit.sum() if w is None else w[hit].sum()))
        return cells

    @classmethod
    def _binary_ba(cls, y_true, y_pred, w, zero_division):
        """(TPR + TNR) / 2, substituting `zero_division` for an empty class."""
        tp, fn, tn, fp = cls._binary_counts(y_true, y_pred, w)
        tpr = tp / (tp + fn) if (tp + fn) != 0 else float(zero_division)
        tnr = tn / (tn + fp) if (tn + fp) != 0 else float(zero_division)
        return 0.5 * (tpr + tnr)

    @classmethod
    def _auto_threshold(cls, y_true, probs, w, zero_division):
        """Pick the threshold maximizing (optionally weighted) balanced accuracy.

        Candidates: midpoints between consecutive unique scores plus one point
        below the min and one above the max (predict-all-1 / predict-all-0).
        Ties prefer the threshold closest to 0.5.
        """
        p = np.asarray(probs, dtype=float)
        uniq = np.unique(p)
        if uniq.size == 1:
            candidates = [float(uniq[0])]
        else:
            mids = (uniq[:-1] + uniq[1:]) / 2.0
            candidates = [float(uniq[0] - 1e-12), *mids.tolist(), float(uniq[-1] + 1e-12)]
        yt = (np.asarray(y_true) == 1).astype(int)
        best_t, best_ba = None, -1.0
        for t in candidates:
            ba = cls._binary_ba(yt, (p >= t).astype(int), w, zero_division)
            closer_tie = abs(ba - best_ba) < 1e-12 and (best_t is None or abs(t - 0.5) < abs(best_t - 0.5))
            if ba > best_ba or closer_tie:
                best_ba, best_t = float(ba), float(t)
        return best_t, best_ba

    def _compute(
        self,
        predictions,
        references,
        task: str = "binary",
        num_classes: int | None = None,
        adjusted: bool = False,
        zero_division: float = 0.0,
        threshold: float | str | None = None,
        ignore_index: int | None = None,
        return_per_class: bool = False,
        class_mask: list[int] | None = None,
        sample_weight: list[float] | None = None,
    ):
        """Compute balanced accuracy; see _KWARGS_DESCRIPTION for the full contract."""
        y_true_all = np.asarray(references).astype(int)
        y_pred_all = np.asarray(predictions)
        # Validate shapes BEFORE applying the ignore_index mask: a length
        # mismatch would otherwise surface as an opaque numpy IndexError when
        # indexing predictions with a mask shaped like references.
        if y_true_all.ndim != 1 or y_pred_all.ndim != 1:
            raise ValueError("`references` and `predictions` must be 1D.")
        if y_true_all.shape[0] != y_pred_all.shape[0]:
            raise ValueError(
                f"Length mismatch: `references`={y_true_all.shape[0]} vs `predictions`={y_pred_all.shape[0]}."
            )
        # ignore_index mask (e.g. -100 for unlabeled samples)
        if ignore_index is not None:
            keep = y_true_all != ignore_index
        else:
            keep = np.ones_like(y_true_all, dtype=bool)
        y_true = y_true_all[keep]
        y_pred_in = y_pred_all[keep]
        if y_true.size == 0:
            return {"balanced_accuracy": float("nan"), "reason": "empty_after_ignore_index"}
        # per-sample weights (validated against the pre-mask length, then masked)
        w = None
        if sample_weight is not None:
            w_in = np.asarray(sample_weight, dtype=float)
            if w_in.shape[0] != y_true_all.shape[0]:
                raise ValueError("`sample_weight` length must match number of samples.")
            w = w_in[keep]
        _check_1d_same_len(y_true, y_pred_in)  # NaN/Inf screen on the kept rows
        if task == "binary":
            return self._compute_binary(y_true, y_pred_in, w, adjusted, zero_division, threshold)
        if task != "multiclass":
            raise ValueError("`task` must be 'binary' or 'multiclass'.")
        return self._compute_multiclass(
            y_true, y_pred_in, w, num_classes, adjusted, zero_division, return_per_class, class_mask
        )

    def _compute_binary(self, y_true, y_pred_in, w, adjusted, zero_division, threshold):
        """Binary branch: resolve labels vs. probabilities, then BA = (TPR+TNR)/2."""
        uniq_pred = np.unique(y_pred_in)
        # Treat predictions as hard labels only when no threshold was requested.
        # Previously an explicit threshold (including "auto") was silently
        # ignored whenever the scores happened to be exactly 0.0/1.0; routing
        # those through the probability path is result-identical for float
        # thresholds (any 0 < t < 1 reproduces the labels) and makes "auto"
        # actually take effect.
        if threshold is None and np.isin(uniq_pred, [0.0, 1.0]).all():
            y_pred = y_pred_in.astype(int)
        elif threshold is None and _is_integer_like(y_pred_in):
            raise ValueError("For binary with label predictions, values must be 0/1.")
        else:
            if np.any((y_pred_in < 0) | (y_pred_in > 1)):
                raise ValueError("For binary with probabilities, `predictions` must be in [0,1].")
            if threshold == "auto":
                # sample_weight is honored in the search (previously ignored here).
                t_opt, ba = self._auto_threshold(y_true, y_pred_in, w, zero_division)
                if adjusted:
                    ba = 2 * ba - 1
                return {"balanced_accuracy": float(ba), "optimal_threshold": float(t_opt)}
            t = 0.5 if (threshold is None) else float(threshold)
            if not (0.0 < t < 1.0):
                raise ValueError("`threshold` must be in (0,1) or 'auto'.")
            y_pred = (y_pred_in >= t).astype(int)
        ba = self._binary_ba(y_true, y_pred, w, zero_division)
        if adjusted:
            ba = 2 * ba - 1  # chance correction: random (0.5) maps to 0
        return {"balanced_accuracy": float(ba)}

    def _compute_multiclass(self, y_true, y_pred_in, w, num_classes, adjusted,
                            zero_division, return_per_class, class_mask):
        """Multiclass branch: macro-average of per-class recall."""
        # Reject non-integer predictions instead of silently truncating them
        # (a bare astype(int) would have mapped e.g. 1.9 to class 1).
        if not _is_integer_like(y_pred_in):
            raise ValueError("For multiclass, `predictions` must be integer class ids.")
        y_pred = np.round(y_pred_in).astype(int)
        if num_classes is None:
            # Infer K from the largest observed label (assumes labels are 0..K-1).
            num_classes = int(max(y_true.max() if y_true.size else 0,
                                  y_pred.max() if y_pred.size else 0)) + 1
        if num_classes <= 0:
            raise ValueError("`num_classes` must be positive.")
        if (y_pred < 0).any() or (y_pred >= num_classes).any():
            raise ValueError(f"`predictions` must be in [0,{num_classes-1}] for multiclass.")
        if (y_true < 0).any() or (y_true >= num_classes).any():
            raise ValueError(f"`references` must be in [0,{num_classes-1}] for multiclass.")
        classes = list(range(num_classes))
        if class_mask is not None and len(class_mask) > 0:
            classes = [c for c in class_mask if 0 <= c < num_classes]
            if len(classes) == 0:
                return {"balanced_accuracy": float("nan"), "reason": "empty_class_mask_after_filtering"}
        recalls, supports = [], []
        for c in classes:
            in_class = y_true == c
            correct = in_class & (y_pred == c)
            if w is None:
                denom = float(in_class.sum())
                numer = float(correct.sum())
                supports.append(int(denom))
            else:
                denom = float(w[in_class].sum())
                numer = float(w[correct].sum())
                supports.append(float(denom))
            recalls.append(numer / denom if denom != 0 else float(zero_division))
        ba = float(np.mean(recalls))
        if adjusted:
            chance = 1.0 / float(len(classes))
            if chance == 1.0:
                # A single averaged class makes the correction 0/0; previously
                # this raised ZeroDivisionError. Report NaN instead.
                ba = float("nan")
            else:
                ba = float((ba - chance) / (1.0 - chance))
        out = {"balanced_accuracy": ba}
        if return_per_class:
            out["per_class_recall"] = [float(r) for r in recalls]
            out["support_per_class"] = supports
        return out