"""MacroLens unified-API evaluation layer (Phase 1E).
Public API
----------
>>> import whatif_bench.eval as ev
>>> metrics = ev.score("T1", y_true, y_pred,
... cluster_keys=meta["ticker"].values,
... close_last=meta["close_last"].values)
>>> df = ev.compare_methods("T1", run_records, correction="holm")
Hard rules (definitive — see the unified-API plan §5 / §7b):
* **Default ``resample="cluster"``** — bootstrap by ``ticker`` for
T1 / T2 / T3 / T5 / T6 / T7, by ``scenario_id`` for T4. Statistically
correct on panel data.
* **Adaptive ``n_boot``** — start at B=1,000; if
``(ci_hi - ci_lo) / max(|mean|, 1e-12) > 0.05`` escalate to B=10,000.
Cap at 10,000. Actual ``B`` recorded on the returned ``MetricValue``.
* **Close-anchor DA everywhere** — for T1, directional accuracy is
``mean(sign(y_pred[t] - close_last) == sign(y_true[t] - close_last))``
over the horizon. The legacy ``np.diff``-based formula is REMOVED.
``close_last`` is supplied via the ``close_last=`` kwarg (or
``meta["close_last"]`` by the runner). When unavailable we fall back to
``y_pred[:, 0]`` as the anchor and document the fallback in the metric's
metadata.
* **APE clip uniformly at 10×.** With ``return_sensitivity=True`` we also
emit MAPE at clips ``{5, 10, 20, ∞}``.
* **Multiple-comparisons correction is per-task** (Holm or BH). NO
cross-task FWER claim.
* All metric values are wrapped in ``MetricValue`` Pydantic models.
The per-task numerical logic is lifted verbatim from the legacy
``agents/valuation/evaluate.py`` module (which still passes the
``tests/test_evaluator_contract.py`` contract).
This module is a leaf — it does NO IO, imports nothing from
``methods/``, ``dataloader/`` or ``experiments/``.
"""
from __future__ import annotations
import logging
from typing import Any, Callable, Literal
import numpy as np
import pandas as pd
from .macrolens._types import MetricValue
logger = logging.getLogger(__name__)
# ===================================================================
# Constants
# ===================================================================
_BOOTSTRAP_INITIAL_N = 1_000
_BOOTSTRAP_MAX_N = 10_000
_BOOTSTRAP_CI_TOL = 0.05  # escalate when the relative CI width exceeds this
_APE_CLIP_DEFAULT = 10.0 # 1000% per-instance cap
_APE_SENSITIVITY_CLIPS: tuple[float, ...] = (5.0, 10.0, 20.0, float("inf"))
# ===================================================================
# Cluster bootstrap
# ===================================================================
def _bootstrap_ci(
values: np.ndarray,
*,
cluster_keys: np.ndarray | None = None,
agg_fn: Callable[[np.ndarray], float] = np.mean,
n_boot: int | Literal["adaptive"] = "adaptive",
alpha: float = 0.05,
seed: int = 42,
) -> tuple[float, float, float, float, int]:
"""Bootstrap confidence interval for ``agg_fn(values)``.
Parameters
----------
values
1-D float array of per-instance summary statistics.
cluster_keys
Optional cluster ID per row. When supplied, performs **cluster
bootstrap** (resample whole clusters with replacement; aggregate
all member rows). When ``None``, performs IID bootstrap.
agg_fn
Aggregator (default ``np.mean``).
n_boot
Either an explicit integer, or ``"adaptive"`` to start at 1,000 and
        escalate to 10,000 if the relative CI width,
        ``(ci_hi - ci_lo) / max(|value|, 1e-12)``, exceeds 5%.
alpha
Two-sided coverage; default 0.05 → 95% CI.
seed
RNG seed.
Returns
-------
``(value, ci_lo, ci_hi, std, n_boot_used)``
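
    Examples
    --------
    Illustrative only — interval endpoints vary with the bootstrap draw:

    >>> vals = np.array([1.0, 2.0, 3.0, 4.0])
    >>> point, lo, hi, std, b = _bootstrap_ci(
    ...     vals, cluster_keys=np.array(["A", "A", "B", "B"]), n_boot=500)
    >>> point, b
    (2.5, 500)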
"""
values = np.asarray(values, dtype=np.float64).ravel()
n = values.size
if n == 0:
nan = float("nan")
return nan, nan, nan, nan, 0
point = float(agg_fn(values))
# Build cluster index lookup once.
if cluster_keys is not None:
ck = np.asarray(cluster_keys).ravel()
if ck.size != n:
raise ValueError(
f"cluster_keys length {ck.size} != values length {n}"
)
# Map cluster → row indices.
unique_clusters, inverse = np.unique(ck, return_inverse=True)
        # cluster_rows[c] = row positions in ``values`` for cluster c.
cluster_rows: list[np.ndarray] = [
np.where(inverse == c)[0] for c in range(unique_clusters.size)
]
n_clusters = unique_clusters.size
else:
cluster_rows = []
n_clusters = 0
rng = np.random.default_rng(seed)
def _draw(b: int) -> np.ndarray:
out = np.empty(b, dtype=np.float64)
if cluster_keys is not None:
for i in range(b):
pick = rng.integers(0, n_clusters, size=n_clusters)
# Concatenate row indices for all picked clusters.
idx = np.concatenate([cluster_rows[c] for c in pick])
out[i] = agg_fn(values[idx])
else:
for i in range(b):
out[i] = agg_fn(values[rng.integers(0, n, size=n)])
return out
# Decide B.
if n_boot == "adaptive":
boot = _draw(_BOOTSTRAP_INITIAL_N)
lo = float(np.quantile(boot, alpha / 2))
hi = float(np.quantile(boot, 1 - alpha / 2))
rel_width = (hi - lo) / max(abs(point), 1e-12)
if rel_width > _BOOTSTRAP_CI_TOL and _BOOTSTRAP_MAX_N > _BOOTSTRAP_INITIAL_N:
extra = _draw(_BOOTSTRAP_MAX_N - _BOOTSTRAP_INITIAL_N)
boot = np.concatenate([boot, extra])
lo = float(np.quantile(boot, alpha / 2))
hi = float(np.quantile(boot, 1 - alpha / 2))
b_used = boot.size
else:
b_used = int(n_boot)
boot = _draw(b_used)
lo = float(np.quantile(boot, alpha / 2))
hi = float(np.quantile(boot, 1 - alpha / 2))
std = float(np.std(boot))
return point, lo, hi, std, b_used
def _wrap_metric(
values: np.ndarray,
*,
cluster_keys: np.ndarray | None,
agg_fn: Callable[[np.ndarray], float],
n_boot: int | Literal["adaptive"],
alpha: float,
seed: int,
resample: Literal["cluster", "iid"],
) -> MetricValue:
"""Bootstrap a per-instance vector and box it into a ``MetricValue``.
Returns a ``MetricValue`` with all fields ``None`` when ``values`` is
empty or every entry is non-finite (the metric cannot be defined).
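
    Example (illustrative; the CI endpoints depend on the bootstrap draw):

    >>> mv = _wrap_metric(np.array([1.0, 2.0, 3.0]),
    ...                   cluster_keys=np.array(["A", "A", "B"]),
    ...                   agg_fn=np.mean, n_boot=200, alpha=0.05,
    ...                   seed=0, resample="cluster")
    >>> mv.value
    2.0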
"""
arr = np.asarray(values, dtype=np.float64).ravel()
finite_mask = np.isfinite(arr)
if arr.size == 0 or not finite_mask.any():
return _none_metric(resample=resample)
if not finite_mask.all():
# Drop non-finite entries; align cluster_keys if supplied.
if cluster_keys is not None:
ck_arr = np.asarray(cluster_keys).ravel()
if ck_arr.size == arr.size:
cluster_keys = ck_arr[finite_mask]
# else: leave cluster_keys alone — _align_cluster_keys upstream
# may have already pre-filtered.
arr = arr[finite_mask]
if resample == "iid":
ck = None
else:
ck = cluster_keys
# Cluster bootstrap with one unique cluster collapses to a delta — fall
# back to IID resampling on that array so the std is still defined.
if ck is not None:
unique_ck = np.unique(np.asarray(ck).ravel())
if unique_ck.size < 2:
ck = None
point, lo, hi, std, b_used = _bootstrap_ci(
arr,
cluster_keys=ck,
agg_fn=agg_fn,
n_boot=n_boot,
alpha=alpha,
seed=seed,
)
if not np.isfinite(point):
return _none_metric(resample=resample)
# CI half-width / std may legitimately collapse to 0 (1-row arrays); keep
# those numerics rather than substituting None.
lo_v = lo if np.isfinite(lo) else point
hi_v = hi if np.isfinite(hi) else point
std_v = std if np.isfinite(std) else 0.0
return MetricValue(
value=float(point), ci_lo=float(lo_v), ci_hi=float(hi_v),
std=float(std_v), n_boot=int(b_used),
resample=resample,
)
def _scalar_metric(
value: float | None,
*,
resample: Literal["cluster", "iid"],
n_boot: int = 0,
) -> MetricValue:
"""Wrap a deterministic scalar (e.g. counts) without a bootstrap.
When ``value`` is ``None`` or NaN we emit a ``MetricValue`` whose
``value`` / ``ci_lo`` / ``ci_hi`` / ``std`` are all ``None`` so
consumers can detect "metric not applicable" via ``value is None``
rather than with a NaN finiteness probe.
"""
if value is None or (isinstance(value, float) and np.isnan(value)):
return MetricValue(
value=None,
ci_lo=None,
ci_hi=None,
std=None,
n_boot=int(n_boot),
resample=resample,
)
v = float(value)
return MetricValue(
value=v,
ci_lo=v,
ci_hi=v,
std=0.0,
n_boot=int(n_boot),
resample=resample,
)
def _none_metric(
*,
resample: Literal["cluster", "iid"],
) -> MetricValue:
"""Return a ``MetricValue`` indicating "metric not applicable / not computed"."""
return MetricValue(
value=None, ci_lo=None, ci_hi=None, std=None,
n_boot=0, resample=resample,
)
# ===================================================================
# Anchored DA helper
# ===================================================================
def _close_anchor_da(
y_true: np.ndarray, y_pred: np.ndarray, close_last: np.ndarray,
) -> np.ndarray:
"""Per-row close-anchor directional accuracy (T1).
For each row ``i`` and horizon step ``t`` we compare
``sign(y_true[i, t] - close_last[i])`` to
``sign(y_pred[i, t] - close_last[i])``. Per-row DA is the mean over
the horizon. Returns a length-N float array (NaN allowed for rows
where ``close_last`` is NaN).
NB: the legacy ``np.diff`` formula is intentionally removed.
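
    Example — anchor 100, realised path up/up, predicted up/down:

    >>> _close_anchor_da(np.array([[105.0, 110.0]]),
    ...                  np.array([[102.0, 95.0]]),
    ...                  np.array([100.0])).tolist()
    [0.5]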
"""
y_true = np.asarray(y_true, dtype=np.float64)
y_pred = np.asarray(y_pred, dtype=np.float64)
cl = np.asarray(close_last, dtype=np.float64).reshape(-1, 1)
if y_true.shape != y_pred.shape:
raise ValueError(
f"_close_anchor_da: shape mismatch y_true {y_true.shape} vs y_pred {y_pred.shape}"
)
if cl.shape[0] != y_true.shape[0]:
raise ValueError(
f"_close_anchor_da: close_last length {cl.shape[0]} != y rows {y_true.shape[0]}"
)
true_sign = np.sign(y_true - cl)
pred_sign = np.sign(y_pred - cl)
agree = (true_sign == pred_sign).astype(np.float64)
return agree.mean(axis=1)
# ===================================================================
# Per-task helpers
# ===================================================================
def _ape_per_instance(
pred: np.ndarray, actual: np.ndarray, *, clip: float = _APE_CLIP_DEFAULT,
near_zero: float = 0.0,
) -> tuple[np.ndarray, np.ndarray]:
"""Return (ape_vector_pct, kept_row_mask) clipped at ``clip × 100 %``.
Rows where ``|actual| <= near_zero`` (or NaN) are dropped from the
returned vectors; the second return value is the boolean mask of rows
that survived (in the original ordering).
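
    Example — the zero-actual row is dropped, not scored:

    >>> ape, kept = _ape_per_instance(np.array([150.0, 50.0]),
    ...                               np.array([100.0, 0.0]))
    >>> ape.tolist(), kept.tolist()
    ([50.0], [True, False])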
"""
pred = np.asarray(pred, dtype=np.float64).ravel()
actual = np.asarray(actual, dtype=np.float64).ravel()
mask = (
np.isfinite(pred)
& np.isfinite(actual)
& (np.abs(actual) > near_zero)
)
p = pred[mask]
a = actual[mask]
ape = np.abs((p - a) / a)
if np.isfinite(clip):
ape = np.minimum(ape, clip)
return ape * 100.0, mask # percent units
def _normalize_field_col(df: pd.DataFrame) -> pd.DataFrame:
"""T6 (Gen-Eval) GT uses ``generator_field``; T3 uses ``field``."""
if "field" not in df.columns and "generator_field" in df.columns:
return df.rename(columns={"generator_field": "field"})
return df
# -------------------------------------------------------------------
# T1 — Time-Series Forecasting
# -------------------------------------------------------------------
def _per_task_score_T1(
y_true: Any,
y_pred: Any,
*,
cluster_keys: np.ndarray | None,
close_last: np.ndarray | None,
n_boot: int | Literal["adaptive"],
alpha: float,
seed: int,
resample: Literal["cluster", "iid"],
return_sensitivity: bool,
) -> dict[str, MetricValue]:
y_true_a = np.asarray(y_true, dtype=np.float64)
y_pred_a = np.asarray(y_pred, dtype=np.float64).copy()
if y_true_a.ndim != 2 or y_pred_a.ndim != 2 or y_true_a.shape != y_pred_a.shape:
raise ValueError(
f"T1 score: shape mismatch — y_true {y_true_a.shape}, "
f"y_pred {y_pred_a.shape}; expected matching (N, horizon) arrays."
)
n, horizon = y_true_a.shape
if n == 0:
raise ValueError("T1 score: empty arrays.")
# NaN penalty: substitute any NaN/inf prediction row with ZERO.
# Failed parses thus get a clear no-signal penalty (MSE ≈ y_true²,
# MAE = |y_true|) that is distinct from any meaningful model output.
nan_row_mask = ~np.isfinite(y_pred_a).all(axis=1)
if nan_row_mask.any():
y_pred_a[nan_row_mask, :] = 0.0
# Per-instance aggregates (the bootstrap unit is the instance, with
# cluster bootstrap pooling across ticker rows).
per_inst_mse = ((y_pred_a - y_true_a) ** 2).mean(axis=1)
per_inst_mae = np.abs(y_pred_a - y_true_a).mean(axis=1)
# Close-anchor DA. Fall back to y_pred[:, 0] if unavailable (documented).
da_fallback = False
if close_last is None:
close_last_v = y_pred_a[:, 0].astype(np.float64)
da_fallback = True
logger.warning(
"T1 score: close_last not supplied; falling back to y_pred[:, 0] "
"as the directional anchor. This degrades the DA interpretation."
)
else:
close_last_v = np.asarray(close_last, dtype=np.float64).ravel()
per_inst_da = _close_anchor_da(y_true_a, y_pred_a, close_last_v)
out: dict[str, MetricValue] = {}
out["mse"] = _wrap_metric(
per_inst_mse, cluster_keys=cluster_keys, agg_fn=np.mean,
n_boot=n_boot, alpha=alpha, seed=seed, resample=resample,
)
out["mae"] = _wrap_metric(
per_inst_mae, cluster_keys=cluster_keys, agg_fn=np.mean,
n_boot=n_boot, alpha=alpha, seed=seed, resample=resample,
)
# rmse is sqrt(mean(mse_per_inst)) — bootstrap on the same sqrt(mean)
# aggregator gives an honest CI.
rmse_ck = cluster_keys if resample == "cluster" else None
if rmse_ck is not None:
unique_rmse_ck = np.unique(np.asarray(rmse_ck).ravel())
if unique_rmse_ck.size < 2:
rmse_ck = None
rmse_val, rmse_lo, rmse_hi, rmse_std, rmse_b = _bootstrap_ci(
per_inst_mse,
cluster_keys=rmse_ck,
agg_fn=lambda x: float(np.sqrt(np.mean(x))),
n_boot=n_boot, alpha=alpha, seed=seed,
)
if not np.isfinite(rmse_val):
out["rmse"] = _none_metric(resample=resample)
else:
out["rmse"] = MetricValue(
value=float(rmse_val),
ci_lo=float(rmse_lo) if np.isfinite(rmse_lo) else float(rmse_val),
ci_hi=float(rmse_hi) if np.isfinite(rmse_hi) else float(rmse_val),
std=float(rmse_std) if np.isfinite(rmse_std) else 0.0,
n_boot=int(rmse_b), resample=resample,
)
out["directional_accuracy"] = _wrap_metric(
per_inst_da, cluster_keys=cluster_keys, agg_fn=np.nanmean,
n_boot=n_boot, alpha=alpha, seed=seed, resample=resample,
)
    # MASE — Mean Absolute Scaled Error. Each row's MAE is scaled by a
    # naive-persistence proxy denominator, |y_true[i, 0] - close_last[i]|
    # (the first-step move off the anchor). A true in-sample seasonal-naive
    # MAE needs the lookback window, which this layer does not receive, so
    # the anchor step stands in for it. Cluster-bootstraps over instances.
denom = np.abs(y_true_a[:, 0] - close_last_v)
denom_safe = np.where(denom > 1e-9, denom, np.nan)
per_inst_mase = per_inst_mae / denom_safe
valid_mase = np.isfinite(per_inst_mase)
if valid_mase.any():
ck_mase = (cluster_keys[valid_mase]
if cluster_keys is not None else None)
out["mase"] = _wrap_metric(
per_inst_mase[valid_mase], cluster_keys=ck_mase, agg_fn=np.mean,
n_boot=n_boot, alpha=alpha, seed=seed, resample=resample,
)
else:
out["mase"] = _none_metric(resample=resample)
out["n_instances"] = _scalar_metric(n, resample=resample)
if da_fallback:
# Best-effort metadata — store a sentinel so consumers can detect.
out["directional_accuracy_anchor_fallback"] = _scalar_metric(
1.0, resample=resample,
)
if return_sensitivity:
        # T1's natural target is MSE/MAE; APE-style sensitivity is most
        # meaningful relative to ``close_last``. Compute the final-step
        # absolute error |y_pred[:, -1] - y_true[:, -1]| scaled by
        # |close_last| (an anchor-relative APE, in percent).
if close_last is not None:
denom = np.abs(close_last_v)
denom_mask = denom > 0
if denom_mask.any():
final_err = np.abs(y_pred_a[:, -1] - y_true_a[:, -1])
ape_full = (final_err[denom_mask] / denom[denom_mask]) * 100.0
for clip in _APE_SENSITIVITY_CLIPS:
if np.isfinite(clip):
clipped = np.minimum(ape_full, clip * 100.0)
else:
clipped = ape_full
key = (
f"mape_at_clip_{int(clip)}x" if np.isfinite(clip)
else "mape_at_clip_inf"
)
out[key] = _wrap_metric(
clipped,
cluster_keys=(
cluster_keys[denom_mask] if cluster_keys is not None
else None
),
agg_fn=np.mean, n_boot=n_boot, alpha=alpha, seed=seed,
resample=resample,
)
return out
# -------------------------------------------------------------------
# T2 / T5 — Point-in-time valuation
# -------------------------------------------------------------------
def _adapt_t2_t5(y_true: Any, y_pred: Any) -> tuple[pd.DataFrame, pd.DataFrame]:
"""Coerce (y_true, y_pred) into (predictions_df, ground_truth_df).
Accepts the unified-API loader contract for T2/T5: ``y_true`` is an
``np.ndarray (N,)`` of ``actual_market_cap`` values. Also tolerates
the legacy DataFrame form ``[ticker, date, actual_market_cap]``.
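
    Example (array form; synthetic tickers are generated):

    >>> pred, gt = _adapt_t2_t5(np.array([1.0e9, 2.0e9]),
    ...                         np.array([1.1e9, 1.9e9]))
    >>> list(pred.columns)
    ['ticker', 'date', 'predicted_equity_value']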
"""
if isinstance(y_true, pd.DataFrame) and "actual_market_cap" in y_true.columns:
gt = y_true.reset_index(drop=True)
elif isinstance(y_true, (np.ndarray, list, pd.Series)):
arr = np.asarray(y_true).ravel().astype(np.float64)
gt = pd.DataFrame({
"ticker": [f"row_{i}" for i in range(len(arr))],
"date": pd.NaT,
"actual_market_cap": arr,
})
else:
raise ValueError(
f"T2/T5 score: y_true must be ndarray (N,) or DataFrame; "
f"got {type(y_true).__name__}."
)
if isinstance(y_pred, pd.DataFrame):
if "predicted_equity_value" in y_pred.columns:
pred = y_pred.reset_index(drop=True)
else:
raise ValueError(
"T2/T5 score: y_pred DataFrame must have 'predicted_equity_value'."
)
else:
arr = np.asarray(y_pred).ravel()
if len(arr) != len(gt):
raise ValueError(
f"T2/T5 score: y_pred length {len(arr)} != y_true rows {len(gt)}."
)
pred = pd.DataFrame({
"ticker": gt["ticker"].values,
"date": gt["date"].values,
"predicted_equity_value": arr,
})
return pred, gt
def _per_task_score_T2_T5(
y_true: Any,
y_pred: Any,
*,
cluster_keys: np.ndarray | None,
n_boot: int | Literal["adaptive"],
alpha: float,
seed: int,
resample: Literal["cluster", "iid"],
return_sensitivity: bool,
) -> dict[str, MetricValue]:
pred_df, gt_df = _adapt_t2_t5(y_true, y_pred)
merged = pred_df.merge(gt_df, on=["ticker", "date"], how="inner")
    # NaN predictions are penalized below by substituting ZERO, which makes
    # APE = |0 - actual| / |actual| = 100% per row. Only rows with
    # non-positive ground truth are dropped — those are eval-side data
    # issues, not method failures.
    # Ground truth must never be NaN — if it is, that's a data-side bug
    # (loader / preprocessing). Surface it instead of silently dropping.
gt_nan = merged["actual_market_cap"].isna().sum()
if gt_nan > 0:
raise ValueError(
f"T2/T5 score: {gt_nan} rows have NaN ground truth (actual_market_cap). "
"This is a loader/preprocessing bug — fix at data source."
)
valid = merged[merged["actual_market_cap"] > 0].reset_index(drop=True)
if valid.empty:
# No overlap between predictions and ground truth (or no positive
# ground truth): every gt row is "missing prediction" → fillna(0)
# penalty rule applies → APE = 100% per row. Saturate so the cell
# still scores (no silent score_failed).
gt_act = pd.to_numeric(gt_df["actual_market_cap"], errors="coerce").values.astype(np.float64)
gt_keep = np.isfinite(gt_act) & (gt_act > 0)
if gt_keep.any():
ape_gt = np.minimum(
np.abs(gt_act[gt_keep]) / np.abs(gt_act[gt_keep]),
_APE_CLIP_DEFAULT,
) * 100.0
ck = gt_df["ticker"].astype(str).values[gt_keep] if resample == "cluster" else None
out: dict[str, MetricValue] = {
"mape": _wrap_metric(ape_gt, cluster_keys=ck, agg_fn=np.mean,
n_boot=n_boot, alpha=alpha, seed=seed, resample=resample),
"median_ape": _wrap_metric(ape_gt, cluster_keys=ck, agg_fn=np.median,
n_boot=n_boot, alpha=alpha, seed=seed, resample=resample),
"rank_correlation": _scalar_metric(None, resample=resample),
"rank_p_value": _scalar_metric(None, resample=resample),
"n_predictions": _scalar_metric(0, resample=resample),
"n_tickers": _scalar_metric(0, resample=resample),
}
return out
# Fully degenerate (no rows at all on either side) — last-resort scalar.
return {
"mape": _scalar_metric(100.0, resample=resample),
"median_ape": _scalar_metric(100.0, resample=resample),
"rank_correlation": _scalar_metric(None, resample=resample),
"rank_p_value": _scalar_metric(None, resample=resample),
"n_predictions": _scalar_metric(0, resample=resample),
"n_tickers": _scalar_metric(0, resample=resample),
}
# NaN penalty: substitute NaN predictions with ZERO (no-signal). APE
# = |0 - actual| / |actual| = 100% per row, then clipped at clip_default.
nan_mask = ~np.isfinite(valid["predicted_equity_value"].values)
n_nan_substituted = int(nan_mask.sum())
valid.loc[nan_mask, "predicted_equity_value"] = 0.0
# APE clipped at 10× = 1000%, returned in percent.
ape_pct, kept_mask = _ape_per_instance(
valid["predicted_equity_value"].values,
valid["actual_market_cap"].values,
clip=_APE_CLIP_DEFAULT,
)
valid_kept = valid.loc[kept_mask].reset_index(drop=True)
cluster_kept = (
valid_kept["ticker"].astype(str).values
if cluster_keys is None
else _align_cluster_keys(cluster_keys, len(valid), kept_mask)
)
out: dict[str, MetricValue] = {}
out["mape"] = _wrap_metric(
ape_pct, cluster_keys=cluster_kept, agg_fn=np.mean,
n_boot=n_boot, alpha=alpha, seed=seed, resample=resample,
)
out["median_ape"] = _wrap_metric(
ape_pct, cluster_keys=cluster_kept, agg_fn=np.median,
n_boot=n_boot, alpha=alpha, seed=seed, resample=resample,
)
    # Spearman rank correlation via ``scipy.stats.spearmanr``. When either
    # input vector is constant (e.g. dry-run engines emit a single
    # placeholder value) scipy returns NaN; emit None so the metric is
    # treated as "not applicable".
from scipy.stats import spearmanr
rho_raw, p_raw = spearmanr(
valid_kept["predicted_equity_value"].values,
valid_kept["actual_market_cap"].values,
)
rho = float(rho_raw) if rho_raw is not None and not np.isnan(rho_raw) else None
p_val = float(p_raw) if p_raw is not None and not np.isnan(p_raw) else None
out["rank_correlation"] = _scalar_metric(rho, resample=resample)
out["rank_p_value"] = _scalar_metric(p_val, resample=resample)
out["n_predictions"] = _scalar_metric(int(len(valid_kept)), resample=resample)
out["n_tickers"] = _scalar_metric(
int(valid_kept["ticker"].nunique()), resample=resample,
)
if return_sensitivity:
raw_pred = valid["predicted_equity_value"].values
raw_act = valid["actual_market_cap"].values
for clip in _APE_SENSITIVITY_CLIPS:
ape_v, mask = _ape_per_instance(raw_pred, raw_act, clip=clip)
ck_v = _align_cluster_keys(
cluster_keys if cluster_keys is not None
else valid["ticker"].astype(str).values,
len(valid), mask,
)
key = (
f"mape_at_clip_{int(clip)}x" if np.isfinite(clip)
else "mape_at_clip_inf"
)
out[key] = _wrap_metric(
ape_v, cluster_keys=ck_v, agg_fn=np.mean,
n_boot=n_boot, alpha=alpha, seed=seed, resample=resample,
)
return out
# -------------------------------------------------------------------
# T3 / T6 — Statement-/Generation-eval
# -------------------------------------------------------------------
def _per_task_score_T3_T6(
y_true: Any,
y_pred: Any,
*,
task: str,
cluster_keys: np.ndarray | None,
n_boot: int | Literal["adaptive"],
alpha: float,
seed: int,
resample: Literal["cluster", "iid"],
return_sensitivity: bool,
) -> dict[str, MetricValue]:
"""Inputs are long-form DataFrames.
* y_true: ``[ticker, fiscal_year, field, value]`` (T6 GT may use
``generator_field`` instead of ``field``; we normalise).
* y_pred: ``[ticker, fiscal_year, field, pred]`` (or ``value`` /
``predicted_value`` — we accept either).
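
    Example (illustrative single-field frames; the 10× clip never binds):

    >>> gt = pd.DataFrame({"ticker": ["AAA"], "fiscal_year": [2020],
    ...                    "field": ["Assets"], "value": [100.0]})
    >>> pr = gt.assign(value=[110.0])
    >>> out = _per_task_score_T3_T6(gt, pr, task="T3", cluster_keys=None,
    ...                             n_boot=100, alpha=0.05, seed=0,
    ...                             resample="iid", return_sensitivity=False)
    >>> round(out["overall_mape"].value, 1)
    10.0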
"""
if not isinstance(y_true, pd.DataFrame) or not isinstance(y_pred, pd.DataFrame):
raise ValueError(
f"{task} score: y_true and y_pred must be long-form DataFrames."
)
gt = _normalize_field_col(y_true).copy()
pred = _normalize_field_col(y_pred).copy()
# Normalise the value column on the prediction side (accept both
# ``pred`` and ``value`` names so T6 short-circuit emitters can use
# either).
pred_value_col: str | None = None
for cand in ("pred", "value", "predicted_value"):
if cand in pred.columns:
pred_value_col = cand
break
if pred_value_col is None:
raise ValueError(
f"{task} score: y_pred must have a 'pred' (or 'value') column."
)
join_keys = ["ticker", "field"]
if "fiscal_year" in gt.columns and "fiscal_year" in pred.columns:
join_keys = ["ticker", "fiscal_year", "field"]
n_field_misses = 0
if "fiscal_year" in gt.columns:
gt_keys = set(zip(*[gt[k] for k in join_keys]))
pred_keys = set(zip(*[pred[k] for k in join_keys]))
n_field_misses = len(gt_keys - pred_keys)
merged = pred.merge(
gt, on=join_keys, how="inner",
suffixes=("_pred", "_actual"),
)
n_fields_matched = int(len(merged))
out: dict[str, MetricValue] = {
"n_fields_matched": _scalar_metric(n_fields_matched, resample=resample),
"n_field_misses": _scalar_metric(int(n_field_misses), resample=resample),
"n_tickers": _scalar_metric(
int(merged["ticker"].nunique()) if not merged.empty else 0,
resample=resample,
),
}
if merged.empty:
# No (ticker, fiscal_year, field) overlap between predictions and
# ground truth: every y_true row is "missing" → fillna(0) penalty
# rule applies → APE = min(|0 - actual| / |actual|, clip) on
# |actual| ≥ 1.0 rows. Treat as a 100%-saturation failure so the
# cell still scores (no silent score_failed).
gt_act = pd.to_numeric(gt["value"], errors="coerce").values.astype(np.float64)
gt_keep = np.isfinite(gt_act) & (np.abs(gt_act) >= 1.0)
if gt_keep.any():
ape_gt = np.minimum(
np.abs(gt_act[gt_keep]) / np.abs(gt_act[gt_keep]),
_APE_CLIP_DEFAULT,
) * 100.0 # =100% on every row (predict-zero penalty)
ck = gt["ticker"].astype(str).values[gt_keep] if resample == "cluster" else None
out["overall_mape"] = _wrap_metric(
ape_gt, cluster_keys=ck, agg_fn=np.mean,
n_boot=n_boot, alpha=alpha, seed=seed, resample=resample,
)
else:
out["overall_mape"] = _scalar_metric(100.0, resample=resample)
out["per_field_mape"] = _none_metric(resample=resample)
if task == "T3":
out["balance_equation_accuracy"] = _scalar_metric(0.0, resample=resample)
out["success_rate"] = _scalar_metric(0.0, resample=resample)
return out
# Per-row APE in percent (clip 10×, |actual| ≥ 1.0).
pred_col = f"{pred_value_col}_pred" if pred_value_col != "value" else "value_pred"
if pred_col not in merged.columns:
# When pred_value_col == "value", the suffix path above lands at
# "value_pred"; otherwise the merge keeps the original name.
pred_col = pred_value_col + "_pred" if pred_value_col + "_pred" in merged.columns else pred_value_col
actual_col = "value_actual" if "value_actual" in merged.columns else "value"
pred_vals = pd.to_numeric(merged[pred_col], errors="coerce").values
act_vals = pd.to_numeric(merged[actual_col], errors="coerce").values
pred_arr = np.asarray(pred_vals, dtype=np.float64)
act_arr = np.asarray(act_vals, dtype=np.float64)
# NaN penalty: substitute NaN predictions with ZERO (no-signal)
# so unparseable field-tuples contribute APE=100% (clipped to
# _APE_CLIP_DEFAULT) rather than being silently excluded.
pred_nan = ~np.isfinite(pred_arr)
if pred_nan.any():
pred_arr[pred_nan] = 0.0
keep = np.isfinite(pred_arr) & np.isfinite(act_arr) & (np.abs(act_arr) >= 1.0)
ape = np.abs((pred_arr[keep] - act_arr[keep]) / act_arr[keep])
ape = np.minimum(ape, _APE_CLIP_DEFAULT) * 100.0
cluster_for_ape = merged.loc[keep, "ticker"].astype(str).values
out["overall_mape"] = _wrap_metric(
ape, cluster_keys=cluster_for_ape if resample == "cluster" else None,
agg_fn=np.mean, n_boot=n_boot, alpha=alpha, seed=seed,
resample=resample,
)
    # Per-field MAPE table. ``MetricValue.value`` is a scalar float, so the
    # per-field dict cannot ride on a single MetricValue; instead we surface
    # (a) the number of fields with a defined MAPE and (b) a count-weighted
    # average across fields (see below).
per_field: dict[str, float] = {}
field_weights: dict[str, int] = {}
for f, grp in merged.loc[keep].groupby(merged.loc[keep, "field"]):
gp = pd.to_numeric(grp[pred_col], errors="coerce")
ga = pd.to_numeric(grp[actual_col], errors="coerce")
valid = pd.DataFrame({"gp": gp, "ga": ga}).dropna()
valid = valid[valid["ga"].abs() >= 1.0]
if valid.empty:
continue
f_ape = np.minimum(
np.abs((valid["gp"].values - valid["ga"].values) / valid["ga"].values),
_APE_CLIP_DEFAULT,
)
per_field[str(f)] = float(f_ape.mean()) * 100.0
field_weights[str(f)] = int(len(valid))
# Surface per_field as a deterministic scalar metric (n_fields_with_mape).
out["n_fields_with_mape"] = _scalar_metric(
len(per_field), resample=resample,
)
    # Legacy consumers expect a single per-field summary; expose the
    # count-weighted average MAPE across fields under a stable key.
if per_field:
total_w = sum(field_weights.values())
weighted = sum(per_field[f] * field_weights[f] / total_w for f in per_field)
# We re-expose this under a stable name so legacy consumers can
# still pick it up.
out["per_field_mape_weighted_avg"] = _scalar_metric(
float(weighted), resample=resample,
)
if task in ("T3", "T6"):
# Balance-sheet equation accuracy (per ticker).
bs_checked = 0
bs_pass = 0
m = merged.loc[keep]
for tk in m["ticker"].unique():
tk_data = m[m["ticker"] == tk]
fields_str = tk_data["field"].astype(str)
arow = tk_data[fields_str == "Assets"]
lrow = tk_data[fields_str == "Liabilities"]
erow = tk_data[fields_str == "StockholdersEquity"]
if not arow.empty and not lrow.empty and not erow.empty:
bs_checked += 1
                a = pd.to_numeric(arow[pred_col].iloc[0], errors="coerce")
                liab = pd.to_numeric(lrow[pred_col].iloc[0], errors="coerce")
                eq = pd.to_numeric(erow[pred_col].iloc[0], errors="coerce")
                if (
                    pd.notna(a) and pd.notna(liab) and pd.notna(eq)
                    and float(a) > 0
                    and abs(float(a) - float(liab) - float(eq)) / float(a) < 0.01
                ):
                    bs_pass += 1
out["balance_equation_accuracy"] = _scalar_metric(
float(bs_pass / bs_checked) if bs_checked > 0 else float("nan"),
resample=resample,
)
out["balance_equation_checked"] = _scalar_metric(
int(bs_checked), resample=resample,
)
# success_rate = unique tickers with a parseable prediction / total
# tickers requested (we approximate via the union of GT tickers).
n_attempted = int(gt["ticker"].nunique()) if "ticker" in gt.columns else 0
n_succeeded = int(pred["ticker"].nunique()) if "ticker" in pred.columns else 0
out["success_rate"] = _scalar_metric(
float(n_succeeded / n_attempted) if n_attempted > 0 else 0.0,
resample=resample,
)
if return_sensitivity:
raw_pred = pred_arr[keep]
raw_act = act_arr[keep]
for clip in _APE_SENSITIVITY_CLIPS:
ape_s = np.abs((raw_pred - raw_act) / raw_act)
if np.isfinite(clip):
ape_s = np.minimum(ape_s, clip)
ape_s = ape_s * 100.0
key = (
f"mape_at_clip_{int(clip)}x" if np.isfinite(clip)
else "mape_at_clip_inf"
)
out[key] = _wrap_metric(
ape_s,
cluster_keys=cluster_for_ape if resample == "cluster" else None,
agg_fn=np.mean, n_boot=n_boot, alpha=alpha, seed=seed,
resample=resample,
)
return out
# -------------------------------------------------------------------
# T4 — Scenario-conditioned forecasting
# -------------------------------------------------------------------
def _adapt_t4(y_true: Any, y_pred: Any) -> tuple[pd.DataFrame, pd.DataFrame]:
if isinstance(y_true, pd.DataFrame) and "actual_return_pct" in y_true.columns:
gt = y_true.reset_index(drop=True)
elif isinstance(y_true, (np.ndarray, list, pd.Series)):
arr = np.asarray(y_true).ravel().astype(np.float64)
gt = pd.DataFrame({
"scenario_id": [f"sc_{i}" for i in range(len(arr))],
"ticker": [f"row_{i}" for i in range(len(arr))],
"actual_return_pct": arr,
})
else:
raise ValueError(
f"T4 score: y_true must be ndarray (N,) or DataFrame; "
f"got {type(y_true).__name__}."
)
if isinstance(y_pred, pd.DataFrame):
if "predicted_return_pct" in y_pred.columns:
pred = y_pred.reset_index(drop=True)
else:
raise ValueError(
"T4 score: y_pred DataFrame must have 'predicted_return_pct'."
)
else:
arr = np.asarray(y_pred).ravel()
if len(arr) != len(gt):
raise ValueError(
f"T4 score: y_pred length {len(arr)} != y_true rows {len(gt)}."
)
pred_dict: dict[str, Any] = {
"scenario_id": gt["scenario_id"].values,
"ticker": gt["ticker"].values,
"predicted_return_pct": arr,
}
if "event_type" in gt.columns:
pred_dict["event_type"] = gt["event_type"].values
pred = pd.DataFrame(pred_dict)
return pred, gt
def _per_task_score_T4(
y_true: Any,
y_pred: Any,
*,
cluster_keys: np.ndarray | None,
n_boot: int | Literal["adaptive"],
alpha: float,
seed: int,
resample: Literal["cluster", "iid"],
) -> dict[str, MetricValue]:
pred_df, gt_df = _adapt_t4(y_true, y_pred)
merged = pred_df.merge(gt_df, on=["scenario_id", "ticker"], how="inner")
merged = merged.reset_index(drop=True)
# Ground truth must never be NaN — surface data-side bugs.
gt_nan = merged["actual_return_pct"].isna().sum()
if gt_nan > 0:
raise ValueError(
f"T4 score: {gt_nan} rows have NaN ground truth (actual_return_pct). "
"This is a loader/preprocessing bug — fix at data source."
)
if merged.empty:
# No overlap between predictions and ground truth: fillna(0)
# penalty → MAE = mean(|actual_return_pct|) using gt rows.
gt_act = pd.to_numeric(gt_df["actual_return_pct"], errors="coerce").values.astype(np.float64)
gt_keep = np.isfinite(gt_act)
if gt_keep.any():
abs_err_gt = np.abs(gt_act[gt_keep]) # |0 - actual| = |actual|
ck = (
gt_df["scenario_id"].astype(str).values[gt_keep]
if resample == "cluster" and "scenario_id" in gt_df.columns else None
)
return {
"return_mae_pct": _wrap_metric(abs_err_gt, cluster_keys=ck, agg_fn=np.mean,
n_boot=n_boot, alpha=alpha, seed=seed, resample=resample),
"directional_accuracy": _scalar_metric(0.0, resample=resample),
"ci_calibration_95": _none_metric(resample=resample),
"n_predictions": _scalar_metric(0, resample=resample),
"n_scenarios": _scalar_metric(0, resample=resample),
}
return {
"return_mae_pct": _scalar_metric(0.0, resample=resample),
"directional_accuracy": _scalar_metric(0.0, resample=resample),
"ci_calibration_95": _none_metric(resample=resample),
"n_predictions": _scalar_metric(0, resample=resample),
"n_scenarios": _scalar_metric(0, resample=resample),
}
# NaN-prediction penalty: substitute with 0.0 (no-signal); MAE = |actual|.
nan_mask = ~np.isfinite(merged["predicted_return_pct"].values)
merged.loc[nan_mask, "predicted_return_pct"] = 0.0
pred = merged["predicted_return_pct"].values.astype(np.float64)
actual = merged["actual_return_pct"].values.astype(np.float64)
abs_err = np.abs(pred - actual)
dir_agree = (np.sign(pred) == np.sign(actual)).astype(np.float64)
# Cluster by scenario_id for T4 (default). Caller may override.
if cluster_keys is None:
cluster_v = merged["scenario_id"].astype(str).values
else:
cluster_v = np.asarray(cluster_keys).ravel()
if cluster_v.size != len(merged):
# Best-effort: rebuild from merged scenario_id if mismatch.
cluster_v = merged["scenario_id"].astype(str).values
out: dict[str, MetricValue] = {}
out["return_mae_pct"] = _wrap_metric(
abs_err, cluster_keys=cluster_v if resample == "cluster" else None,
agg_fn=np.mean, n_boot=n_boot, alpha=alpha, seed=seed,
resample=resample,
)
out["directional_accuracy"] = _wrap_metric(
dir_agree, cluster_keys=cluster_v if resample == "cluster" else None,
agg_fn=np.mean, n_boot=n_boot, alpha=alpha, seed=seed,
resample=resample,
)
if {"predicted_ci_low", "predicted_ci_high"}.issubset(merged.columns):
in_ci = (
(merged["actual_return_pct"] >= merged["predicted_ci_low"])
& (merged["actual_return_pct"] <= merged["predicted_ci_high"])
).astype(np.float64).values
out["ci_calibration_95"] = _wrap_metric(
in_ci, cluster_keys=cluster_v if resample == "cluster" else None,
agg_fn=np.mean, n_boot=n_boot, alpha=alpha, seed=seed,
resample=resample,
)
else:
# No quantile predictions -> metric not applicable. Emit a
# MetricValue with value=None so downstream consumers can detect
# this case via `is None` rather than a NaN finiteness probe.
out["ci_calibration_95"] = _none_metric(resample=resample)
out["n_predictions"] = _scalar_metric(int(len(merged)), resample=resample)
out["n_scenarios"] = _scalar_metric(
int(merged["scenario_id"].nunique()), resample=resample,
)
return out
# -------------------------------------------------------------------
# T7 — Real-estate valuation
# -------------------------------------------------------------------
def _per_task_score_T7(
y_true: Any,
y_pred: Any,
*,
cluster_keys: np.ndarray | None,
n_boot: int | Literal["adaptive"],
alpha: float,
seed: int,
resample: Literal["cluster", "iid"],
return_sensitivity: bool,
) -> dict[str, MetricValue]:
if not isinstance(y_true, pd.DataFrame) or not isinstance(y_pred, pd.DataFrame):
raise ValueError("T7 score: both y_true and y_pred must be DataFrames.")
if "address" not in y_true.columns or "address" not in y_pred.columns:
# Fall back to positional alignment.
merged = pd.concat([
y_pred.reset_index(drop=True),
y_true.reset_index(drop=True).add_suffix("_actual"),
], axis=1)
else:
merged = y_pred.merge(
y_true, on="address", how="inner", suffixes=("_pred", "_actual"),
)
if merged.empty:
# No overlapping addresses: fillna(0) penalty per gt rent + price
# column. Saturates to 100% APE per row.
out: dict[str, MetricValue] = {
"n_predictions": _scalar_metric(0, resample=resample),
}
for target, actual_cands in [
("rent", ["rent", "rentEstimate", "rent_estimate"]),
("price", ["price", "lastSalePrice", "last_sale_price"]),
]:
actual_col = next((c for c in actual_cands if c in y_true.columns), None)
if actual_col is None:
out[f"{target}_MAPE"] = _scalar_metric(float("nan"), resample=resample)
out[f"{target}_median_APE"] = _scalar_metric(float("nan"), resample=resample)
out[f"{target}_n_valid"] = _scalar_metric(0, resample=resample)
continue
gt_act = pd.to_numeric(y_true[actual_col], errors="coerce").values.astype(np.float64)
gt_keep = np.isfinite(gt_act) & (np.abs(gt_act) > 0)
if gt_keep.any():
ape_gt = np.minimum(
np.abs(gt_act[gt_keep]) / np.abs(gt_act[gt_keep]),
_APE_CLIP_DEFAULT,
) * 100.0
ck = (
y_true["address"].astype(str).values[gt_keep]
if resample == "cluster" and "address" in y_true.columns else None
)
out[f"{target}_MAPE"] = _wrap_metric(
ape_gt, cluster_keys=ck, agg_fn=np.mean,
n_boot=n_boot, alpha=alpha, seed=seed, resample=resample,
)
out[f"{target}_median_APE"] = _wrap_metric(
ape_gt, cluster_keys=ck, agg_fn=np.median,
n_boot=n_boot, alpha=alpha, seed=seed, resample=resample,
)
out[f"{target}_n_valid"] = _scalar_metric(int(gt_keep.sum()), resample=resample)
else:
out[f"{target}_MAPE"] = _scalar_metric(100.0, resample=resample)
out[f"{target}_median_APE"] = _scalar_metric(100.0, resample=resample)
out[f"{target}_n_valid"] = _scalar_metric(0, resample=resample)
return out
out: dict[str, MetricValue] = {
"n_predictions": _scalar_metric(int(len(merged)), resample=resample),
}
for target, pred_cands, actual_cands in [
("rent",
["pred_rent", "predicted_rent", "rent_pred"],
["rent_actual", "rent", "rentEstimate_actual", "rent_estimate_actual"]),
("price",
["pred_price", "predicted_price", "price_pred"],
["price_actual", "price", "lastSalePrice_actual", "last_sale_price_actual"]),
]:
pred_col = next((c for c in pred_cands if c in merged.columns), None)
actual_col = next((c for c in actual_cands if c in merged.columns), None)
if pred_col is None or actual_col is None:
out[f"{target}_MAPE"] = _scalar_metric(float("nan"), resample=resample)
out[f"{target}_median_APE"] = _scalar_metric(
float("nan"), resample=resample,
)
out[f"{target}_n_valid"] = _scalar_metric(0, resample=resample)
continue
pred_vals = pd.to_numeric(merged[pred_col], errors="coerce").values
actual_vals = pd.to_numeric(merged[actual_col], errors="coerce").values
# NaN penalty: substitute NaN predictions with ZERO (no-signal).
# APE = 100% per row, clipped at clip_default.
nan_mask = ~np.isfinite(pred_vals)
if nan_mask.any():
pred_vals = np.where(nan_mask, 0.0, pred_vals)
ape_pct, mask = _ape_per_instance(
pred_vals, actual_vals, clip=_APE_CLIP_DEFAULT,
)
if ape_pct.size == 0:
out[f"{target}_MAPE"] = _scalar_metric(float("nan"), resample=resample)
out[f"{target}_median_APE"] = _scalar_metric(
float("nan"), resample=resample,
)
out[f"{target}_n_valid"] = _scalar_metric(0, resample=resample)
continue
# T7 cluster bootstrap = address-level (one cluster per row, so it
# collapses to IID) by convention. If the caller supplied
# cluster_keys (e.g. metro / property_type) honour that.
if cluster_keys is not None:
ck = _align_cluster_keys(cluster_keys, len(merged), mask)
else:
ck = merged.loc[mask, "address"].astype(str).values if "address" in merged.columns else None
out[f"{target}_MAPE"] = _wrap_metric(
ape_pct, cluster_keys=ck if resample == "cluster" else None,
agg_fn=np.mean, n_boot=n_boot, alpha=alpha, seed=seed,
resample=resample,
)
out[f"{target}_median_APE"] = _wrap_metric(
ape_pct, cluster_keys=ck if resample == "cluster" else None,
agg_fn=np.median, n_boot=n_boot, alpha=alpha, seed=seed,
resample=resample,
)
out[f"{target}_n_valid"] = _scalar_metric(
int(ape_pct.size), resample=resample,
)
if return_sensitivity:
for clip in _APE_SENSITIVITY_CLIPS:
ape_v, mask_v = _ape_per_instance(
pred_vals, actual_vals, clip=clip,
)
ck_v = (
_align_cluster_keys(cluster_keys, len(merged), mask_v)
if cluster_keys is not None
else (
merged.loc[mask_v, "address"].astype(str).values
if "address" in merged.columns else None
)
)
key = (
f"{target}_MAPE_at_clip_{int(clip)}x"
if np.isfinite(clip) else f"{target}_MAPE_at_clip_inf"
)
out[key] = _wrap_metric(
ape_v, cluster_keys=ck_v if resample == "cluster" else None,
agg_fn=np.mean, n_boot=n_boot, alpha=alpha, seed=seed,
resample=resample,
)
return out
# ===================================================================
# Cluster-key alignment helper
# ===================================================================
def _align_cluster_keys(
cluster_keys: Any,
n_total: int,
mask: np.ndarray,
) -> np.ndarray | None:
"""Return cluster_keys masked to the rows kept (or None if no keys).
    Tolerant fallbacks:
    * If ``cluster_keys`` already has the kept-row length it is assumed to
      be pre-masked and returned unchanged.
    * On any other length mismatch (the upstream merge dropped rows the
      caller doesn't know about) we drop ``cluster_keys`` and let the
      bootstrap fall back to IID — better than raising.
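
    Example (keys aligned to the pre-mask length are masked down):

    >>> _align_cluster_keys(np.array(["A", "A", "B"]), 3,
    ...                     np.array([True, False, True])).tolist()
    ['A', 'B']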
"""
if cluster_keys is None:
return None
arr = np.asarray(cluster_keys).ravel()
if arr.size == n_total:
return arr[mask]
if arr.size == int(mask.sum()):
return arr # already masked
# Length mismatch: typically because the eval-side merge / dropna
# discarded rows the caller didn't know about. Fall back to None
# (degenerate IID bootstrap) rather than raising.
return None
# ===================================================================
# Public API: score
# ===================================================================
def score(
task: str,
y_true: Any,
y_pred: Any,
*,
cluster_keys: Any = None,
close_last: Any = None,
resample: Literal["cluster", "iid"] = "cluster",
n_boot: int | Literal["adaptive"] = "adaptive",
alpha: float = 0.05,
seed: int = 42,
return_sensitivity: bool = False,
) -> dict[str, MetricValue]:
"""Score a (task, y_true, y_pred) triple.
Returns
-------
dict[str, MetricValue]
Per-task metric mapping. Keys per task are documented in the module
docstring; every value is a Pydantic ``MetricValue`` carrying
``value, ci_lo, ci_hi, std, n_boot, resample``.
Notes
-----
* The default resample is ``"cluster"``; on panel data this is the
statistically correct choice.
    * If ``cluster_keys`` is None, the function derives it where the inputs
      allow: ``ticker`` for T2/T3/T5/T6/T7, ``scenario_id`` for T4. For T1
      (plain arrays) it falls back to one cluster per row, which collapses
      to an IID bootstrap. The caller may override.
* ``close_last`` is a 1-D float array aligned to ``y_true`` rows for T1.
If unavailable we fall back to ``y_pred[:, 0]`` and emit a warning;
the metric ``directional_accuracy_anchor_fallback`` is set to 1.0 so
consumers can detect the fallback.
* ``n_boot="adaptive"`` starts at 1,000 bootstrap draws and escalates
to 10,000 if the relative CI half-width exceeds 5%.
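
    Example (synthetic two-ticker T1 panel; values are illustrative only):

    >>> y_true = np.array([[101.0, 102.0], [55.0, 54.0]])
    >>> y_pred = np.array([[100.5, 101.0], [56.0, 55.0]])
    >>> m = score("T1", y_true, y_pred,
    ...           cluster_keys=np.array(["AAA", "BBB"]),
    ...           close_last=np.array([100.0, 56.0]))
    >>> sorted(m)[:3]
    ['directional_accuracy', 'mae', 'mase']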
"""
if resample not in ("cluster", "iid"):
raise ValueError(f"resample must be 'cluster' or 'iid', got {resample!r}")
ck_arr: np.ndarray | None
if cluster_keys is None:
ck_arr = None
else:
ck_arr = np.asarray(cluster_keys).ravel()
cl_arr: np.ndarray | None
if close_last is None:
cl_arr = None
else:
cl_arr = np.asarray(close_last, dtype=np.float64).ravel()
if task == "T1":
if ck_arr is None and isinstance(y_true, np.ndarray):
            # No cluster keys — the caller didn't pass meta["ticker"] and we
            # cannot derive tickers from y_true alone. Use a degenerate
            # one-cluster-per-row key, which collapses to an IID bootstrap.
ck_arr = np.arange(len(y_true))
return _per_task_score_T1(
y_true, y_pred,
cluster_keys=ck_arr, close_last=cl_arr,
n_boot=n_boot, alpha=alpha, seed=seed,
resample=resample, return_sensitivity=return_sensitivity,
)
if task in ("T2", "T5"):
return _per_task_score_T2_T5(
y_true, y_pred,
cluster_keys=ck_arr,
n_boot=n_boot, alpha=alpha, seed=seed,
resample=resample, return_sensitivity=return_sensitivity,
)
if task in ("T3", "T6"):
return _per_task_score_T3_T6(
y_true, y_pred,
task=task,
cluster_keys=ck_arr,
n_boot=n_boot, alpha=alpha, seed=seed,
resample=resample, return_sensitivity=return_sensitivity,
)
if task == "T4":
return _per_task_score_T4(
y_true, y_pred,
cluster_keys=ck_arr,
n_boot=n_boot, alpha=alpha, seed=seed,
resample=resample,
)
if task == "T7":
return _per_task_score_T7(
y_true, y_pred,
cluster_keys=ck_arr,
n_boot=n_boot, alpha=alpha, seed=seed,
resample=resample, return_sensitivity=return_sensitivity,
)
raise ValueError(f"Unknown task: {task!r}")
# ===================================================================
# Multiple-comparisons correction — per task
# ===================================================================
def _holm_correction(p_values: np.ndarray, alpha: float = 0.05) -> tuple[np.ndarray, np.ndarray]:
"""Holm-Bonferroni step-down correction.
    Returns ``(p_adjusted, reject)`` arrays of the same length as
    ``p_values``; ``p_adjusted`` is monotone non-decreasing along the
    sorted raw p-values, and ``reject`` is the boolean rejection vector at
    family-wise error rate ``alpha``.
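
    Example (three hypotheses at the default ``alpha=0.05``):

    >>> p_adj, reject = _holm_correction(np.array([0.01, 0.04, 0.03]))
    >>> [float(round(x, 3)) for x in p_adj]
    [0.03, 0.06, 0.06]
    >>> reject.tolist()
    [True, False, False]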
"""
p = np.asarray(p_values, dtype=np.float64).ravel()
m = p.size
if m == 0:
return p, np.array([], dtype=bool)
order = np.argsort(p)
p_sorted = p[order]
p_adj_sorted = np.empty(m, dtype=np.float64)
running_max = 0.0
for i in range(m):
adj = (m - i) * p_sorted[i]
running_max = max(running_max, adj)
p_adj_sorted[i] = min(running_max, 1.0)
# Unsort.
p_adj = np.empty_like(p_adj_sorted)
p_adj[order] = p_adj_sorted
return p_adj, p_adj <= alpha
def _bh_correction(p_values: np.ndarray, alpha: float = 0.05) -> tuple[np.ndarray, np.ndarray]:
"""Benjamini-Hochberg FDR correction."""
p = np.asarray(p_values, dtype=np.float64).ravel()
m = p.size
if m == 0:
return p, np.array([], dtype=bool)
order = np.argsort(p)
p_sorted = p[order]
ranks = np.arange(1, m + 1)
p_adj_sorted_raw = p_sorted * m / ranks
# Enforce monotonicity (running min from the right).
p_adj_sorted = np.minimum.accumulate(p_adj_sorted_raw[::-1])[::-1]
p_adj_sorted = np.minimum(p_adj_sorted, 1.0)
p_adj = np.empty_like(p_adj_sorted)
p_adj[order] = p_adj_sorted
return p_adj, p_adj <= alpha
def _extract_record(rec: Any) -> dict[str, Any]:
"""Coerce a record (Pydantic / dict / dataclass) to a plain dict."""
if isinstance(rec, dict):
return rec
if hasattr(rec, "model_dump"):
return rec.model_dump()
if hasattr(rec, "__dict__"):
return dict(rec.__dict__)
raise TypeError(f"Cannot extract record of type {type(rec).__name__}")
def _extract_metric_value(metrics: Any, key: str) -> tuple[float, float, int, int]:
"""Pull (value, std, n_boot, ok) out of a metric dict-or-MetricValue.
Returns ``ok=0`` when the metric is missing or its ``value`` is ``None``
(semantic "not applicable"); finite values pass through with ``ok=1``.
"""
m = metrics.get(key) if isinstance(metrics, dict) else None
if m is None:
return float("nan"), float("nan"), 0, 0
if isinstance(m, MetricValue):
if m.value is None:
return float("nan"), float("nan"), int(m.n_boot), 0
std = float(m.std) if m.std is not None else float("nan")
return float(m.value), std, int(m.n_boot), 1
if isinstance(m, dict):
v = m.get("value", None)
if v is None:
return float("nan"), float("nan"), int(m.get("n_boot", 0)), 0
return (
float(v),
float(m.get("std", float("nan")) if m.get("std", None) is not None else float("nan")),
int(m.get("n_boot", 0)),
1,
)
return float("nan"), float("nan"), 0, 0
# Default headline metric per task, as ``(metric_key, lower_is_better)``.
_HEADLINE_METRIC: dict[str, tuple[str, bool]] = {
"T1": ("mse", True),
"T2": ("mape", True),
"T3": ("overall_mape", True),
"T4": ("return_mae_pct", True),
"T5": ("mape", True),
"T6": ("overall_mape", True),
"T7": ("rent_MAPE", True),
}
def compare_methods(
task: str,
records: list,
*,
correction: Literal["holm", "bh"] = "holm",
alpha: float = 0.05,
headline_metric: str | None = None,
) -> "pd.DataFrame":
"""Pairwise compare every method on ``task`` against the best baseline.
Parameters
----------
task
``"T1"`` .. ``"T7"``.
records
Iterable of ``RunRecord``-shaped objects (Pydantic models, dicts,
or anything with ``.method_id``, ``.task``, ``.metrics``).
correction
``"holm"`` (default; FWER) or ``"bh"`` (FDR). Per-task scope only —
no cross-task FWER claim.
alpha
Family-wise error rate (Holm) or false discovery rate (BH).
headline_metric
Override the per-task headline metric (default uses
``_HEADLINE_METRIC[task]``). The metric must exist on every
record's ``metrics`` dict.
Returns
-------
DataFrame
One row per method with columns
``[method_id, value, std, n_boot, z, p_value, p_adj, reject_null]``.
The lowest-value method (or highest, if ``lower_is_better=False``)
is the reference; its ``p_value`` is NaN.
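
    Example (illustrative records; ``metrics`` may hold ``MetricValue``
    models or plain dicts):

    >>> recs = [
    ...     {"task": "T1", "method_id": "naive",
    ...      "metrics": {"mse": {"value": 2.0, "std": 0.1, "n_boot": 1000}}},
    ...     {"task": "T1", "method_id": "llm",
    ...      "metrics": {"mse": {"value": 1.5, "std": 0.1, "n_boot": 1000}}},
    ... ]
    >>> compare_methods("T1", recs)["method_id"].tolist()
    ['llm', 'naive']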
"""
if task not in _HEADLINE_METRIC:
raise ValueError(f"Unknown task: {task!r}")
metric_key, lower_is_better = _HEADLINE_METRIC[task]
if headline_metric is not None:
metric_key = headline_metric
rows: list[dict[str, Any]] = []
for rec in records:
d = _extract_record(rec)
if d.get("task") != task:
continue
metrics = d.get("metrics")
if not metrics:
continue
v, std, n_b, ok = _extract_metric_value(metrics, metric_key)
if not ok or not np.isfinite(v):
continue
rows.append({
"method_id": d.get("method_id", "?"),
"value": v,
"std": std,
"n_boot": n_b,
})
if not rows:
return pd.DataFrame(
columns=["method_id", "value", "std", "n_boot",
"z", "p_value", "p_adj", "reject_null"]
)
df = pd.DataFrame(rows)
# Pick the reference method.
if lower_is_better:
ref_idx = int(df["value"].idxmin())
else:
ref_idx = int(df["value"].idxmax())
ref_v = float(df.loc[ref_idx, "value"])
ref_std = float(df.loc[ref_idx, "std"])
# Two-sided z test using bootstrap stds; combined under independence
# (this is conservative — bootstrap stds are within-method only;
# cross-method covariance is unknown without the full bootstrap
# distribution, which we don't carry on RunRecord by design).
from scipy.stats import norm
z_vals: list[float] = []
p_vals: list[float] = []
for i, row in df.iterrows():
if i == ref_idx:
z_vals.append(float("nan"))
p_vals.append(float("nan"))
continue
denom = float(np.sqrt(row["std"] ** 2 + ref_std ** 2))
if denom <= 0 or not np.isfinite(denom):
z_vals.append(float("nan"))
p_vals.append(float("nan"))
continue
z = (float(row["value"]) - ref_v) / denom
z_vals.append(z)
p_vals.append(float(2.0 * (1.0 - norm.cdf(abs(z)))))
df["z"] = z_vals
df["p_value"] = p_vals
p_arr = np.asarray(df["p_value"].values, dtype=np.float64)
finite = np.isfinite(p_arr)
p_finite = p_arr[finite]
if correction == "holm":
p_adj_finite, reject_finite = _holm_correction(p_finite, alpha=alpha)
elif correction == "bh":
p_adj_finite, reject_finite = _bh_correction(p_finite, alpha=alpha)
else:
raise ValueError(f"correction must be 'holm' or 'bh', got {correction!r}")
p_adj = np.full_like(p_arr, np.nan)
reject = np.zeros(p_arr.size, dtype=bool)
p_adj[finite] = p_adj_finite
reject[finite] = reject_finite
df["p_adj"] = p_adj
df["reject_null"] = reject
return df.sort_values("value", ascending=lower_is_better).reset_index(drop=True)
# ===================================================================
# Convenience re-exports
# ===================================================================
__all__ = [
"score",
"compare_methods",
"MetricValue",
"_bootstrap_ci",
"_close_anchor_da",
"_holm_correction",
"_bh_correction",
]