"""Runtime: Hybrid DataLoader for the MacroLens benchmark.
Provides ``WhatIfTSFDataset`` -- a lightweight, on-the-fly instance
generator that reads from pre-built benchmark artifacts
(``panel_train.parquet`` / ``panel_test.parquet``, ``scenarios.parquet``,
``filing_corpus.parquet``).
One "instance" = a tuple of:
(lookback_window, forecast_target, context_dict)
where ``context_dict`` holds metadata, filing text, macro, and any scenario
information that falls within the instance's time window.
Usage example
-------------
.. code-block:: python

    from whatif_bench.benchmark_loader import WhatIfTSFDataset

    # Defaults to granularity-appropriate lookback/horizon from config
    ds = WhatIfTSFDataset(split="train")
    print(len(ds))   # total number of sliding-window instances
    sample = ds[0]   # dict with 'lookback', 'target', 'context'

    # Each scenario in context["scenarios"] has a "scenario_role" field:
    #   "observed"     = already happened (in lookback window)
    #   "hypothetical" = in forecast horizon (the "what-if" condition)
"""
from __future__ import annotations
import json
import logging
from pathlib import Path
from typing import Any
import numpy as np
import pandas as pd
from . import config
logger = logging.getLogger(__name__)
class WhatIfTSFDataset:
"""Sliding-window dataset over the MacroLens benchmark panel.
Parameters
----------
split : str
``"train"`` or ``"test"``.
lookback : int, optional
Number of past time-steps visible to the model (in panel periods).
Defaults to the first entry of ``config.LOOKBACK_WINDOWS_BY_GRANULARITY``
for the chosen granularity (63 for daily, 13 for weekly, 3 for monthly).
horizon : int, optional
Number of future time-steps to predict (in panel periods).
Defaults to the first entry of ``config.HORIZONS_BY_GRANULARITY``
for the chosen granularity (5 for daily, 4 for weekly, 1 for monthly).
granularity : str, optional
Defaults to ``config.GRANULARITY``.
target_col : str
Column name of the prediction target. Default: ``"close"``.
load_text : bool
If True, load ``filing_corpus.parquet`` and attach filing text to
context. Set to False for fast iteration.
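
    Examples
    --------
    A minimal usage sketch; the shapes follow from ``__getitem__``, and the
    lookback/horizon values shown are the documented daily defaults:

    .. code-block:: python

        ds = WhatIfTSFDataset(split="test", lookback=63, horizon=5,
                              granularity="daily", load_text=False)
        sample = ds[0]
        sample["lookback"].shape   # (63, n_features), float32
        sample["target"].shape     # (5,), float32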
"""
def __init__(
self,
split: str = "train",
lookback: int | None = None,
horizon: int | None = None,
granularity: str | None = None,
target_col: str = "close",
load_text: bool = True,
) -> None:
if granularity is None:
granularity = config.GRANULARITY
self.granularity = granularity
self.split = split
# Granularity-aware defaults from config
if lookback is None:
lookback = config.get_lookback_windows(granularity)[0]
if horizon is None:
horizon = config.get_horizons(granularity)[0]
self.lookback = lookback
self.horizon = horizon
self.target_col = target_col
bench_dir = config.DATA_DIR / "benchmark" / granularity
# ---- Load panel split ------------------------------------------------
panel_path = bench_dir / f"panel_{split}.parquet"
if not panel_path.exists():
raise FileNotFoundError(f"Benchmark not assembled: {panel_path}")
self._panel = pd.read_parquet(panel_path)
self._panel["date"] = pd.to_datetime(self._panel["date"])
self._panel = self._panel.sort_values(["ticker", "date"]).reset_index(drop=True)
# ---- Validate target column exists -----------------------------------
if target_col not in self._panel.columns:
available = [c for c in self._panel.columns if self._panel[c].dtype.kind in "fiub"]
raise ValueError(
f"target_col={target_col!r} not in panel columns. "
f"Available numeric columns: {available}"
)
# ---- Build instance index (ticker, start_idx, end_idx) ---------------
self._instances: list[tuple[str, int, int]] = []
required_len = lookback + horizon
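        # A ticker with n rows yields n - required_len + 1 overlapping
        # windows; tickers shorter than required_len contribute none.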
for ticker, grp in self._panel.groupby("ticker"):
n = len(grp)
if n < required_len:
continue
start_positions = range(n - required_len + 1)
grp_idx = grp.index.tolist()
for s in start_positions:
self._instances.append((ticker, grp_idx[s], grp_idx[s + required_len - 1]))
# ---- Scenarios -------------------------------------------------------
scenarios_path = bench_dir / "scenarios.parquet"
if scenarios_path.exists():
self._scenarios = pd.read_parquet(scenarios_path)
self._scenarios["event_date"] = pd.to_datetime(self._scenarios["event_date"])
else:
self._scenarios = pd.DataFrame()
# ---- Filing corpus index (optional, text loaded on-demand) -----------
self._corpus: pd.DataFrame | None = None
self._corpus_by_ticker: dict[str, pd.DataFrame] = {}
if load_text:
corpus_path = bench_dir / "filing_corpus.parquet"
if corpus_path.exists():
self._corpus = pd.read_parquet(corpus_path)
self._corpus["filing_date"] = pd.to_datetime(
self._corpus["filing_date"], errors="coerce",
)
self._corpus = self._corpus.sort_values("filing_date")
# Pre-build per-ticker index for O(1) lookup
for ticker, grp in self._corpus.groupby("ticker"):
self._corpus_by_ticker[str(ticker)] = grp
# ---- Task definition -------------------------------------------------
task_path = bench_dir / "task_definition.json"
self.task_definition: dict = {}
if task_path.exists():
self.task_definition = json.loads(task_path.read_text())
n_tickers = self._panel["ticker"].nunique()
logger.info(
"WhatIfTSFDataset(%s/%s, lookback=%d, horizon=%d): %d instances from %d tickers.",
split, granularity, lookback, horizon, len(self._instances), n_tickers,
)
if len(self._instances) == 0 and n_tickers > 0:
max_len = self._panel.groupby("ticker").size().max()
logger.warning(
"ZERO instances generated! lookback(%d) + horizon(%d) = %d periods required, "
"but longest ticker has only %d periods. "
"Consider using smaller lookback/horizon values for %s granularity. "
"Suggested defaults: lookback=%d, horizon=%d.",
lookback, horizon, lookback + horizon, max_len, granularity,
config.get_lookback_windows(granularity)[0],
config.get_horizons(granularity)[0],
)
# ------------------------------------------------------------------
# Sequence protocol
# ------------------------------------------------------------------
def __len__(self) -> int:
return len(self._instances)
def canonical_indices(self, task: str = "T1") -> list[int]:
"""Return the dataset-instance indices matching the canonical
``(ticker, anchor_date)`` pairs from
``dataloader.canonical_indices.get_canonical_indices(task)``.
For T1, ``anchor_date`` is the lookback-end date (i.e. the latest
date in the window). Every T1 baseline must iterate exactly these
indices so cross-method comparison is on identical instances.
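
        A usage sketch (``"T1"`` is the only task id referenced in this
        module; other ids are an assumption about
        ``get_canonical_indices``):

        .. code-block:: python

            ds = WhatIfTSFDataset(split="test")
            for i in ds.canonical_indices("T1"):
                sample = ds[i]   # identical instance set across baselines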
"""
from .dataloader.canonical_indices import get_canonical_indices
canonical = get_canonical_indices(
task, "eval", granularity=self.granularity,
)
canonical_set = {
(str(t), pd.Timestamp(a))
for t, a in zip(
canonical["ticker"].astype(str),
pd.to_datetime(canonical["anchor_date"]),
)
}
out: list[int] = []
for i, (ticker, row_start, _row_end) in enumerate(self._instances):
lookback_end_date = pd.Timestamp(
self._panel.loc[row_start + self.lookback - 1, "date"]
)
if (str(ticker), lookback_end_date) in canonical_set:
out.append(i)
return out
def __getitem__(self, idx: int) -> dict[str, Any]:
if idx < 0 or idx >= len(self._instances):
raise IndexError(f"Index {idx} out of range [0, {len(self._instances)})")
ticker, row_start, row_end = self._instances[idx]
window = self._panel.loc[row_start: row_end].copy()
lookback_df = window.iloc[: self.lookback]
target_df = window.iloc[self.lookback:]
date_start = lookback_df["date"].iloc[0]
date_end = target_df["date"].iloc[-1]
# Numeric feature columns
exclude = {"ticker", "date", "label", "split",
"nearest_filing_type", "nearest_filing_date", "nearest_filing_path"}
feat_cols = [c for c in lookback_df.columns if c not in exclude and lookback_df[c].dtype.kind in "fiub"]
# Context
context: dict[str, Any] = {
"ticker": ticker,
"date_start": str(date_start.date()),
"date_end": str(date_end.date()),
"sector": lookback_df.get("sector", pd.Series()).iloc[0] if "sector" in lookback_df.columns else None,
"industry": lookback_df.get("industry", pd.Series()).iloc[0] if "industry" in lookback_df.columns else None,
"label": lookback_df["label"].iloc[0] if "label" in lookback_df.columns else None,
}
# Macro state summary for LLM agents -- human-readable snapshot of the
# latest macro values at the lookback end.
_MACRO_LABELS = {
"fred_FEDFUNDS": "Fed Funds Rate",
"fred_DGS2": "2Y Treasury",
"fred_DGS10": "10Y Treasury",
"fred_VIXCLS": "VIX",
"fred_SP500": "S&P 500",
"fred_NASDAQCOM": "NASDAQ",
"fred_DTWEXBGS": "USD Index",
"eia_crude_spot": "WTI Crude ($/bbl)",
"eia_ng_spot": "Nat Gas ($/MMBtu)",
}
        macro_snapshot: dict[str, float] = {}
last_row = lookback_df.iloc[-1]
for col, label in _MACRO_LABELS.items():
if col in lookback_df.columns:
val = last_row[col]
if pd.notna(val):
macro_snapshot[label] = round(float(val), 2)
if macro_snapshot:
context["macro_state"] = macro_snapshot
# Filing text (nearest 10-K/10-Q as-of the lookback end) -- O(1) dict lookup
if self._corpus_by_ticker:
lookback_end = lookback_df["date"].iloc[-1]
ticker_filings = self._corpus_by_ticker.get(ticker)
if ticker_filings is not None:
valid = ticker_filings[ticker_filings["filing_date"] <= lookback_end]
if not valid.empty:
# Nearest 10-K/10-Q for primary filing context
annual_q = valid[valid["filing_type"].isin(["10-K", "10-Q"])]
if not annual_q.empty:
latest = annual_q.iloc[-1]
context["filing_type"] = latest.get("filing_type", "")
context["filing_date"] = str(latest.get("filing_date", ""))
filing_path = latest.get("filing_path", "")
if filing_path:
full_path = config.DATA_DIR / filing_path
try:
context["filing_text"] = full_path.read_text(
encoding="utf-8", errors="replace"
)
except Exception:
context["filing_text"] = ""
else:
context["filing_text"] = ""
# 8-K filings within the lookback window
lookback_start = lookback_df["date"].iloc[0]
eightk = valid[
(valid["filing_type"] == "8-K")
& (valid["filing_date"] >= lookback_start)
]
if not eightk.empty:
eightk_texts = []
for _, row in eightk.iterrows():
fp = row.get("filing_path", "")
if fp:
full_path = config.DATA_DIR / fp
try:
eightk_texts.append(full_path.read_text(
encoding="utf-8", errors="replace"
))
except Exception:
pass
if eightk_texts:
context["filing_8k_texts"] = eightk_texts
# Recent news from yfinance per-ticker JSON
news_path = config.NEWS_DIR / "tickers" / f"{ticker}.json"
if news_path.exists():
try:
all_news = json.loads(news_path.read_text(encoding="utf-8"))
lookback_end_dt = lookback_df["date"].iloc[-1]
lookback_start_dt = lookback_df["date"].iloc[0]
recent = []
for art in all_news:
pub = art.get("pubDate") or art.get("pub_date") or art.get("providerPublishTime")
if pub is None:
continue
                    try:
                        # providerPublishTime is unix epoch seconds; date
                        # strings parse directly via pd.Timestamp.
                        if isinstance(pub, (int, float)):
                            ts = pd.Timestamp(pub, unit="s")
                        else:
                            ts = pd.Timestamp(pub)
                    except Exception:
                        continue
if lookback_start_dt <= ts <= lookback_end_dt:
recent.append(art)
if recent:
context["recent_news"] = recent
except Exception:
pass
# Scenario overlay -- label each as "observed" (in lookback) or
# "hypothetical" (in forecast horizon), which is the core semantic
# distinction for what-if evaluation.
if not self._scenarios.empty:
lookback_end = lookback_df["date"].iloc[-1]
overlapping = self._scenarios[
(self._scenarios["event_date"] >= date_start)
& (self._scenarios["event_date"] <= date_end)
].copy()
if not overlapping.empty:
overlapping["scenario_role"] = np.where(
overlapping["event_date"] <= lookback_end,
"observed", # Already happened -- model should know this
"hypothetical", # In forecast window -- the "what-if" condition
)
sc_cols = [
"scenario_id", "event_type", "event_date",
"event_description", "scenario_role",
]
# Include news_context if available
if "news_context" in self._scenarios.columns:
sc_cols.append("news_context")
context["scenarios"] = overlapping[
[c for c in sc_cols if c in overlapping.columns]
].to_dict("records")
return {
"lookback": lookback_df[feat_cols].values.astype(np.float32),
"lookback_dates": lookback_df["date"].dt.strftime("%Y-%m-%d").tolist(),
"target": target_df[self.target_col].values.astype(np.float32),
"target_dates": target_df["date"].dt.strftime("%Y-%m-%d").tolist(),
"context": context,
"feature_names": feat_cols,
}
# ------------------------------------------------------------------
# Convenience
# ------------------------------------------------------------------
def summary(self) -> dict[str, Any]:
"""Quick dataset summary statistics."""
return {
"split": self.split,
"granularity": self.granularity,
"lookback": self.lookback,
"horizon": self.horizon,
"num_instances": len(self._instances),
"num_tickers": self._panel["ticker"].nunique(),
"date_range": [
str(self._panel["date"].min().date()),
str(self._panel["date"].max().date()),
],
"num_scenarios": len(self._scenarios),
"corpus_loaded": self._corpus is not None and not self._corpus.empty,
}
# ===================================================================
# ValuationDataset: point-in-time valuation benchmark loader
# ===================================================================
class ValuationDataset:
"""Dataset for valuation benchmark tasks (A–F).
Each instance is a point-in-time snapshot suitable for:
Task A: estimate equity value given observable financials (public company)
Task B: generate financial statements given company profile
Task C: forecast scenario impact given pre-event data
Task D: estimate equity value given only financials + sector (PE simulation)
Task E: generate financial statements for unseen companies (Generator eval)
Task F: estimate rent/price for properties (RE valuation)
Parameters
----------
task : str
``"A"``–``"F"`` (or full name like ``"valuation_accuracy"``).
granularity : str
Defaults to ``config.GRANULARITY``.
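
    Examples
    --------
    A minimal sketch; whether ``ground_truth`` is attached depends on which
    benchmark files were assembled:

    .. code-block:: python

        ds = ValuationDataset(task="A")   # alias for "A_valuation_accuracy"
        item = ds[0]
        item["input"]                     # point-in-time snapshot as a dict
        item.get("ground_truth")          # absent when no GT row matches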
"""
_TASK_MAP = {
"A": "A_valuation_accuracy",
"B": "B_statement_generation",
"C": "C_scenario_forecast",
"D": "D_private_valuation",
"E": "E_generator_evaluation",
"F": "F_real_estate_valuation",
"valuation_accuracy": "A_valuation_accuracy",
"statement_generation": "B_statement_generation",
"scenario_forecast": "C_scenario_forecast",
"private_valuation": "D_private_valuation",
"generator_evaluation": "E_generator_evaluation",
"real_estate_valuation": "F_real_estate_valuation",
}
def __init__(
self,
task: str = "A",
granularity: str | None = None,
) -> None:
if granularity is None:
granularity = config.GRANULARITY
self.granularity = granularity
self.task = self._TASK_MAP.get(task, task)
bench_dir = config.DATA_DIR / "benchmark" / granularity
# Load task definitions
task_path = bench_dir / "valuation_tasks.json"
if task_path.exists():
self.task_definitions = json.loads(task_path.read_text())
else:
self.task_definitions = {}
task_def = self.task_definitions.get("tasks", {}).get(self.task, {})
input_file = task_def.get("input")
gt_file = task_def.get("ground_truth")
# Load inputs
self._inputs = pd.DataFrame()
if input_file:
p = bench_dir / input_file
if p.exists():
self._inputs = pd.read_parquet(p) if p.suffix == ".parquet" else pd.read_csv(p)
# Load ground truth
self._ground_truth = pd.DataFrame()
if gt_file:
p = bench_dir / gt_file
if p.exists():
self._ground_truth = pd.read_parquet(p) if p.suffix == ".parquet" else pd.read_csv(p)
# Holdout tickers
self.holdout_tickers = self.task_definitions.get("holdout_tickers", [])
logger.info(
"ValuationDataset(task=%s, gran=%s): %d inputs, %d ground_truth rows",
self.task, granularity, len(self._inputs), len(self._ground_truth),
)
@property
def inputs(self) -> pd.DataFrame:
return self._inputs
@property
def ground_truth(self) -> pd.DataFrame:
return self._ground_truth
def __len__(self) -> int:
return len(self._inputs)
def __getitem__(self, idx: int) -> dict[str, Any]:
if idx < 0 or idx >= len(self._inputs):
raise IndexError(f"Index {idx} out of range [0, {len(self._inputs)})")
row = self._inputs.iloc[idx]
item: dict[str, Any] = {"input": row.to_dict()}
# Attach ground truth if available
if not self._ground_truth.empty:
if self.task in ("A_valuation_accuracy", "D_private_valuation"):
tk = row.get("ticker")
dt = row.get("date")
match = self._ground_truth[
(self._ground_truth["ticker"] == tk)
& (self._ground_truth["date"] == dt)
]
if not match.empty:
item["ground_truth"] = match.iloc[0].to_dict()
elif self.task in ("B_statement_generation", "E_generator_evaluation"):
tk = row.get("ticker")
match = self._ground_truth[self._ground_truth["ticker"] == tk]
if not match.empty:
item["ground_truth"] = match.to_dict("records")
elif self.task == "C_scenario_forecast":
                sid = row.get("scenario_id")
                if sid is not None:  # avoid skipping falsy-but-valid ids (e.g. 0)
match = self._ground_truth[self._ground_truth["scenario_id"] == sid]
if not match.empty:
item["ground_truth"] = match.to_dict("records")
elif self.task == "F_real_estate_valuation":
# Match by index position (inputs and GT are aligned)
if idx < len(self._ground_truth):
item["ground_truth"] = self._ground_truth.iloc[idx].to_dict()
return item
def summary(self) -> dict[str, Any]:
"""Quick dataset summary."""
s: dict[str, Any] = {
"task": self.task,
"granularity": self.granularity,
"n_inputs": len(self._inputs),
"n_ground_truth": len(self._ground_truth),
"n_holdout_tickers": len(self.holdout_tickers),
}
if not self._inputs.empty and "ticker" in self._inputs.columns:
s["n_tickers"] = self._inputs["ticker"].nunique()
return s
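

if __name__ == "__main__":  # pragma: no cover
    # Minimal smoke test -- a sketch that assumes the benchmark artifacts
    # have already been assembled under config.DATA_DIR.
    logging.basicConfig(level=logging.INFO)
    _ds = WhatIfTSFDataset(split="train", load_text=False)
    print(_ds.summary())
    if len(_ds) > 0:
        _sample = _ds[0]
        print(_sample["context"]["ticker"], _sample["lookback"].shape)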