| """Layer 3 – Step 8: Assemble benchmark artifacts from the processed panel. |
| |
| Reads ``data/processed/{granularity}/panel.parquet`` and produces: |
| |
| data/benchmark/{granularity}/panel_train.parquet |
| data/benchmark/{granularity}/panel_test.parquet |
| data/benchmark/{granularity}/panel_full.csv (CSV compatibility) |
| data/benchmark/{granularity}/task_definition.json |
| data/benchmark/{granularity}/filing_corpus.parquet |
| data/benchmark/{granularity}/metadata.json |
| |
| Does NOT re-process raw data. All heavy lifting happened in |
| ``preprocess.py`` (Layer 2). |
| """ |
|
|
from __future__ import annotations
|
|
import json
import logging
import re
|
|
import numpy as np
import pandas as pd
|
|
from . import config
|
|
logger = logging.getLogger(__name__)


def _build_filing_corpus() -> pd.DataFrame:
    """Build a filing corpus index from ``data/filings/{TICKER}/*.md``.

    Stores only **metadata** (ticker, filing_type, filing_date, filing_path)
    -- NOT the full text -- to avoid OOM with thousands of large filings.
    Text is loaded on demand by ``benchmark_loader.py`` using ``filing_path``.

    Columns: ticker, filing_type, filing_date, filing_path, text_length.
    """
    columns = ["ticker", "filing_type", "filing_date", "filing_path", "text_length"]
    rows: list[dict] = []
    if not config.FILINGS_DIR.is_dir():
        logger.warning("Filings directory does not exist: %s", config.FILINGS_DIR)
        return pd.DataFrame(columns=columns)

    for ticker_dir in sorted(config.FILINGS_DIR.iterdir()):
        if not ticker_dir.is_dir():
            continue
        ticker = ticker_dir.name
        for md_file in sorted(ticker_dir.glob("*.md")):
            # Classify the filing type from the file name.
            if "10-K" in md_file.name:
                ftype = "10-K"
            elif "10-Q" in md_file.name:
                ftype = "10-Q"
            elif "8-K" in md_file.name:
                ftype = "8-K"
            else:
                ftype = "other"
            # The filing date is encoded as YYYY-MM-DD in the file name.
            match = re.search(r"(\d{4}-\d{2}-\d{2})", md_file.name)
            fdate = match.group(1) if match else None
            try:
                # File size in bytes: a cheap length proxy that avoids
                # reading the document into memory.
                text_len = md_file.stat().st_size
            except OSError:
                text_len = 0
            rows.append({
                "ticker": ticker,
                "filing_type": ftype,
                "filing_date": fdate,
                "filing_path": str(md_file.relative_to(config.DATA_DIR)),
                "text_length": text_len,
            })

    df = pd.DataFrame(rows, columns=columns)
    if not df.empty:
        df["filing_date"] = pd.to_datetime(df["filing_date"], errors="coerce")
    logger.info("Filing corpus index: %d documents across %d tickers.",
                len(df), df["ticker"].nunique())
    return df
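

# ``benchmark_loader.py`` (referenced in the docstring above) re-reads filing
# text on demand via ``filing_path``.  A minimal sketch of that lookup, under
# this module's convention that paths are stored relative to
# ``config.DATA_DIR``; the helper is illustrative only and is not called by
# the pipeline.
def _example_load_filing_text(filing_path: str) -> str:
    """Illustrative sketch: resolve a corpus row's relative path to full text."""
    return (config.DATA_DIR / filing_path).read_text(encoding="utf-8")

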
def _build_task_definition(panel: pd.DataFrame, granularity: str) -> dict:
    """Create the formal forecasting-task contract."""
    col_roles_path = config.DATA_DIR / "processed" / granularity / "columns.json"
    if col_roles_path.exists():
        column_roles = json.loads(col_roles_path.read_text())
    else:
        column_roles = {}

    return {
        "benchmark_name": "MacroLens",
        "version": "1.0",
        "granularity": granularity,
        "targets": {
            "primary": "close",
            "secondary": "volume",
        },
        "horizons": config.get_horizons(granularity),
        "lookback_windows": config.get_lookback_windows(granularity),
        "column_roles": column_roles,
        "context_taxonomy": {
            "historical": "10-K / 10-Q filing text (nearest filing as-of each date)",
            "covariate": "FRED / EIA macro indicators (exogenous_macro + exogenous_commodity)",
            "causal": "Fundamental ratios derived from statements + price (exogenous_fundamental)",
            "future_scenario": "Natural experiment events detected from macro data (scenarios.parquet)",
            "intemporal": "Sector / industry knowledge (metadata columns)",
        },
        "evaluation": {
            "metrics": ["MSE", "MAE", "RMSE", "directional_accuracy"],
            "baseline": "naive_last_value",
            "primary_metric": "MSE",
        },
        "scenario_method": "natural_experiments",
    }
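

# Downstream consumers can reload the contract that ``run()`` writes out as
# ``task_definition.json``.  A minimal sketch (illustrative helper, not part
# of the pipeline; the path mirrors the layout in the module docstring):
def _example_read_task_definition(granularity: str) -> dict:
    """Illustrative sketch: reload the task contract for one granularity."""
    path = config.DATA_DIR / "benchmark" / granularity / "task_definition.json"
    return json.loads(path.read_text())

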
def run(granularity: str | None = None) -> None:
    """Execute Layer 3 benchmark assembly."""
    if granularity is None:
        granularity = config.GRANULARITY

    panel_path = config.DATA_DIR / "processed" / granularity / "panel.parquet"
    if not panel_path.exists():
        raise FileNotFoundError(f"Run Step 7 (preprocess) first: {panel_path}")

    out_dir = config.DATA_DIR / "benchmark" / granularity
    out_dir.mkdir(parents=True, exist_ok=True)

    panel = pd.read_parquet(panel_path)
    logger.info("Loaded processed panel: %d rows, %d tickers, %d columns.",
                len(panel), panel["ticker"].nunique(), len(panel.columns))

    # Temporal train/test split: an explicit date from config wins; otherwise
    # the split date is chosen by ratio over the sorted unique dates.
    if config.TEMPORAL_SPLIT_DATE is not None:
        split_date = pd.Timestamp(config.TEMPORAL_SPLIT_DATE)
    else:
        unique_dates = np.sort(panel["date"].unique())
        split_idx = int(len(unique_dates) * config.TEMPORAL_SPLIT_RATIO)
        split_idx = max(1, min(split_idx, len(unique_dates) - 1))
        split_date = pd.Timestamp(unique_dates[split_idx])
        logger.info("Ratio-based split (%.0f:%.0f): split date = %s (%d/%d unique dates)",
                    config.TEMPORAL_SPLIT_RATIO * 100,
                    (1 - config.TEMPORAL_SPLIT_RATIO) * 100,
                    split_date.date(), split_idx, len(unique_dates))
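        # Worked example (assumed numbers): with 1,000 unique dates and
        # TEMPORAL_SPLIT_RATIO = 0.8, split_idx = 800 and unique_dates[800]
        # becomes the split date -- 800 dates train, 200 dates test.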
|
|
| panel["split"] = np.where(panel["date"] < split_date, "train", "test") |
| train = panel[panel["split"] == "train"] |
| test = panel[panel["split"] == "test"] |
|
|
| |
| |
    # Tickers that appear only after the split date (e.g. IPOs) have no
    # training history; these are the benchmark's cold-start cases.
    train_tickers = set(train["ticker"].unique())
    test_only = set(test["ticker"].unique()) - train_tickers
    if test_only:
        logger.info("%d cold-start tickers in test (IPOs).", len(test_only))
|
|
    train.to_parquet(out_dir / "panel_train.parquet", index=False)
    test.to_parquet(out_dir / "panel_test.parquet", index=False)
    panel.to_csv(out_dir / "panel_full.csv", index=False)
    logger.info("Saved train (%d rows) + test (%d rows) + CSV.", len(train), len(test))

    task_def = _build_task_definition(panel, granularity)
    (out_dir / "task_definition.json").write_text(json.dumps(task_def, indent=2, default=str))
    logger.info("Saved task_definition.json.")

    corpus = _build_filing_corpus()
    if not corpus.empty:
        corpus.to_parquet(out_dir / "filing_corpus.parquet", index=False)
        logger.info("Saved filing_corpus.parquet (%d documents).", len(corpus))

    metadata = {
        "format": "panel_data",
        "granularity": granularity,
        "primary_key": ["ticker", "date"],
        "total_rows": len(panel),
        "total_tickers": int(panel["ticker"].nunique()),
        "date_range": {
            "start": str(panel["date"].min().date()),
            "end": str(panel["date"].max().date()),
        },
        "temporal_split": {
            "split_date": str(split_date.date()),
            "split_method": (
                "fixed_date" if config.TEMPORAL_SPLIT_DATE is not None
                else f"ratio_{config.TEMPORAL_SPLIT_RATIO}"
            ),
            "train_rows": len(train),
            "test_rows": len(test),
            "train_date_range": {
                "start": str(train["date"].min().date()) if len(train) > 0 else None,
                "end": str(train["date"].max().date()) if len(train) > 0 else None,
            },
            "test_date_range": {
                "start": str(test["date"].min().date()) if len(test) > 0 else None,
                "end": str(test["date"].max().date()) if len(test) > 0 else None,
            },
        },
        "label_distribution": panel["label"].value_counts().to_dict() if "label" in panel.columns else {},
        "columns": list(panel.columns),
        "column_count": len(panel.columns),
        "column_roles": task_def.get("column_roles", {}),
        "filing_corpus_stats": {
            "total_documents": len(corpus),
            "tickers_with_filings": int(corpus["ticker"].nunique()) if not corpus.empty else 0,
        },
        "evaluation_protocol": task_def.get("evaluation", {}),
    }
    (out_dir / "metadata.json").write_text(json.dumps(metadata, indent=2, default=str))
    logger.info("Saved metadata.json. Base benchmark assembly complete -> %s", out_dir)

    # Optional add-on: the valuation benchmark is best-effort and must never
    # break the base assembly.
    try:
        from .build_valuation_tasks import build_valuation_benchmark
        logger.info("Building valuation benchmark artifacts (%s) …", granularity)
        val_summary = build_valuation_benchmark(granularity=granularity)
        logger.info("Valuation benchmark: %s", val_summary)
    except Exception:
        logger.warning("Valuation benchmark build skipped or failed", exc_info=True)
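

# How a downstream consumer might reload the split artifacts written above.
# A minimal sketch; ``_example_load_benchmark_splits`` is illustrative and is
# not called anywhere in this pipeline.
def _example_load_benchmark_splits(granularity: str) -> tuple[pd.DataFrame, pd.DataFrame]:
    """Illustrative sketch: reload the train/test panels written by ``run()``."""
    out_dir = config.DATA_DIR / "benchmark" / granularity
    train = pd.read_parquet(out_dir / "panel_train.parquet")
    test = pd.read_parquet(out_dir / "panel_test.parquet")
    return train, test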
|
|