# MacroLens / code / enrich_benchmark.py
# Uploaded via the upload-large-folder tool (commit 5995ef5, verified) by itouchz.
"""Step 11 – Enrich benchmark panels with news-derived features.
Lightweight post-processing that adds columns to the **L3 benchmark**
panels (not L2 processed) and enriches ``scenarios.parquet`` with
collected news context.
New columns added to ``panel_train.parquet`` / ``panel_test.parquet``:
* ``filing_8k_count_30d`` (int) – 8-K filings in the past 30 days
* ``news_count_7d`` (int) – 8-K filing events in past 7 days (news proxy)
* ``has_press_release_7d`` (bool) – press release in past 7 days
New column added to ``scenarios.parquet``:
* ``news_context`` (str, JSON) – top-5 scenario news articles
Resume: skips if columns already exist in parquet files.
"""
from __future__ import annotations

import json
import logging
import re
from pathlib import Path

import numpy as np
import pandas as pd

from . import config
logger = logging.getLogger(__name__)
# ---------------------------------------------------------------------------
# Helper: rolling-window count via prefix-sum + searchsorted
# ---------------------------------------------------------------------------
def _rolling_window_count(
panel_dates_i64: np.ndarray,
panel_groups: dict[str, np.ndarray],
events: pd.DataFrame,
window_days: int,
n_rows: int,
) -> np.ndarray:
"""Count events within a rolling calendar-day window per ticker.
Uses cumulative-sum differencing with ``np.searchsorted`` – loops
over tickers that have events (typically a small subset), but each
iteration is pure numpy O(n log m).
Parameters
----------
panel_dates_i64 : int64 nanosecond timestamps for all panel rows
panel_groups : dict mapping ticker → integer row indices in the panel
events : DataFrame with columns [ticker, date, n] (daily counts)
window_days : size of the look-back window (inclusive both ends)
n_rows : total number of rows in the panel
Returns
-------
np.ndarray[int64] of length *n_rows*.
"""
result = np.zeros(n_rows, dtype=np.int64)
if events.empty:
return result
window_ns = np.int64((window_days + 1) * 86_400_000_000_000)
for ticker, ev_group in events.groupby("ticker"):
if ticker not in panel_groups:
continue
panel_idx = panel_groups[ticker]
p_dates = panel_dates_i64[panel_idx]
ev_sorted = ev_group.sort_values("date")
e_dates = ev_sorted["date"].values.astype("int64")
e_cumsum = ev_sorted["n"].values.cumsum()
upper_pos = np.searchsorted(e_dates, p_dates, side="right") - 1
upper_cs = np.where(upper_pos >= 0, e_cumsum[upper_pos], 0)
lower_dates = p_dates - window_ns
lower_pos = np.searchsorted(e_dates, lower_dates, side="right") - 1
lower_cs = np.where(lower_pos >= 0, e_cumsum[lower_pos], 0)
result[panel_idx] = upper_cs - lower_cs
return result
# ---------------------------------------------------------------------------
# 1. Filing 8-K count
# ---------------------------------------------------------------------------
def _add_8k_counts(panel: pd.DataFrame, corpus_path: Path) -> pd.DataFrame:
    """Add ``filing_8k_count_30d`` (fully vectorised, no calendar reindexing)."""
    # Resume support: a previous run already wrote the column.
    if "filing_8k_count_30d" in panel.columns:
        logger.info(" filing_8k_count_30d already present – skipping")
        return panel
    # Missing corpus is tolerated: the feature degrades to all-zero.
    if not corpus_path.exists():
        logger.warning("filing_corpus.parquet not found – filling 8k count with 0")
        panel["filing_8k_count_30d"] = 0
        return panel
    corpus = pd.read_parquet(corpus_path)
    filings_8k = corpus.loc[corpus["filing_type"] == "8-K"].copy()
    if filings_8k.empty:
        logger.info(" No 8-K filings in corpus – filling with 0")
        panel["filing_8k_count_30d"] = 0
        return panel
    panel["date"] = pd.to_datetime(panel["date"])
    filings_8k["filing_date"] = pd.to_datetime(filings_8k["filing_date"])
    # Collapse to one row per (ticker, day) with that day's filing count.
    per_day = (
        filings_8k.groupby(["ticker", "filing_date"])
        .size()
        .reset_index(name="n")
        .rename(columns={"filing_date": "date"})
    )
    date_ns = panel["date"].values.astype("int64")
    rows_by_ticker = {
        t: idx for t, idx in panel.groupby("ticker", sort=False).indices.items()
    }
    panel["filing_8k_count_30d"] = _rolling_window_count(
        date_ns, rows_by_ticker, per_day, window_days=30, n_rows=len(panel),
    )
    logger.info(" Added filing_8k_count_30d")
    return panel
# ---------------------------------------------------------------------------
# 2. News/PR counts from SEC 8-K filings (covers full 2021-2026 period)
# ---------------------------------------------------------------------------
def _add_news_counts(panel: pd.DataFrame) -> pd.DataFrame:
    """Add ``news_count_7d`` and ``has_press_release_7d`` from SEC 8-K filings.

    8-K filings are material event disclosures — effectively press releases
    filed with the SEC. For small/micro-cap companies, 8-K filings are the
    most reliable per-ticker news source (mainstream media coverage is sparse).

    Scans ``config.FILINGS_DIR/<ticker>/*.md`` for 8-K files, parses the
    filing date out of each file name, and counts filings in a rolling 7-day
    window per panel row.  Missing directory or no filings → zero/False
    defaults so the pipeline still completes.
    """
    # Resume support: a previous run already wrote the columns.
    if "news_count_7d" in panel.columns:
        logger.info(" news_count_7d already present – skipping")
        return panel
    panel["date"] = pd.to_datetime(panel["date"])
    # Filing names typically contain the type and date, e.g.
    # "8-K_2023-07-26.md" or "8-K_20230726_...".  Compile both date patterns
    # once here — the original re-imported and re-compiled inside the
    # innermost per-file loop.
    iso_date_re = re.compile(r"(\d{4}-\d{2}-\d{2})")
    compact_date_re = re.compile(r"(\d{4})(\d{2})(\d{2})")
    # Collect 8-K filing dates per ticker from the filings directory.
    filings_dir = config.FILINGS_DIR
    rows_8k: list[dict] = []
    if filings_dir.exists():
        for ticker_dir in filings_dir.iterdir():
            if not ticker_dir.is_dir():
                continue
            ticker = ticker_dir.name
            for filing in ticker_dir.glob("*.md"):
                fname = filing.stem
                if "8-K" not in fname.upper() and "8K" not in fname.upper():
                    continue
                # Extract the filing date from the file name; files with no
                # recognisable date are skipped.
                m = iso_date_re.search(fname)
                if m:
                    date_str = m.group(1)
                else:
                    m = compact_date_re.search(fname)
                    if not m:
                        continue
                    date_str = f"{m.group(1)}-{m.group(2)}-{m.group(3)}"
                try:
                    rows_8k.append({"ticker": ticker, "date": pd.Timestamp(date_str)})
                except Exception:
                    # Matched digits that are not a valid calendar date.
                    continue
    panel_dates_i64 = panel["date"].values.astype("int64")
    panel_groups = {
        t: idx for t, idx in panel.groupby("ticker", sort=False).indices.items()
    }
    if rows_8k:
        filing_df = pd.DataFrame(rows_8k)
        # Normalise to midnight so counts are per calendar day.
        filing_df["date"] = pd.to_datetime(filing_df["date"]).dt.normalize()
        daily_8k = filing_df.groupby(["ticker", "date"]).size().reset_index(name="n")
        logger.info(" Found %d 8-K filing events across %d tickers",
                    len(daily_8k), filing_df["ticker"].nunique())
        panel["news_count_7d"] = _rolling_window_count(
            panel_dates_i64, panel_groups, daily_8k,
            window_days=7, n_rows=len(panel),
        )
        panel["has_press_release_7d"] = panel["news_count_7d"] > 0
    else:
        logger.warning(" No 8-K filings found – filling with defaults")
        panel["news_count_7d"] = 0
        panel["has_press_release_7d"] = False
    logger.info(" Added news_count_7d and has_press_release_7d (from 8-K filings)")
    return panel
# ---------------------------------------------------------------------------
# 3. Scenario news context
# ---------------------------------------------------------------------------
def _enrich_scenarios(scenarios_path: Path) -> None:
    """Add ``news_context`` column to scenarios.parquet.

    For each scenario row, reads the collected articles from
    ``config.NEWS_DIR/scenarios/<scenario_id>.json`` and stores the top-5 as
    a JSON string of {title, snippet, date} dicts.  Missing or unreadable
    files yield ``"[]"``.  Rewrites the parquet file in place.
    """
    if not scenarios_path.exists():
        logger.warning("scenarios.parquet not found – skipping scenario enrichment")
        return
    df = pd.read_parquet(scenarios_path)
    # Resume support: a previous run already wrote the column.
    if "news_context" in df.columns:
        logger.info(" news_context already present – skipping")
        return
    scenarios_dir = config.NEWS_DIR / "scenarios"
    contexts = []
    for _, row in df.iterrows():
        sc_id = row["scenario_id"]
        news_path = scenarios_dir / f"{sc_id}.json"
        if news_path.exists():
            try:
                articles = json.loads(news_path.read_text(encoding="utf-8"))
                # Keep only the five leading articles, as documented — the
                # original serialised the full list despite naming the
                # variable ``top_articles``.
                top_articles = [
                    {
                        "title": a.get("title", ""),
                        "snippet": a.get("snippet", ""),
                        "date": a.get("date", ""),
                    }
                    for a in articles[:5]
                ]
                contexts.append(json.dumps(top_articles))
            except Exception:
                # Best-effort: invalid/unreadable JSON degrades to no context.
                contexts.append("[]")
        else:
            contexts.append("[]")
    df["news_context"] = contexts
    df.to_parquet(scenarios_path, index=False)
    logger.info(" Added news_context to %d scenarios", len(df))
# ---------------------------------------------------------------------------
# Public entry point
# ---------------------------------------------------------------------------
def run(granularity: str | None = None) -> None:
    """Enrich L3 benchmark panels and scenarios with news-derived features."""
    gran = granularity if granularity is not None else config.GRANULARITY
    benchmark_dir = config.get_benchmark_dir(gran)
    corpus_path = benchmark_dir / "filing_corpus.parquet"
    # Each split is checkpointed after every feature stage so an interrupted
    # run can resume without redoing finished work.
    for split in ("panel_train.parquet", "panel_test.parquet"):
        panel_path = benchmark_dir / split
        if not panel_path.exists():
            logger.warning("%s not found – skipping", panel_path)
            continue
        logger.info("Enriching %s …", split)
        panel = pd.read_parquet(panel_path)
        panel = _add_8k_counts(panel, corpus_path)
        panel.to_parquet(panel_path, index=False)
        logger.info(" Checkpoint: saved after 8-K enrichment")
        panel = _add_news_counts(panel)
        panel.to_parquet(panel_path, index=False)
        logger.info(
            " Saved enriched %s (%d rows, %d cols)",
            split, len(panel), len(panel.columns),
        )
    _enrich_scenarios(benchmark_dir / "scenarios.parquet")
    logger.info("Benchmark enrichment complete.")