"""Step 11 – Enrich benchmark panels with news-derived features.

Lightweight post-processing that adds columns to the **L3 benchmark**
panels (not L2 processed) and enriches ``scenarios.parquet`` with
collected news context.

New columns added to ``panel_train.parquet`` / ``panel_test.parquet``:
  * ``filing_8k_count_30d`` (int)  – 8-K filings in the past 30 days
  * ``news_count_7d``       (int)  – news/PR events (8-K filings) in past 7 days
  * ``has_press_release_7d`` (bool) – press release filed in past 7 days

New column added to ``scenarios.parquet``:
  * ``news_context`` (str, JSON) – top-5 scenario news articles

Resume: skips if columns already exist in parquet files.
"""

from __future__ import annotations

import json
import logging
import re
from pathlib import Path

import numpy as np
import pandas as pd

from . import config

logger = logging.getLogger(__name__)


# ---------------------------------------------------------------------------
# Helper: rolling-window count via prefix-sum + searchsorted
# ---------------------------------------------------------------------------

def _rolling_window_count(
    panel_dates_i64: np.ndarray,
    panel_groups: dict[str, np.ndarray],
    events: pd.DataFrame,
    window_days: int,
    n_rows: int,
) -> np.ndarray:
    """Count events within a rolling calendar-day window per ticker.

    Uses cumulative-sum differencing with ``np.searchsorted``: the loop
    runs only over tickers that have events (typically a small subset),
    and each iteration is pure NumPy, costing O(n log m) for n panel rows
    and m events.

    Parameters
    ----------
    panel_dates_i64 : int64 nanosecond timestamps for all panel rows
    panel_groups : dict mapping ticker → integer row indices in the panel
    events : DataFrame with columns [ticker, date, n] (daily counts)
    window_days : size of the look-back window (inclusive of both endpoints)
    n_rows : total number of rows in the panel

    Returns
    -------
    np.ndarray[int64] of length *n_rows*.
    """
    result = np.zeros(n_rows, dtype=np.int64)

    if events.empty:
        return result

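    # Window length in nanoseconds.  The ``+ 1`` keeps the window inclusive
    # of both endpoints: an event dated exactly ``window_days`` before a
    # panel row still falls inside ``(date - window_ns, date]``.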
    window_ns = np.int64((window_days + 1) * 86_400_000_000_000)

    for ticker, ev_group in events.groupby("ticker"):
        if ticker not in panel_groups:
            continue

        panel_idx = panel_groups[ticker]
        p_dates = panel_dates_i64[panel_idx]

        ev_sorted = ev_group.sort_values("date")
        e_dates = ev_sorted["date"].values.astype("int64")
        e_cumsum = ev_sorted["n"].values.cumsum()

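        # Prefix-sum differencing: the number of events in (lower, upper]
        # equals cumsum(upper) - cumsum(lower), with each endpoint located
        # by binary search (``np.searchsorted``) over the sorted event dates.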
        upper_pos = np.searchsorted(e_dates, p_dates, side="right") - 1
        upper_cs = np.where(upper_pos >= 0, e_cumsum[upper_pos], 0)

        lower_dates = p_dates - window_ns
        lower_pos = np.searchsorted(e_dates, lower_dates, side="right") - 1
        lower_cs = np.where(lower_pos >= 0, e_cumsum[lower_pos], 0)

        result[panel_idx] = upper_cs - lower_cs

    return result

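# Illustrative example of the helper above (hypothetical data):
#
#   dates = pd.to_datetime(["2023-01-10", "2023-01-20"]).values.astype("int64")
#   groups = {"ABC": np.array([0, 1])}
#   events = pd.DataFrame(
#       {"ticker": ["ABC"], "date": [pd.Timestamp("2023-01-08")], "n": [2]}
#   )
#   _rolling_window_count(dates, groups, events, window_days=7, n_rows=2)
#   # -> array([2, 0]): the event falls inside the first row's 7-day window only.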

# ---------------------------------------------------------------------------
# 1. Filing 8-K count
# ---------------------------------------------------------------------------

def _add_8k_counts(panel: pd.DataFrame, corpus_path: Path) -> pd.DataFrame:
    """Add ``filing_8k_count_30d`` (fully vectorised, no calendar reindexing)."""
    if "filing_8k_count_30d" in panel.columns:
        logger.info("  filing_8k_count_30d already present – skipping")
        return panel

    if not corpus_path.exists():
        logger.warning("filing_corpus.parquet not found – filling 8k count with 0")
        panel["filing_8k_count_30d"] = 0
        return panel

    corpus = pd.read_parquet(corpus_path)
    eightk = corpus[corpus["filing_type"] == "8-K"].copy()

    if eightk.empty:
        logger.info("  No 8-K filings in corpus – filling with 0")
        panel["filing_8k_count_30d"] = 0
        return panel

    panel["date"] = pd.to_datetime(panel["date"])
    eightk["filing_date"] = pd.to_datetime(eightk["filing_date"])

    daily = (
        eightk.groupby(["ticker", "filing_date"])
        .size()
        .reset_index(name="n")
        .rename(columns={"filing_date": "date"})
    )

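    # ``groupby(...).indices`` maps each ticker to the integer positions of
    # its panel rows, letting the helper scatter counts back without
    # per-ticker boolean masks.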
    panel_dates_i64 = panel["date"].values.astype("int64")
    panel_groups = {
        t: idx for t, idx in panel.groupby("ticker", sort=False).indices.items()
    }

    panel["filing_8k_count_30d"] = _rolling_window_count(
        panel_dates_i64, panel_groups, daily, window_days=30, n_rows=len(panel),
    )
    logger.info("  Added filing_8k_count_30d")
    return panel


# ---------------------------------------------------------------------------
# 2. News/PR counts from SEC 8-K filings (covers full 2021-2026 period)
# ---------------------------------------------------------------------------

def _add_news_counts(panel: pd.DataFrame) -> pd.DataFrame:
    """Add ``news_count_7d`` and ``has_press_release_7d`` from SEC 8-K filings.

    8-K filings are material event disclosures — effectively press releases
    filed with the SEC.  For small/micro-cap companies, 8-K filings are the
    most reliable per-ticker news source (mainstream media coverage is sparse).
    """
    if "news_count_7d" in panel.columns:
        logger.info("  news_count_7d already present – skipping")
        return panel

    panel["date"] = pd.to_datetime(panel["date"])

    # Collect 8-K filing dates per ticker from the filings directory
    filings_dir = config.FILINGS_DIR
    rows_8k: list[dict] = []
    if filings_dir.exists():
        for ticker_dir in filings_dir.iterdir():
            if not ticker_dir.is_dir():
                continue
            ticker = ticker_dir.name
            for filing in ticker_dir.glob("*.md"):
                # Filing names typically contain the type and date
                # e.g., "8-K_2023-07-26.md" or "8-K_20230726_..."
                fname = filing.stem
                fname_upper = fname.upper()
                if "8-K" not in fname_upper and "8K" not in fname_upper:
                    continue
                # Extract the filing date from the filename.
                date_match = re.search(r"(\d{4}-\d{2}-\d{2})", fname)
                if date_match:
                    date_str = date_match.group(1)
                else:
                    date_match = re.search(r"(\d{4})(\d{2})(\d{2})", fname)
                    if not date_match:
                        continue
                    date_str = "-".join(date_match.groups())
                try:
                    ts = pd.Timestamp(date_str)
                    rows_8k.append({"ticker": ticker, "date": ts})
                except Exception:
                    continue

    panel_dates_i64 = panel["date"].values.astype("int64")
    panel_groups = {
        t: idx for t, idx in panel.groupby("ticker", sort=False).indices.items()
    }

    if rows_8k:
        filing_df = pd.DataFrame(rows_8k)
        filing_df["date"] = pd.to_datetime(filing_df["date"]).dt.normalize()
        daily_8k = filing_df.groupby(["ticker", "date"]).size().reset_index(name="n")
        logger.info("  Found %d 8-K filing events across %d tickers",
                     len(daily_8k), filing_df["ticker"].nunique())
        panel["news_count_7d"] = _rolling_window_count(
            panel_dates_i64, panel_groups, daily_8k,
            window_days=7, n_rows=len(panel),
        )
        panel["has_press_release_7d"] = panel["news_count_7d"] > 0
    else:
        logger.warning("  No 8-K filings found – filling with defaults")
        panel["news_count_7d"] = 0
        panel["has_press_release_7d"] = False

    logger.info("  Added news_count_7d and has_press_release_7d (from 8-K filings)")
    return panel


# ---------------------------------------------------------------------------
# 3. Scenario news context
# ---------------------------------------------------------------------------

def _enrich_scenarios(scenarios_path: Path) -> None:
    """Add ``news_context`` column to scenarios.parquet."""
    if not scenarios_path.exists():
        logger.warning("scenarios.parquet not found – skipping scenario enrichment")
        return

    df = pd.read_parquet(scenarios_path)

    if "news_context" in df.columns:
        logger.info("  news_context already present – skipping")
        return

    scenarios_dir = config.NEWS_DIR / "scenarios"
    contexts = []

    for _, row in df.iterrows():
        sc_id = row["scenario_id"]
        news_path = scenarios_dir / f"{sc_id}.json"
        if news_path.exists():
            try:
                articles = json.loads(news_path.read_text(encoding="utf-8"))
                top_articles = [
                    {
                        "title": a.get("title", ""),
                        "snippet": a.get("snippet", ""),
                        "date": a.get("date", ""),
                    }
                    for a in articles[:5]  # top-5 only, as documented
                ]
                contexts.append(json.dumps(top_articles))
            except Exception:
                contexts.append("[]")
        else:
            contexts.append("[]")

    df["news_context"] = contexts
    df.to_parquet(scenarios_path, index=False)
    logger.info("  Added news_context to %d scenarios", len(df))


# ---------------------------------------------------------------------------
# Public entry point
# ---------------------------------------------------------------------------

def run(granularity: str | None = None) -> None:
    """Enrich L3 benchmark panels and scenarios with news-derived features."""
    if granularity is None:
        granularity = config.GRANULARITY

    benchmark_dir = config.get_benchmark_dir(granularity)
    corpus_path = benchmark_dir / "filing_corpus.parquet"

    for split in ("panel_train.parquet", "panel_test.parquet"):
        panel_path = benchmark_dir / split
        if not panel_path.exists():
            logger.warning("%s not found – skipping", panel_path)
            continue

        logger.info("Enriching %s …", split)
        panel = pd.read_parquet(panel_path)

        panel = _add_8k_counts(panel, corpus_path)
        panel.to_parquet(panel_path, index=False)
        logger.info("  Checkpoint: saved after 8-K enrichment")

        panel = _add_news_counts(panel)
        panel.to_parquet(panel_path, index=False)
        logger.info(
            "  Saved enriched %s (%d rows, %d cols)",
            split, len(panel), len(panel.columns),
        )

    scenarios_path = benchmark_dir / "scenarios.parquet"
    _enrich_scenarios(scenarios_path)

    logger.info("Benchmark enrichment complete.")