"""Layer 2: Preprocess raw data into a task-agnostic panel. A **pure function** of (raw data files + config). No API calls, no side effects. Given the same raw data and config the output is deterministic. Takes ``config.GRANULARITY`` (``"daily"``, ``"weekly"``, ``"monthly"``) and produces: data/processed/{granularity}/panel.parquet -- merged panel data/processed/{granularity}/columns.json -- column-name groups Steps: 2a. Load raw data (no transformations) 2b. Resample to target granularity 2c. Merge into panel 2d. Derive time-varying metrics 2e. Save """ from __future__ import annotations import json import logging import re from pathlib import Path import numpy as np import pandas as pd from . import config logger = logging.getLogger(__name__) # =================================================================== # 2a -- Load helpers (pure loading, no transformations) # =================================================================== def _load_prices() -> pd.DataFrame: """Load raw daily prices and filter out rows with NaN close.""" path = config.PRICES_DIR / "daily_prices.csv" if not path.exists(): raise FileNotFoundError(f"Run Step 3 first: {path}") df = pd.read_csv(path, parse_dates=["Date"]) df = df.rename(columns={ "Date": "date", "Ticker": "ticker", "Open": "open", "High": "high", "Low": "low", "Close": "close", "Volume": "volume", "Adj Close": "adj_close", }) # Keep only known columns (guard against extras) keep = ["ticker", "date", "open", "high", "low", "close", "volume", "adj_close"] df = df[[c for c in keep if c in df.columns]] # Explicitly coerce date to datetime64 — mixed formats (e.g. manually-appended # rows with time components) can cause pd.read_csv to fall back to object dtype # even with parse_dates=. Downstream resampling requires datetime64. df["date"] = pd.to_datetime(df["date"], errors="coerce") # Bad Yahoo data recovery: negative adj_close is impossible (e.g. CBIO had # negative adj_close values that flipped downstream derivations). # Recovery: fall back to close value (loses dividend adjustment for those # rows but preserves valid positive price data — better than NaN). if "adj_close" in df.columns: bad_adj = df["adj_close"] < 0 if bad_adj.any(): n = int(bad_adj.sum()) df.loc[bad_adj, "adj_close"] = df.loc[bad_adj, "close"] logger.info("Recovery: replaced %d negative adj_close rows with close value", n) # OHLC invariant enforcement: high = max(O,H,L,C), low = min(O,H,L,C). # A handful of Yahoo rows have highopen (e.g. CWEN-A 2021-05-05, # SITC 2021-05-05, UA 2021-05-05, WLY 2021-05-05, CWEN-A 2023-06-05). # Preserves all four values while forcing the invariant to hold. if all(c in df.columns for c in ("open", "high", "low", "close")): prev_bad = ((df["high"] < df[["open", "low", "close"]].max(axis=1)) | (df["low"] > df[["open", "high", "close"]].min(axis=1))).sum() if prev_bad: ohlc = df[["open", "high", "low", "close"]].to_numpy() df["high"] = ohlc.max(axis=1) df["low"] = ohlc.min(axis=1) logger.info("OHLC sanity: enforced high=max(O,H,L,C) / low=min(O,H,L,C) on %d rows", int(prev_bad)) # Defensive: drop rows where close is NaN (junk/delisted/pre-listing) before = len(df) df = df.dropna(subset=["close"]) dropped = before - len(df) if dropped > 0: logger.info("Dropped %d rows with NaN close in price data.", dropped) return df.sort_values(["ticker", "date"]).reset_index(drop=True) def _load_statement_long(ticker: str) -> pd.DataFrame: """Load per-ticker statement CSVs into long-form (date, metric, value). yfinance statement CSVs: index = metric names, columns = date strings. """ records: list[dict] = [] for suffix, key_map in [ ("income", config.INCOME_KEYS), ("balance", config.BALANCE_KEYS), ("cashflow", config.CASHFLOW_KEYS), ]: csv_path = config.FUNDAMENTALS_DIR / f"{ticker}_{suffix}.csv" if not csv_path.exists(): continue try: raw = pd.read_csv(csv_path, index_col=0) for orig_name, col_name in key_map.items(): if orig_name in raw.index: row = raw.loc[orig_name] for date_str, val in row.items(): try: records.append({ "date": pd.to_datetime(date_str), "metric": col_name, "value": pd.to_numeric(val, errors="coerce"), }) except Exception: continue except Exception as exc: logger.debug("Could not load %s for %s: %s", suffix, ticker, exc) if not records: return pd.DataFrame() long_df = pd.DataFrame(records) # Pivot: rows=date, columns=metric, values=value wide = long_df.pivot_table(index="date", columns="metric", values="value", aggfunc="first") wide = wide.reset_index().sort_values("date") wide.columns.name = None # Compute trailing-twelve-month (TTM) sums for flow metrics. # Balance-sheet items (stock variables) are point-in-time and don't need TTM. _FLOW_METRICS = { "stmt_revenue", "stmt_net_income", "stmt_ebitda", "stmt_ebit", "stmt_gross_profit", "stmt_operating_income", "stmt_basic_eps", "stmt_operating_cashflow", "stmt_free_cashflow", "stmt_capex", "stmt_cogs", "stmt_operating_expenses", "stmt_financing_cashflow", } for col in list(wide.columns): if col in _FLOW_METRICS: ttm_col = f"{col}_ttm" wide[ttm_col] = wide[col].rolling(window=4, min_periods=4).sum() wide["ticker"] = ticker return wide def _load_xbrl_statements(tickers: list[str]) -> pd.DataFrame: """Load historical financial statements from SEC EDGAR XBRL facts. Reads ``data/xbrl/parsed/company_facts.parquet`` and pivots the relevant tags into the same (ticker, date, stmt_*) wide format that ``_load_statement_long`` produces. This gives us quarterly data going back 10+ years — far beyond yfinance's ~5-quarter window. Returns an empty DataFrame if the XBRL data is unavailable. """ xbrl_path = config.DATA_DIR / "xbrl" / "parsed" / "company_facts.parquet" if not xbrl_path.exists(): logger.warning("XBRL facts not found at %s — skipping.", xbrl_path) return pd.DataFrame() # Collect all XBRL tags we care about wanted_tags: set[str] = set() for tags in config.XBRL_TAG_MAP.values(): wanted_tags.update(tags) wanted_tags.update(config.XBRL_DA_TAGS) # Add balance equation validation tag (not mapped to a column, used for equity fix) wanted_tags.add("LiabilitiesAndStockholdersEquity") facts = pd.read_parquet( xbrl_path, columns=["ticker", "tag", "period_start", "period_end", "value", "form", "fiscal_year", "fiscal_period", "filed"], ) # Keep 10-K/10-Q (US), 20-F/6-K (foreign), 40-F (Canadian) for tickers in our universe facts = facts[ facts["form"].isin(["10-K", "10-Q", "10-K/A", "10-Q/A", "20-F", "20-F/A", "6-K", "40-F", "40-F/A"]) & facts["ticker"].isin(tickers) & facts["tag"].isin(wanted_tags) ].copy() if facts.empty: logger.warning("No matching XBRL facts after filtering.") return pd.DataFrame() facts["period_start"] = pd.to_datetime(facts["period_start"], errors="coerce") facts["period_end"] = pd.to_datetime(facts["period_end"], errors="coerce") facts["filed"] = pd.to_datetime(facts["filed"], errors="coerce") facts["value"] = pd.to_numeric(facts["value"], errors="coerce") facts = facts.dropna(subset=["period_end", "value"]) # --- Separate flow metrics into standalone quarterly values --- # XBRL 10-Q filings report BOTH cumulative year-to-date figures # (e.g. 6-month Q1+Q2) and standalone 3-month quarter figures at # the same period_end. We need standalone quarter values for correct # TTM computation. Additionally, Q4 standalone values only exist # in the 10-K as full-year (FY), so we derive Q4 = FY - (Q1+Q2+Q3). _BALANCE_SHEET_TAGS = { "Assets", "Liabilities", "LiabilitiesAndStockholdersEquity", "StockholdersEquity", "StockholdersEquityIncludingPortionAttributableToNoncontrollingInterest", "LongTermDebt", "LongTermDebtAndCapitalLeaseObligations", "ShortTermBorrowings", "DebtCurrent", "LongTermDebtNoncurrent", "CashAndCashEquivalentsAtCarryingValue", "CashCashEquivalentsRestrictedCashAndRestrictedCashEquivalents", "EntityCommonStockSharesOutstanding", "CommonStockSharesOutstanding", "WeightedAverageNumberOfShareOutstandingBasicAndDiluted", "WeightedAverageNumberOfSharesOutstandingBasic", "WeightedAverageNumberOfDilutedSharesOutstanding", "CommonSharesIssued", "CommonSharesOutstanding", # New balance-sheet detail tags "AccountsReceivableNetCurrent", "AccountsReceivableNet", "TradeAndOtherCurrentReceivables", "InventoryNet", "Inventories", "CurrentInventories", "AssetsCurrent", "CurrentAssets", "PropertyPlantAndEquipmentNet", "PropertyPlantAndEquipment", "Goodwill", "GoodwillGross", "AccountsPayableCurrent", "AccountsPayable", "TradeAndOtherCurrentPayables", "LiabilitiesCurrent", "CurrentLiabilities", } # Split: balance sheet tags keep all periods; flow tags need duration filtering is_balance = facts["tag"].isin(_BALANCE_SHEET_TAGS) balance_facts = facts[is_balance].copy() flow_facts = facts[~is_balance].copy() # For flow facts: keep only standalone quarter values (duration ≤ 100 days) quarterly_flow = flow_facts[flow_facts["fiscal_period"].isin(["Q1", "Q2", "Q3", "Q4"])].copy() if quarterly_flow["period_start"].notna().any(): duration = (quarterly_flow["period_end"] - quarterly_flow["period_start"]).dt.days quarterly_flow = quarterly_flow[duration.isna() | (duration <= 100)] # Derive Q4 = FY_value - sum(Q1+Q2+Q3 within FY date range). # Vectorised: cross-join Q1-Q3 onto FY by (ticker, tag), filter by # date range, aggregate, then subtract. fy_flow = flow_facts[flow_facts["fiscal_period"] == "FY"].copy() if not fy_flow.empty and not quarterly_flow.empty: fy_deduped = fy_flow.sort_values("filed").drop_duplicates( subset=["ticker", "tag", "period_end"], keep="last", ).dropna(subset=["period_start", "period_end"]) q_deduped = quarterly_flow.sort_values("filed").drop_duplicates( subset=["ticker", "tag", "period_end"], keep="last", ) q_deduped = q_deduped[q_deduped["fiscal_period"].isin(["Q1", "Q2", "Q3"])] if not fy_deduped.empty and not q_deduped.empty: fy_key = fy_deduped[["ticker", "tag", "period_start", "period_end", "value"]].copy() fy_key = fy_key.rename(columns={ "period_start": "fy_start", "period_end": "fy_end", "value": "fy_value", }) q_key = q_deduped[["ticker", "tag", "period_end", "value"]].copy() q_key = q_key.rename(columns={"period_end": "q_end", "value": "q_value"}) merged = fy_key.merge(q_key, on=["ticker", "tag"], how="inner") merged = merged[(merged["q_end"] > merged["fy_start"]) & (merged["q_end"] <= merged["fy_end"])] agg = merged.groupby(["ticker", "tag", "fy_end"]).agg( q_count=("q_value", "size"), q_sum=("q_value", "sum"), fy_value=("fy_value", "first"), fy_start=("fy_start", "first"), ).reset_index() agg = agg[agg["q_count"] == 3] agg["q4_value"] = agg["fy_value"] - agg["q_sum"] agg = agg[agg["q4_value"] > 0] if not agg.empty: q4_rows = fy_deduped.merge( agg[["ticker", "tag", "fy_end", "q4_value"]], left_on=["ticker", "tag", "period_end"], right_on=["ticker", "tag", "fy_end"], how="inner", ) q4_rows["value"] = q4_rows["q4_value"] q4_rows["fiscal_period"] = "Q4_derived" q4_rows = q4_rows.drop(columns=["fy_end", "q4_value"], errors="ignore") quarterly_flow = pd.concat([quarterly_flow, q4_rows], ignore_index=True) logger.info("Derived %d Q4 standalone values from FY - (Q1+Q2+Q3).", len(q4_rows)) # Recombine balance sheet + flow facts balance_facts = balance_facts[balance_facts["fiscal_period"].isin( ["Q1", "Q2", "Q3", "Q4", "FY"] )] facts = pd.concat([balance_facts, quarterly_flow], ignore_index=True) # Deduplicate: keep the latest filing per (ticker, tag, period_end) facts = facts.sort_values("filed").drop_duplicates( subset=["ticker", "tag", "period_end"], keep="last", ) # --- Resolve priority: for each stmt_ column pick the first available tag --- col_frames: dict[str, pd.DataFrame] = {} for stmt_col, tag_list in config.XBRL_TAG_MAP.items(): # Try tags in priority order; only use lower-priority tags for # (ticker, period_end) combinations not covered by higher ones. parts: list[pd.DataFrame] = [] covered_keys: set[tuple[str, pd.Timestamp]] = set() for tag in tag_list: subset = facts[facts["tag"] == tag][ ["ticker", "period_end", "value"] ].copy() if subset.empty: continue if covered_keys: keep = [ (t, d) not in covered_keys for t, d in zip(subset["ticker"], subset["period_end"]) ] subset = subset[keep] if subset.empty: continue covered_keys.update( zip(subset["ticker"], subset["period_end"]) ) parts.append(subset) if parts: combined = pd.concat(parts, ignore_index=True) combined = combined.rename(columns={"value": stmt_col}) col_frames[stmt_col] = combined if not col_frames: logger.warning("No XBRL facts resolved to stmt_ columns.") return pd.DataFrame() # Merge all stmt_ columns into one wide DataFrame keyed by (ticker, period_end) items = iter(col_frames.values()) wide = next(items) for extra in items: wide = wide.merge(extra, on=["ticker", "period_end"], how="outer") # --- Derive composite metrics --- # EBITDA = Operating Income + D&A if "stmt_ebit" in wide.columns: da_facts = facts[facts["tag"].isin(config.XBRL_DA_TAGS)].copy() if not da_facts.empty: da_facts = da_facts.sort_values("filed").drop_duplicates( subset=["ticker", "period_end"], keep="last", ) da_map = da_facts.set_index(["ticker", "period_end"])["value"] wide_idx = wide.set_index(["ticker", "period_end"]) da_aligned = da_map.reindex(wide_idx.index) ebitda_derived = wide_idx["stmt_ebit"] + da_aligned if "stmt_ebitda" not in wide.columns: wide["stmt_ebitda"] = ebitda_derived.values else: mask = wide["stmt_ebitda"].isna() wide.loc[mask, "stmt_ebitda"] = ebitda_derived.values[mask.values] # Free Cash Flow = Operating CF - CapEx if "stmt_operating_cashflow" in wide.columns and "stmt_capex" in wide.columns: if "stmt_free_cashflow" not in wide.columns: wide["stmt_free_cashflow"] = ( wide["stmt_operating_cashflow"] - wide["stmt_capex"].abs() ) else: mask = wide["stmt_free_cashflow"].isna() wide.loc[mask, "stmt_free_cashflow"] = ( wide.loc[mask, "stmt_operating_cashflow"] - wide.loc[mask, "stmt_capex"].abs() ) # Tax rate = Tax / Pretax if "stmt_tax_provision" in wide.columns and "stmt_pretax_income" in wide.columns: if "stmt_tax_rate" not in wide.columns: pretax = wide["stmt_pretax_income"].replace(0, np.nan) wide["stmt_tax_rate"] = (wide["stmt_tax_provision"].abs() / pretax).clip(0, 0.5) # Gross Profit derivation: if missing, derive from Revenue - COGS if "stmt_revenue" in wide.columns and "stmt_cogs" in wide.columns: if "stmt_gross_profit" not in wide.columns: wide["stmt_gross_profit"] = wide["stmt_revenue"] - wide["stmt_cogs"].abs() else: mask = wide["stmt_gross_profit"].isna() wide.loc[mask, "stmt_gross_profit"] = ( wide.loc[mask, "stmt_revenue"] - wide.loc[mask, "stmt_cogs"].abs() ) # --- Balance equation fix (multi-pass) --- # The accounting identity Assets = Liabilities + Equity should hold exactly, # but XBRL data has several failure modes: # (a) Equity tag excludes NCI while Assets/Liabilities are consolidated totals # (b) Liabilities tag is missing but Assets and Equity are present # (c) Assets tag is missing but Liabilities and Equity are present # (d) All three present but mutually inconsistent (issuer error) # Strategy: run multiple fix passes, then drop rows that still mismatch > 1%. if all(c in wide.columns for c in ["stmt_total_assets", "stmt_total_liabilities", "stmt_total_equity"]): # Build lookup for LiabilitiesAndStockholdersEquity cross-check lae_facts = facts[facts["tag"] == "LiabilitiesAndStockholdersEquity"][ ["ticker", "period_end", "value", "filed"] ].copy() lae_map = pd.Series(dtype=float) if not lae_facts.empty: lae_facts = lae_facts.sort_values("filed").drop_duplicates( subset=["ticker", "period_end"], keep="last", ) lae_map = lae_facts.set_index(["ticker", "period_end"])["value"] # Work on indexed copy widx = wide.set_index(["ticker", "period_end"]) A = widx["stmt_total_assets"] L = widx["stmt_total_liabilities"] E = widx["stmt_total_equity"] lae = lae_map.reindex(widx.index) if not lae_map.empty else pd.Series(np.nan, index=widx.index) def _rel_diff(x, y): return (x - y).abs() / x.abs().replace(0, np.nan) TOL = 0.01 # 1% tolerance pre_bad = _rel_diff(A, L.fillna(0) + E.fillna(0)) > TOL pre_bad_count = pre_bad.sum() fix_counts = {} # Pass 1: Both L and E missing → unfixable, skip # Pass 2: E missing, A and L present → E = A - L pass2_mask = A.notna() & L.notna() & E.isna() if pass2_mask.any(): widx.loc[pass2_mask, "stmt_total_equity"] = (A - L)[pass2_mask] fix_counts["derive_E_from_A_minus_L"] = pass2_mask.sum() # Pass 3: L missing, A and E present → L = A - E pass3_mask = A.notna() & L.isna() & E.notna() if pass3_mask.any(): widx.loc[pass3_mask, "stmt_total_liabilities"] = (A - E)[pass3_mask] fix_counts["derive_L_from_A_minus_E"] = pass3_mask.sum() # Pass 4: A missing but LAE or (L+E) present → A = L + E pass4_mask = A.isna() & L.notna() & E.notna() if pass4_mask.any(): widx.loc[pass4_mask, "stmt_total_assets"] = (L + E)[pass4_mask] fix_counts["derive_A_from_L_plus_E"] = pass4_mask.sum() # Refresh after passes 2-4 A = widx["stmt_total_assets"] L = widx["stmt_total_liabilities"] E = widx["stmt_total_equity"] # Pass 5: A ≈ LAE but (L + E) ≠ A → equity tag is wrong, derive E = A - L if not lae_map.empty: lae_aligned = lae_map.reindex(widx.index) bad5 = (_rel_diff(A, L.fillna(0) + E.fillna(0)) > TOL) & \ (_rel_diff(A, lae_aligned) <= TOL) & lae_aligned.notna() & \ A.notna() & L.notna() if bad5.any(): widx.loc[bad5, "stmt_total_equity"] = (A - L)[bad5] fix_counts["equity_fix_via_LAE"] = bad5.sum() # Pass 6: A ≠ LAE but LAE ≈ (L + E) → Assets tag is wrong, use LAE as A refresh_E = widx["stmt_total_equity"] bad6 = (_rel_diff(A, lae_aligned) > TOL) & \ (_rel_diff(lae_aligned, L.fillna(0) + refresh_E.fillna(0)) <= TOL) & \ lae_aligned.notna() & L.notna() & refresh_E.notna() if bad6.any(): widx.loc[bad6, "stmt_total_assets"] = lae_aligned[bad6] fix_counts["assets_fix_via_LAE"] = bad6.sum() # Final check: any remaining > 1% mismatches get ALL THREE set to NaN # (unreliable data — don't let it pollute derived metrics) A = widx["stmt_total_assets"] L = widx["stmt_total_liabilities"] E = widx["stmt_total_equity"] still_bad = _rel_diff(A, L.fillna(0) + E.fillna(0)) > TOL if still_bad.any(): n_drop = still_bad.sum() widx.loc[still_bad, ["stmt_total_assets", "stmt_total_liabilities", "stmt_total_equity"]] = np.nan fix_counts["dropped_unreliable"] = n_drop wide = widx.reset_index() total_fixes = sum(fix_counts.values()) logger.info( "Balance equation: %d pre-fix mismatches. Applied: %s. Total fixed/dropped: %d / %d.", int(pre_bad_count), fix_counts, total_fixes, len(wide), ) wide = wide.rename(columns={"period_end": "date"}) wide = wide.sort_values(["ticker", "date"]).reset_index(drop=True) # Compute TTM (trailing-twelve-month) rolling sums for flow metrics, # matching what _load_statement_long does for yfinance data. # We must compute per-ticker on the non-null subset only, because the # wide DataFrame has NaN gaps (different metrics populate different rows). _FLOW_METRICS = { "stmt_revenue", "stmt_net_income", "stmt_ebitda", "stmt_ebit", "stmt_gross_profit", "stmt_operating_income", "stmt_basic_eps", "stmt_operating_cashflow", "stmt_free_cashflow", "stmt_capex", "stmt_cogs", "stmt_operating_expenses", "stmt_financing_cashflow", } for col in list(wide.columns): if col in _FLOW_METRICS: ttm_col = f"{col}_ttm" wide[ttm_col] = np.nan for ticker, grp in wide.groupby("ticker"): valid = grp[col].dropna() if len(valid) >= 4: ttm_vals = valid.rolling(window=4, min_periods=4).sum() wide.loc[ttm_vals.index, ttm_col] = ttm_vals logger.info( "Loaded XBRL statements: %d rows, %d tickers, %d stmt columns, " "date range %s to %s.", len(wide), wide["ticker"].nunique(), sum(1 for c in wide.columns if c.startswith("stmt_")), wide["date"].min().date(), wide["date"].max().date(), ) return wide def _load_macro_raw() -> pd.DataFrame: """Load all FRED + EIA CSVs into one date-indexed DataFrame (native granularity).""" macro = pd.DataFrame() # FRED series for series_id in config.FRED_SERIES: csv_path = config.MACRO_DIR / f"fred_{series_id}.csv" if not csv_path.exists(): continue try: df = pd.read_csv(csv_path) if "date" not in df.columns: continue df["date"] = pd.to_datetime(df["date"]) non_date = [c for c in df.columns if c != "date"] if not non_date: logger.warning("FRED %s CSV has no value column, skipping.", series_id) continue col = series_id if series_id in df.columns else non_date[0] df = df[["date", col]].rename(columns={col: f"fred_{series_id}"}) df[f"fred_{series_id}"] = pd.to_numeric(df[f"fred_{series_id}"], errors="coerce") if macro.empty: macro = df else: macro = macro.merge(df, on="date", how="outer") except Exception as exc: logger.warning("Could not load FRED %s: %s", series_id, exc) # EIA commodities for commodity_type in ["crude_oil", "natural_gas"]: commodity_dir = config.MACRO_DIR / commodity_type if not commodity_dir.is_dir(): continue for csv_file in sorted(commodity_dir.glob("*.csv")): if "_raw" in csv_file.stem: continue try: df = pd.read_csv(csv_file) date_col = next( (c for c in df.columns if "date" in c.lower() or "period" in c.lower() or "time" in c.lower()), None, ) if date_col is None: continue df[date_col] = pd.to_datetime(df[date_col], errors="coerce") df = df.dropna(subset=[date_col]) num_cols = df.select_dtypes(include="number").columns.tolist() if not num_cols: continue col_name = f"eia_{commodity_type}_{csv_file.stem}" df = df[[date_col, num_cols[0]]].rename(columns={date_col: "date", num_cols[0]: col_name}) if macro.empty: macro = df else: macro = macro.merge(df, on="date", how="outer") except Exception as exc: logger.warning("Could not load EIA %s: %s", csv_file.name, exc) if not macro.empty: macro = macro.sort_values("date").reset_index(drop=True) return macro def _load_universe() -> pd.DataFrame: """Load benchmark_universe.csv.""" path = config.UNIVERSE_DIR / "benchmark_universe.csv" if not path.exists(): raise FileNotFoundError(f"Run Step 1 first: {path}") return pd.read_csv(path) def _load_company_info() -> pd.DataFrame: """Load company_info.csv (static metadata only).""" path = config.FUNDAMENTALS_DIR / "company_info.csv" if not path.exists(): return pd.DataFrame() return pd.read_csv(path) def _load_filing_metadata(tickers: list[str]) -> dict[str, list[tuple[pd.Timestamp, str, str]]]: """Scan filings directory for .md files, extract (date, type, path). Returns {ticker: [(filing_date, filing_type, rel_path), ...]}, sorted by date. """ lookup: dict[str, list[tuple[pd.Timestamp, str, str]]] = {} for ticker in tickers: ticker_dir = config.FILINGS_DIR / ticker entries: list[tuple[pd.Timestamp, str, str]] = [] if ticker_dir.is_dir(): for md_file in ticker_dir.glob("*.md"): # Classify against the full set of form types we collect # (see config.SEC_FILING_TYPES). Order matters — check # more-specific variants first (10-K/A before 10-K). name = md_file.name if "10-K/A" in name: ftype = "10-K/A" elif "10-Q/A" in name: ftype = "10-Q/A" elif "10-K" in name: ftype = "10-K" elif "10-Q" in name: ftype = "10-Q" elif "8-K" in name: ftype = "8-K" elif "20-F" in name: ftype = "20-F" elif "40-F" in name: ftype = "40-F" elif "N-CSRS" in name: ftype = "N-CSRS" elif "N-CSR" in name: ftype = "N-CSR" elif "6-K" in name: ftype = "6-K" elif "DEF 14A" in name or "DEF14A" in name: ftype = "DEF 14A" elif "S-1" in name: ftype = "S-1" elif "11-K" in name: ftype = "11-K" else: ftype = "other" match = re.search(r"(\d{4}-\d{2}-\d{2})", md_file.name) if match: try: fdate = pd.Timestamp(match.group(1)) rel_path = str(md_file.relative_to(config.DATA_DIR)) entries.append((fdate, ftype, rel_path)) except Exception: continue entries.sort(key=lambda x: x[0]) lookup[ticker] = entries return lookup def _load_real_estate_summary() -> dict[str, float | int]: """Load raw RE CSVs and compute summary statistics. NOTE: These are static aggregate cross-sectional statistics (counts, means, medians) broadcast identically to every panel row. They do not carry temporal information and introduce negligible data leakage between train/test splits. """ summary: dict[str, float | int] = {} re_dir = config.REAL_ESTATE_DIR for name in ["properties", "rentals", "sales"]: csv_path = re_dir / f"{name}.csv" if csv_path.exists(): try: df = pd.read_csv(csv_path) summary[f"re_{name}_count"] = len(df) for col in ["price", "rent", "squareFootage", "square_footage", "listPrice", "salePrice", "last_sale_price"]: if col in df.columns: vals = pd.to_numeric(df[col], errors="coerce").dropna() if not vals.empty: summary[f"re_{name}_{col}_mean"] = float(vals.mean()) summary[f"re_{name}_{col}_median"] = float(vals.median()) except Exception as exc: logger.warning("Could not load RE %s: %s", name, exc) demo_path = re_dir / "demographics.csv" if demo_path.exists(): try: df = pd.read_csv(demo_path) summary["re_demographics_metros"] = len(df) except Exception: pass return summary # =================================================================== # 2b -- Resample to target granularity # =================================================================== def _resample_prices(prices: pd.DataFrame, granularity: str) -> pd.DataFrame: """Resample OHLCV+adj_close to target granularity.""" if granularity == "daily": return prices freq = "W-FRI" if granularity == "weekly" else "MS" agg: dict[str, str] = { "open": "first", "high": "max", "low": "min", "close": "last", "volume": "sum", } if "adj_close" in prices.columns: agg["adj_close"] = "last" resampled = ( prices .set_index("date") .groupby("ticker") .resample(freq) .agg(agg) .dropna(subset=["close"]) .reset_index() ) return resampled.sort_values(["ticker", "date"]).reset_index(drop=True) def _resample_macro(macro: pd.DataFrame, granularity: str) -> pd.DataFrame: """Resample macro data to target granularity. Aggregation rules (matching the plan): - Rates / indices (FRED series): last value in each period - Volume / production EIA series: sum - All other numeric: last """ if macro.empty or granularity == "daily": return macro freq = "W-FRI" if granularity == "weekly" else "MS" # Build per-column aggregation rules # EIA volume/production series should be summed, everything else uses last _sum_keywords = {"export", "import", "production", "reserves"} agg_map: dict[str, str] = {} for col in macro.columns: if col == "date": continue col_lower = col.lower() if any(kw in col_lower for kw in _sum_keywords): agg_map[col] = "sum" else: agg_map[col] = "last" resampled = ( macro .set_index("date") .resample(freq) .agg(agg_map) .reset_index() ) return resampled.sort_values("date").reset_index(drop=True) # =================================================================== # 2c -- Merge into panel # =================================================================== def _attach_nearest_filing( panel: pd.DataFrame, filing_lookup: dict[str, list[tuple[pd.Timestamp, str, str]]], ) -> pd.DataFrame: """For each (ticker, date), find the most recent filing as-of that date. Uses ``pd.merge_asof`` for vectorised performance instead of iterrows. """ # Build a DataFrame of all filings across all tickers filing_rows: list[dict] = [] for ticker, entries in filing_lookup.items(): for fdate, ftype, fpath in entries: filing_rows.append({ "ticker": ticker, "filing_date": fdate, "filing_type": ftype, "filing_path": fpath, }) if not filing_rows: panel["nearest_filing_type"] = None panel["nearest_filing_date"] = pd.NaT panel["nearest_filing_path"] = None panel["days_since_filing"] = np.nan return panel filings_df = pd.DataFrame(filing_rows) filings_df["filing_date"] = pd.to_datetime(filings_df["filing_date"]) filings_df = filings_df.sort_values("filing_date").reset_index(drop=True) # merge_asof: for each panel row, find the latest filing with filing_date <= panel date panel = panel.sort_values("date").reset_index(drop=True) asof_result = pd.merge_asof( panel[["ticker", "date"]], filings_df, left_on="date", right_on="filing_date", by="ticker", direction="backward", ) panel["nearest_filing_type"] = asof_result["filing_type"].values panel["nearest_filing_date"] = pd.to_datetime(asof_result["filing_date"].values) panel["nearest_filing_path"] = asof_result["filing_path"].values panel["days_since_filing"] = (panel["date"] - panel["nearest_filing_date"]).dt.days return panel # =================================================================== # 2d -- Derive time-varying metrics # =================================================================== def _derive_shares_outstanding(panel: pd.DataFrame, company_info: pd.DataFrame) -> pd.Series: """Compute shares_outstanding via the fallback chain. Priority: 1. stmt_shares_outstanding (balance sheet ``Ordinary Shares Number``) 2. stmt_shares_issued (balance sheet ``Share Issued``) 3. stmt_net_income / stmt_basic_eps (income statement derived) 4. Price-derived via Adj Close split-adjustment ratio """ shares = panel.get("stmt_shares_outstanding") if shares is not None: shares = shares.copy() # Treat zero as missing — zero shares means the XBRL tag was # reported but the company hadn't started reporting real values yet. shares = shares.replace(0, np.nan) else: shares = pd.Series(np.nan, index=panel.index) # Fallback 2: Share Issued if "stmt_shares_issued" in panel.columns: mask = shares.isna() issued = panel.loc[mask, "stmt_shares_issued"].replace(0, np.nan) shares.loc[mask] = issued # Fallback 3: net_income / basic_eps if "stmt_net_income" in panel.columns and "stmt_basic_eps" in panel.columns: mask = shares.isna() eps = panel.loc[mask, "stmt_basic_eps"].replace(0, np.nan) shares.loc[mask] = panel.loc[mask, "stmt_net_income"] / eps # Fallback 4: price-derived via Adj Close split-adjustment (vectorised) if "adj_close" in panel.columns and "close" in panel.columns: mask = shares.isna() if mask.any() and not company_info.empty and "marketCap" in company_info.columns: # Build anchor map from company_info anchor_df = company_info[["ticker", "marketCap"]].dropna().drop_duplicates(subset="ticker") anchor_map = dict(zip(anchor_df["ticker"], anchor_df["marketCap"])) adj_ratio = panel["close"] / panel["adj_close"].replace(0, np.nan) # For each ticker, find the anchor (latest) close and adj_ratio # using groupby + transform to avoid Python per-ticker loop tickers_needing_fb4 = panel.loc[mask, "ticker"].unique() tickers_with_anchor = [t for t in tickers_needing_fb4 if t in anchor_map] if tickers_with_anchor: # Subset to tickers that need fallback 4 AND have an anchor fb4_mask = mask & panel["ticker"].isin(tickers_with_anchor) fb4_panel = panel.loc[fb4_mask | panel["ticker"].isin(tickers_with_anchor)].copy() fb4_panel["_adj_ratio"] = adj_ratio.loc[fb4_panel.index] # Find the anchor row (latest date) per ticker latest_idx = fb4_panel.groupby("ticker")["date"].idxmax() anchor_rows = fb4_panel.loc[latest_idx, ["ticker", "close", "_adj_ratio"]].set_index("ticker") # Compute anchor shares and anchor adj_ratio per ticker anchor_info = pd.DataFrame({ "ticker": tickers_with_anchor, "mcap": [anchor_map[t] for t in tickers_with_anchor], }) anchor_info = anchor_info.merge(anchor_rows, on="ticker", how="inner") anchor_info["anchor_shares"] = anchor_info["mcap"] / anchor_info["close"].replace(0, np.nan) anchor_info["anchor_adj_ratio"] = anchor_info["_adj_ratio"] anchor_info = anchor_info.dropna(subset=["anchor_shares", "anchor_adj_ratio"]) anchor_info = anchor_info[anchor_info["anchor_adj_ratio"] != 0] if not anchor_info.empty: # Map back to panel rows ticker_to_anchor_shares = dict(zip(anchor_info["ticker"], anchor_info["anchor_shares"])) ticker_to_anchor_adj = dict(zip(anchor_info["ticker"], anchor_info["anchor_adj_ratio"])) applicable = mask & panel["ticker"].isin(anchor_info["ticker"]) if applicable.any(): tk_series = panel.loc[applicable, "ticker"] a_shares = tk_series.map(ticker_to_anchor_shares) a_adj = tk_series.map(ticker_to_anchor_adj) historical = a_shares / (adj_ratio.loc[applicable] / a_adj) shares.loc[applicable] = historical # ── Sanity check: detect XBRL unit errors (shares reported in thousands) ── # If shares × latest close > $5T for any ticker, the shares value is # almost certainly in wrong units. Divide by 1000 iteratively until sane. if "close" in panel.columns: _close = panel.groupby("ticker")["close"].transform("last") _mcap = shares * _close insane = _mcap > 5e12 # no real company exceeds $5T if insane.any(): tickers_insane = panel.loc[insane, "ticker"].unique() for t in tickers_insane: tmask = panel["ticker"] == t while (shares.loc[tmask] * _close.loc[tmask]).max() > 5e12: shares.loc[tmask] = shares.loc[tmask] / 1000 logger.warning( "Ticker %s: shares_outstanding corrected (XBRL unit error)", t ) # Sanity check: negative shares_outstanding is physically impossible. neg_mask = shares < 0 if neg_mask.any(): bad_tickers = panel.loc[neg_mask, "ticker"].unique() logger.warning( "Negative shares_outstanding for %d rows (%s) — setting to NaN.", neg_mask.sum(), list(bad_tickers), ) shares.loc[neg_mask] = np.nan # Sanity check: shares_outstanding > 10B likely a unit error. huge_mask = shares > 10e9 if huge_mask.any(): bad_tickers = panel.loc[huge_mask, "ticker"].unique() logger.warning( "shares_outstanding > 10B for %d rows (%s) — setting to NaN.", huge_mask.sum(), list(bad_tickers), ) shares.loc[huge_mask] = np.nan return shares def _compute_derived_metrics(panel: pd.DataFrame, granularity: str = "daily") -> pd.DataFrame: """Add time-varying derived value-estimation columns. Uses TTM (trailing-twelve-month) values for flow metrics (revenue, net income, EBITDA, FCF) so that ratios like P/E reflect annualised earnings, not a single quarter. Falls back to single-quarter values if TTM columns are unavailable. """ out = panel.copy() so = out.get("shares_outstanding") if so is None: return out close = out["close"] # Split-adjust shares_outstanding using the close/adj_close ratio. # XBRL shares_outstanding can be stale (from a pre-split filing) while # yfinance close is retroactively adjusted. When close/adj_close > 1.5, # a split occurred and we need to divide shares by the split ratio. # # IMPORTANT: require POSITIVE split_ratio in a sane range. Bad Yahoo # data (e.g., CBIO had negative adj_close values) would flip shares # to negative if we didn't guard against this. if "adj_close" in out.columns: adj_close_safe = out["adj_close"].replace(0, np.nan) # Treat non-positive adj_close as bad data → skip split adjust for those rows adj_close_safe = adj_close_safe.where(adj_close_safe > 0) split_ratio = close / adj_close_safe # Only adjust where the ratio is positive AND meaningfully != 1 needs_adj = ((split_ratio > 1.5) | (split_ratio < 0.67)) & (split_ratio > 0) if needs_adj.any(): so = so.copy() so.loc[needs_adj] = so.loc[needs_adj] / split_ratio.loc[needs_adj] n_adj = needs_adj.sum() n_tickers = out.loc[needs_adj, "ticker"].nunique() logger.info( "Split-adjusted shares_outstanding for %d rows (%d tickers) " "using close/adj_close ratio.", n_adj, n_tickers, ) out["derived_market_cap"] = close * so # Final safety: any negative mcap (shouldn't happen after above guard, # but catches anything weird) gets NaN. neg_mc = out["derived_market_cap"] < 0 if neg_mc.any(): n_neg = neg_mc.sum() n_t = out.loc[neg_mc, "ticker"].nunique() logger.warning("Negative derived_market_cap for %d rows (%d tickers) — setting to NaN.", n_neg, n_t) out.loc[neg_mc, "derived_market_cap"] = np.nan # Sanity: cap market cap at $100B — no small/micro-cap should exceed this. # The largest R2K member in our universe is ~$40B (a name that has drifted # up since reconstitution). Values above $100B (2.5× that) arise from # XBRL shares_outstanding unit errors × prices and should be NaN'd. # Previous threshold of $500B was too permissive for a small-cap benchmark. _MCAP_CEILING = 100e9 mcap_insane = out["derived_market_cap"] > _MCAP_CEILING if mcap_insane.any(): n_insane = mcap_insane.sum() tickers_insane = out.loc[mcap_insane, "ticker"].nunique() logger.warning( "derived_market_cap > $%.0fB for %d rows (%d tickers) — setting to NaN.", _MCAP_CEILING / 1e9, n_insane, tickers_insane, ) out.loc[mcap_insane, "derived_market_cap"] = np.nan def _col(name: str) -> pd.Series | None: """Return TTM column if available, else quarterly, else None.""" ttm = f"{name}_ttm" if ttm in out.columns: return out[ttm] if name in out.columns: return out[name] return None ni = _col("stmt_net_income") if ni is not None: # PE is economically meaningful only for profitable companies. # Null-mask for loss-makers (ni <= 0) rather than emitting huge # negative values that pollute downstream stats. ni_safe = ni.where(ni > 0) out["derived_pe"] = out["derived_market_cap"] / ni_safe if "stmt_total_debt" in out.columns and "stmt_cash" in out.columns: out["derived_ev"] = out["derived_market_cap"] + out["stmt_total_debt"].fillna(0) - out["stmt_cash"].fillna(0) rev = _col("stmt_revenue") if "derived_ev" in out.columns and rev is not None: out["derived_ev_to_revenue"] = out["derived_ev"] / rev.replace(0, np.nan) ebitda = _col("stmt_ebitda") if "derived_ev" in out.columns and ebitda is not None: out["derived_ev_to_ebitda"] = out["derived_ev"] / ebitda.replace(0, np.nan) fcf = _col("stmt_free_cashflow") if fcf is not None: out["derived_fcf_yield"] = fcf / out["derived_market_cap"].replace(0, np.nan) if "stmt_total_equity" in out.columns: out["derived_pb"] = out["derived_market_cap"] / out["stmt_total_equity"].replace(0, np.nan) if "stmt_total_debt" in out.columns and "stmt_total_equity" in out.columns: out["derived_debt_to_equity"] = out["stmt_total_debt"] / out["stmt_total_equity"].replace(0, np.nan) # ── Valuation-ready metrics ────────────────────────────────────── # Effective tax rate (Tax Provision / Pretax Income, clamped 0–50 %) tax = _col("stmt_tax_provision") pretax = _col("stmt_pretax_income") if tax is not None and pretax is not None: out["derived_effective_tax_rate"] = ( tax.abs() / pretax.replace(0, np.nan) ).clip(0.0, 0.50) # Cost of debt proxy (Interest Expense / Total Debt, clamped 0–20 %) int_exp = _col("stmt_interest_expense") if int_exp is not None and "stmt_total_debt" in out.columns: out["derived_cost_of_debt"] = ( int_exp.abs() / out["stmt_total_debt"].replace(0, np.nan) ).clip(0.0, 0.20) # Rolling beta vs S&P 500 (granularity-aware window) if "fred_SP500" in out.columns and "close" in out.columns: _gran = granularity if _gran == "monthly": _beta_window, _beta_min = 36, 12 elif _gran == "weekly": _beta_window, _beta_min = 52, 13 else: _beta_window, _beta_min = config.BETA_LOOKBACK_DAYS, 60 out["derived_beta"] = np.nan for tk, grp in out.groupby("ticker", sort=False): if len(grp) < _beta_min: continue stk_ret = grp["close"].pct_change() mkt_ret = grp["fred_SP500"].pct_change() # Rolling covariance / rolling market variance cov_sm = stk_ret.rolling(_beta_window, min_periods=_beta_min).cov(mkt_ret) var_m = mkt_ret.rolling(_beta_window, min_periods=_beta_min).var() beta = (cov_sm / var_m.replace(0, np.nan)).clip(0.1, 4.0) out.loc[grp.index, "derived_beta"] = beta # WACC estimate (simplified: Ke * E/(D+E) + Kd * (1-t) * D/(D+E)) if "derived_beta" in out.columns and "fred_DGS10" in out.columns: rf = out["fred_DGS10"].ffill() / 100.0 ke = rf + out["derived_beta"].fillna(1.0) * config.MARKET_RISK_PREMIUM kd = out.get("derived_cost_of_debt") if kd is None: kd = rf + 0.02 # fallback spread t = out.get("derived_effective_tax_rate") if t is None: t = 0.21 if "stmt_total_debt" in out.columns and "derived_market_cap" in out.columns: d = out["stmt_total_debt"].fillna(0) e = out["derived_market_cap"].fillna(0) total = (d + e).replace(0, np.nan) d_w = d / total e_w = e / total out["derived_wacc"] = (e_w * ke + d_w * kd * (1 - t)).clip(0.03, 0.25) # ── Margin & ratio metrics from new stmt_ fields ────────────── # Gross Profit % = Gross Profit / Revenue gp = _col("stmt_gross_profit") if gp is not None and rev is not None: out["derived_gross_margin"] = (gp / rev.replace(0, np.nan)).clip(-1, 1) # EBITDA Margin = EBITDA / Revenue if ebitda is not None and rev is not None: out["derived_ebitda_margin"] = (ebitda / rev.replace(0, np.nan)).clip(-2, 2) # Net Margin = Net Income / Revenue if ni is not None and rev is not None: out["derived_net_margin"] = (ni / rev.replace(0, np.nan)).clip(-2, 2) # COGS % of Revenue = COGS / Revenue cogs = _col("stmt_cogs") if cogs is not None and rev is not None: out["derived_cogs_pct"] = (cogs / rev.replace(0, np.nan)).clip(0, 2) # Revenue Growth YoY (per-ticker, lagged by granularity-appropriate periods) if rev is not None: if granularity == "monthly": lag_periods = 12 elif granularity == "weekly": lag_periods = 52 else: lag_periods = 252 out["derived_rev_growth_yoy"] = np.nan for tk, grp in out.groupby("ticker", sort=False): rev_vals = rev.loc[grp.index] rev_lag = rev_vals.shift(lag_periods) growth = (rev_vals - rev_lag) / rev_lag.replace(0, np.nan) out.loc[grp.index, "derived_rev_growth_yoy"] = growth.clip(-5, 50) # Current Ratio = Current Assets / Current Liabilities if "stmt_current_assets" in out.columns and "stmt_current_liabilities" in out.columns: cl = out["stmt_current_liabilities"].replace(0, np.nan) out["derived_current_ratio"] = (out["stmt_current_assets"] / cl).clip(0, 50) return out # =================================================================== # 2e -- Build column role index # =================================================================== def _build_column_roles(columns: list[str]) -> dict[str, list[str]]: """Classify panel columns into roles based on naming convention.""" roles: dict[str, list[str]] = { "target": [], "endogenous": [], "exogenous_fundamental": [], "exogenous_statement": [], "exogenous_macro": [], "exogenous_commodity": [], "context_filing": [], "context_real_estate": [], "metadata": [], } for c in columns: if c == "close": roles["target"].append(c) elif c in ("open", "high", "low", "volume", "adj_close"): roles["endogenous"].append(c) elif c.startswith("derived_") or c == "shares_outstanding": roles["exogenous_fundamental"].append(c) elif c.startswith("stmt_"): roles["exogenous_statement"].append(c) elif c.startswith("fred_"): roles["exogenous_macro"].append(c) elif c.startswith("eia_"): roles["exogenous_commodity"].append(c) elif c.startswith("nearest_filing") or c == "days_since_filing": roles["context_filing"].append(c) elif c.startswith("re_"): roles["context_real_estate"].append(c) else: roles["metadata"].append(c) return roles # =================================================================== # Public API # =================================================================== def run(granularity: str | None = None) -> pd.DataFrame: """Execute Layer 2 preprocessing and return the merged panel DataFrame. Parameters ---------- granularity : str, optional ``"daily"``, ``"weekly"``, or ``"monthly"``. Defaults to ``config.GRANULARITY``. """ if granularity is None: granularity = config.GRANULARITY out_dir = config.DATA_DIR / "processed" / granularity out_dir.mkdir(parents=True, exist_ok=True) # --- 2a. Load raw data --------------------------------------------------- logger.info("Loading raw data ...") prices_raw = _load_prices() universe = _load_universe() company_info = _load_company_info() macro_raw = _load_macro_raw() # Filter out excluded tickers (unadjusted reverse-split prices) if config.EXCLUDED_TICKERS: prices_raw = prices_raw[~prices_raw["ticker"].isin(config.EXCLUDED_TICKERS)] tickers = prices_raw["ticker"].unique().tolist() logger.info("Loaded prices: %d rows, %d tickers.", len(prices_raw), len(tickers)) # --- 2b. Resample --------------------------------------------------------- logger.info("Resampling to %s ...", granularity) prices = _resample_prices(prices_raw, granularity) macro = _resample_macro(macro_raw, granularity) logger.info("Resampled prices: %d rows.", len(prices)) # --- 2c. Merge into panel ------------------------------------------------- panel = prices.copy() # Static metadata from universe static_cols = ["ticker", "sector", "industry", "exchange", "in_russell_2000", "lower_end_russell2000", "small_cap_outside"] static_cols = [c for c in static_cols if c in universe.columns] panel = panel.merge(universe[static_cols], on="ticker", how="left") # Static metadata from company_info (only truly static fields). # For sector and industry: the universe CSV has these for IWM/IJR/IWC # tickers (from iShares) but they are NULL for UNCOVERED tickers. # company_info.csv (from yfinance .info) has sector/industry for ~99.6% # of all tickers. We fill NaN values from company_info AFTER the # universe merge so that UNCOVERED tickers get their sector/industry. if not company_info.empty: info_static = ["ticker"] for col in ["sector", "industry", "fullTimeEmployees"]: if col in company_info.columns: if col not in panel.columns: info_static.append(col) else: # Column exists but may have NaN from universe merge # (e.g. UNCOVERED tickers). Fill NaN from company_info. ci_map = company_info.set_index("ticker")[col].dropna() null_mask = panel[col].isna() if null_mask.any(): filled = panel.loc[null_mask, "ticker"].map(ci_map) panel.loc[null_mask, col] = filled n_filled = filled.notna().sum() if n_filled > 0: logger.info("Filled %d NaN %s values from company_info.", n_filled, col) if len(info_static) > 1: panel = panel.merge(company_info[info_static], on="ticker", how="left") # Keep marketCap for shares_outstanding fallback (not merged into panel) # Normalize exchange names. iShares and NASDAQ Trader use different # conventions for the same exchanges (e.g. "Nyse Mkt Llc" vs "NYSE_MKT"). _EXCHANGE_NORMALIZE: dict[str, str] = { "Nyse Mkt Llc": "NYSE MKT", "NYSE_MKT": "NYSE MKT", "Non-Nms Quotation Service (Nnqs)": "OTC", "NO MARKET (E.G. UNLISTED)": "OTC", } if "exchange" in panel.columns: panel["exchange"] = panel["exchange"].replace(_EXCHANGE_NORMALIZE) # Normalize sector names to GICS convention. iShares uses GICS names # (e.g. "Health Care"), yfinance uses its own convention (e.g. "Healthcare"). # After filling NaN sectors from company_info, the panel has a mix of both. # Standardize to GICS so all sector-based analysis is consistent. _SECTOR_NORMALIZE: dict[str, str] = { "Financial Services": "Financials", "Healthcare": "Health Care", "Consumer Cyclical": "Consumer Discretionary", "Technology": "Information Technology", "Basic Materials": "Materials", "Communication Services": "Communication", "Consumer Defensive": "Consumer Staples", } if "sector" in panel.columns: before_unique = panel["sector"].nunique() panel["sector"] = panel["sector"].replace(_SECTOR_NORMALIZE) after_unique = panel["sector"].nunique() if before_unique != after_unique: logger.info("Normalized sector names: %d → %d unique values (GICS convention).", before_unique, after_unique) # Industry→Sector consistency: if an industry maps to multiple sectors # across tickers (yfinance vs iShares taxonomies differ), force all rows # with that industry to use the modal sector. This ensures # industry→sector is 1:1 as expected by GICS. if "industry" in panel.columns and "sector" in panel.columns: mode_map = panel.dropna(subset=["industry","sector"]).groupby("industry")["sector"].agg( lambda x: x.mode().iloc[0] if len(x.mode()) > 0 else None ) has_ind = panel["industry"].notna() if has_ind.any(): panel.loc[has_ind, "sector"] = panel.loc[has_ind, "industry"].map(mode_map).fillna(panel.loc[has_ind, "sector"]) logger.info("Applied industry→sector modal normalization.") logger.info("Merged static metadata.") # Statement financials (as-of merge) # Source 1: yfinance quarterly statements (~5 recent quarters) logger.info("Loading per-ticker financial statements (yfinance) ...") yf_frames: list[pd.DataFrame] = [] for ticker in tickers: stmt = _load_statement_long(ticker) if not stmt.empty: yf_frames.append(stmt) # Source 2: SEC EDGAR XBRL facts (10+ years of history) logger.info("Loading XBRL historical statements ...") xbrl_stmts = _load_xbrl_statements(tickers) # Combine: XBRL provides the long history, yfinance overwrites with # its more recent (and often more complete) data where both exist. all_stmts: pd.DataFrame | None = None if not xbrl_stmts.empty: all_stmts = xbrl_stmts if yf_frames: yf_all = pd.concat(yf_frames, ignore_index=True) if all_stmts is not None: # Align columns: ensure both DataFrames share the same stmt_ set all_stmt_cols = sorted( {c for c in all_stmts.columns if c.startswith("stmt_")} | {c for c in yf_all.columns if c.startswith("stmt_")} ) for c in all_stmt_cols: if c not in all_stmts.columns: all_stmts[c] = np.nan if c not in yf_all.columns: yf_all[c] = np.nan # Concat then deduplicate: prefer yfinance (listed last → keep="last") combined = pd.concat([all_stmts, yf_all], ignore_index=True) combined = combined.sort_values("date") combined = combined.drop_duplicates( subset=["ticker", "date"], keep="last", ) all_stmts = combined else: all_stmts = yf_all if all_stmts is not None and not all_stmts.empty: stmt_cols = [c for c in all_stmts.columns if c.startswith("stmt_")] # Forward-fill per ticker: different XBRL tags report on different # period_end dates, so the wide DataFrame is sparse. Carrying the # last known value forward ensures merge_asof picks up the most # recent data for *every* column, not just the columns that happen # to be non-null at the single nearest-prior row. all_stmts = all_stmts.sort_values(["ticker", "date"]).reset_index(drop=True) for col in stmt_cols: all_stmts[col] = all_stmts.groupby("ticker")[col].ffill() # Backfill the initial gap: for rows before a ticker's first # filing, carry the earliest known value backward so that # merge_asof can find data for every panel row. all_stmts[col] = all_stmts.groupby("ticker")[col].bfill() # merge_asof requires the 'on' key to be globally sorted AND both # sides must have matching dtype (datetime64[ns]). XBRL can produce # date columns as object dtype when values fall outside the standard # pandas range or contain mixed types; coerce explicitly. all_stmts["date"] = pd.to_datetime(all_stmts["date"], errors="coerce") panel["date"] = pd.to_datetime(panel["date"], errors="coerce") all_stmts = all_stmts.dropna(subset=["date"]).sort_values("date").reset_index(drop=True) panel = panel.dropna(subset=["date"]).sort_values("date").reset_index(drop=True) # Apply balance-equation validation to the COMBINED statements # (XBRL + yfinance). Rows with A ≠ L + E beyond 1% get A/L/E set # to NaN so they don't propagate wrong numbers to the panel. if all(c in all_stmts.columns for c in ["stmt_total_assets", "stmt_total_liabilities", "stmt_total_equity"]): A = all_stmts["stmt_total_assets"] L = all_stmts["stmt_total_liabilities"] E = all_stmts["stmt_total_equity"] all_present = A.notna() & L.notna() & E.notna() rel_err = ((A - L.fillna(0) - E.fillna(0)).abs() / A.abs().replace(0, np.nan)) bad = all_present & (rel_err > 0.01) if bad.any(): n = bad.sum() all_stmts.loc[bad, ["stmt_total_assets", "stmt_total_liabilities", "stmt_total_equity"]] = np.nan logger.info("Combined statements: dropped A/L/E for %d rows with balance mismatch > 1%% (post-combine).", n) panel = pd.merge_asof( panel, all_stmts[["ticker", "date"] + stmt_cols], on="date", by="ticker", direction="backward", ) logger.info("Merged statement financials (%d metrics) via as-of join.", len(stmt_cols)) # Statement value sanity with RECOVERY, not just NaN. # Negative revenue / non-positive assets often come from forward-filling # a single bad XBRL value. The correct economic value exists in a prior # filing — we replace each bad value with the last known-good (positive) # value from the same ticker, forward-filled. sanity_rules = [ ("stmt_revenue", "< 0", lambda s: s < 0), ("stmt_revenue_ttm", "< 0", lambda s: s < 0), ("stmt_total_assets", "<= 0", lambda s: s <= 0), ("stmt_total_liabilities", "< 0", lambda s: s < 0), ] panel = panel.sort_values(["ticker", "date"]) for col, rule_name, rule_fn in sanity_rules: if col not in panel.columns: continue bad = rule_fn(panel[col]) & panel[col].notna() if not bad.any(): continue n_bad = int(bad.sum()) # Null bad values, then forward-fill per ticker to recover last valid positive panel.loc[bad, col] = np.nan panel[col] = panel.groupby("ticker")[col].ffill() # Any residual (ticker never had positive value): keep NaN — genuinely unknown still_bad = rule_fn(panel[col]) & panel[col].notna() if still_bad.any(): panel.loc[still_bad, col] = np.nan remaining = panel[col].isna().sum() logger.info("Sanity fix %s %s: %d bad values recovered via per-ticker forward-fill (final nulls: %d)", col, rule_name, n_bad, remaining) # Final pass: post-as-of-merge balance-equation residual purge. # The earlier per-statement fix purges bad filings before merging, but # merge_asof can carry a small number of bad A/L/E triples forward on # the daily panel. Additionally, independently forward-filling each # column per ticker can recombine values from different source rows, # producing a post-fill triple that is itself imbalanced (observed # bug: VS ticker, 90 residual rows). Fix: forward-fill as a unified # triple, sourcing ONLY from rows where all three were originally # present AND balanced. Any row that cannot source from such a row # stays NaN across all three. ble_cols = ["stmt_total_assets", "stmt_total_liabilities", "stmt_total_equity"] if all(c in panel.columns for c in ble_cols): # CRITICAL: sort FIRST, then compute good — otherwise the good # mask is aligned to the pre-sort row order and the fill below # sources from the wrong rows (v2 bug, observed as 1,968 residuals # vs. 113 with the simpler v1 fix). Work on a reset-index frame. panel = panel.sort_values(["ticker", "date"]).reset_index(drop=True) A = panel["stmt_total_assets"] L = panel["stmt_total_liabilities"] E = panel["stmt_total_equity"] all_present = A.notna() & L.notna() & E.notna() rel_err = ((A - L.fillna(0) - E.fillna(0)).abs() / A.abs().replace(0, np.nan)) good = all_present & (rel_err <= 0.01) n_bad_initial = int((all_present & ~good).sum()) # Per-ticker row-index of the most recent good row (carries # the triple as a unit, avoiding the independent-column drift # that broke v1). idx_series = pd.Series(panel.index.to_numpy(), index=panel.index) good_idx = idx_series.where(good) last_good = good_idx.groupby(panel["ticker"]).ffill() fill_mask = (~good) & last_good.notna() if fill_mask.any(): src_idx = last_good[fill_mask].astype(int).to_numpy() dst_idx = panel.index[fill_mask].to_numpy() for c in ble_cols: panel.loc[dst_idx, c] = panel[c].to_numpy()[src_idx] orphan_mask = (~good) & last_good.isna() if orphan_mask.any(): panel.loc[orphan_mask, ble_cols] = np.nan # Post-verify the invariant actually holds on what we kept. A2 = panel["stmt_total_assets"] L2 = panel["stmt_total_liabilities"] E2 = panel["stmt_total_equity"] all2 = A2.notna() & L2.notna() & E2.notna() rel2 = ((A2 - L2.fillna(0) - E2.fillna(0)).abs() / A2.abs().replace(0, np.nan)) residual = int((all2 & (rel2 > 0.01)).sum()) logger.info( "Balance-eq residual purge: %d bad → %d filled / %d orphan-nulled" " / %d residual (post-fix verify)", n_bad_initial, int(fill_mask.sum()), int(orphan_mask.sum()), residual, ) panel = panel.reset_index(drop=True) else: logger.warning("No statement financials loaded.") # Macro / commodity (as-of merge, broadcast to all tickers) if not macro.empty: macro = macro.sort_values("date").reset_index(drop=True) macro_cols = [c for c in macro.columns if c != "date"] macro[macro_cols] = macro[macro_cols].ffill() panel = panel.sort_values("date").reset_index(drop=True) panel = pd.merge_asof(panel, macro, on="date", direction="backward") logger.info("Merged macro data (%d series).", len(macro_cols)) else: logger.warning("No macro data loaded.") # Filing context filing_lookup = _load_filing_metadata(tickers) tickers_with_filings = sum(1 for v in filing_lookup.values() if v) if tickers_with_filings > 0: panel = _attach_nearest_filing(panel, filing_lookup) logger.info("Attached filing context (%d tickers have filings).", tickers_with_filings) else: logger.warning("No filings found for any ticker.") panel["nearest_filing_type"] = None panel["nearest_filing_date"] = pd.NaT panel["nearest_filing_path"] = None panel["days_since_filing"] = np.nan # Real estate summary — removed: these 15 columns are global aggregates # broadcast identically to every row (e.g. re_properties_count=47507). # They carry zero per-row information and inflate the feature count. # The summary is still available via _load_real_estate_summary(). # re_summary = _load_real_estate_summary() # disabled — see data-quality audit # --- 2d. Derive time-varying metrics -------------------------------------- logger.info("Deriving time-varying metrics ...") panel["shares_outstanding"] = _derive_shares_outstanding(panel, company_info) panel = _compute_derived_metrics(panel, granularity=granularity) # --- Small-cap filter already applied at universe collection time --- # collect_universe.py applies the $7.4B market_cap filter to IWC and # UNCOVERED tickers only. IWM (Russell 2000) and IJR (S&P SmallCap 600) # tickers are kept regardless of market_cap because they are index- # designated small-caps. No additional filtering is needed here — # the universe CSV is the authoritative ticker set. # Labels panel["label"] = "other" if "lower_end_russell2000" in panel.columns: panel.loc[panel["lower_end_russell2000"] == True, "label"] = "lower_end_r2k" # noqa: E712 if "small_cap_outside" in panel.columns: panel.loc[panel["small_cap_outside"] == True, "label"] = "small_cap_outside" # noqa: E712 # Final sort panel = panel.sort_values(["ticker", "date"]).reset_index(drop=True) # --- 2e. Save ------------------------------------------------------------- panel.to_parquet(out_dir / "panel.parquet", index=False) col_roles = _build_column_roles(list(panel.columns)) (out_dir / "columns.json").write_text(json.dumps(col_roles, indent=2)) logger.info( "Panel saved: %d rows, %d tickers, %d columns at %s granularity. -> %s", len(panel), panel["ticker"].nunique(), len(panel.columns), granularity, out_dir / "panel.parquet", ) return panel