| """Step 3: Collect daily stock prices (OHLCV + Adj Close). |
| |
| Downloads daily OHLCV data for the full ticker universe using |
| yf.download() in batches of PRICE_BATCH_SIZE. |
| |
| Uses ``auto_adjust=False`` to preserve the ``Adj Close`` column, |
| which is required for the shares_outstanding derivation in Layer 2. |
| |
| Includes retry with backoff for rate-limited batches, per-batch |
| checkpointing (so crashes don't lose all progress), and filters out |
| tickers/rows where ``Close`` is entirely NaN (junk/delisted symbols). |
| |
| Output: data/prices/daily_prices.csv |
| """ |
|
|
| from __future__ import annotations |
|
|
| import logging |
| import os |
| import tempfile |
| import time |
|
|
| import pandas as pd |
| import yfinance as yf |
|
|
| from . import config |
|
|
logger = logging.getLogger(__name__)


# Maximum download attempts per batch when yfinance reports rate limiting;
# waits grow exponentially between attempts (see _download_batch_with_retry).
MAX_BATCH_RETRIES = 3
|
|
|
|
def _download_batch_with_retry(
    batch: list[str],
    start: str,
    end: str,
    retries: int = MAX_BATCH_RETRIES,
) -> pd.DataFrame | None:
    """Fetch OHLCV data for *batch* from yfinance, retrying on rate limits.

    Non-transient download errors and empty responses return ``None``
    immediately; rate-limit errors back off exponentially (5s, 10s, 20s)
    until *retries* attempts are exhausted.
    """
    attempt = 0
    while attempt < retries:
        try:
            frame = yf.download(
                batch,
                start=start,
                end=end,
                group_by="ticker",
                auto_adjust=False,
                threads=True,
            )
        except Exception as exc:
            message = str(exc)
            transient = "Too Many Requests" in message or "Rate" in message
            if not transient:
                logger.warning("Batch download failed: %s", exc)
                return None
            wait = 5 * 2 ** attempt
            logger.warning("Batch rate-limited (attempt %d/%d), waiting %ds ...",
                           attempt + 1, retries, wait)
            time.sleep(wait)
            attempt += 1
            continue
        if frame is None or frame.empty:
            return None
        return frame
    logger.warning("Batch exhausted %d retries.", retries)
    return None
|
|
|
|
| def _reshape_batch(raw: pd.DataFrame, batch: list[str]) -> pd.DataFrame: |
| """Reshape a yfinance MultiIndex batch to long format.""" |
| records = [] |
| if isinstance(raw.columns, pd.MultiIndex): |
| for ticker in raw.columns.get_level_values(0).unique(): |
| try: |
| sub = raw[ticker].copy() |
| sub = sub.reset_index() |
| sub["Ticker"] = ticker |
| records.append(sub) |
| except Exception as exc: |
| logger.warning("Could not reshape ticker %s: %s", ticker, exc) |
| continue |
| else: |
| |
| raw = raw.reset_index() |
| raw["Ticker"] = batch[0] if len(batch) == 1 else "UNKNOWN" |
| records.append(raw) |
|
|
| if not records: |
| return pd.DataFrame() |
| return pd.concat(records, ignore_index=True) |
|
|
|
|
| def _atomic_csv_write(df: pd.DataFrame, dest) -> None: |
| """Write CSV atomically via temp file + rename.""" |
| dest_parent = dest.parent if hasattr(dest, "parent") else os.path.dirname(dest) |
| fd, tmp_path = tempfile.mkstemp(suffix=".csv", dir=dest_parent) |
| try: |
| os.close(fd) |
| df.to_csv(tmp_path, index=False) |
| os.replace(tmp_path, str(dest)) |
| except BaseException: |
| try: |
| os.unlink(tmp_path) |
| except OSError: |
| pass |
| raise |
|
|
|
|
def run(tickers: list[str] | None = None) -> pd.DataFrame:
    """Execute Step 3 and return the daily prices DataFrame.

    Args:
        tickers: Universe to download. When ``None``, the list is read
            from the Step 1 output (``benchmark_universe.csv``).

    Returns:
        Long-format DataFrame (one row per Ticker/Date) with NaN-Close
        rows removed, or an empty DataFrame if nothing was downloaded.

    Raises:
        FileNotFoundError: if *tickers* is None and the Step 1 universe
            file does not exist yet.
    """
    config.PRICES_DIR.mkdir(parents=True, exist_ok=True)
    out_path = config.PRICES_DIR / "daily_prices.csv"
    checkpoint_path = config.PRICES_DIR / "_prices_checkpoint.csv"

    # Idempotency: if the final output already exists, skip downloading
    # entirely and return the cached file.
    if out_path.exists():
        logger.info("Daily prices file already exists at %s, loading.", out_path)
        return pd.read_csv(out_path, parse_dates=["Date"])

    if tickers is None:
        universe_path = config.UNIVERSE_DIR / "benchmark_universe.csv"
        if not universe_path.exists():
            raise FileNotFoundError(f"Run Step 1 first: {universe_path}")
        tickers = pd.read_csv(universe_path)["ticker"].tolist()

    # Resume support: a checkpoint left by a crashed/interrupted run lets
    # us skip tickers already downloaded. NOTE(review): a ticker present in
    # the checkpoint counts as fully done even if its date range was only
    # partially fetched — confirm this is acceptable for the pipeline.
    existing = pd.DataFrame()
    already_done: set[str] = set()
    if checkpoint_path.exists():
        try:
            existing = pd.read_csv(checkpoint_path)
            already_done = set(existing["Ticker"].unique())
            logger.info("Resuming from checkpoint: %d tickers already downloaded.", len(already_done))
        except Exception:
            # Unreadable/incomplete checkpoint: ignore it rather than crash;
            # the full universe will simply be re-downloaded.
            logger.warning("Checkpoint file corrupt, starting fresh.")
            existing = pd.DataFrame()

    remaining = [t for t in tickers if t not in already_done]
    logger.info("Downloading daily prices for %d tickers (%d already done) ...",
                len(remaining), len(already_done))

    batch_size = config.PRICE_BATCH_SIZE
    # Counts successful batches since the last disk flush (flush every 5).
    batches_since_checkpoint = 0

    for i in range(0, len(remaining), batch_size):
        batch = remaining[i : i + batch_size]
        logger.info("Downloading batch %d-%d / %d remaining", i, i + len(batch), len(remaining))
        raw = _download_batch_with_retry(batch, config.START_DATE, config.END_DATE)
        # A failed batch (None) is skipped silently here; its tickers will
        # surface later in the "no valid price data" report.
        if raw is not None:
            reshaped = _reshape_batch(raw, batch)
            if not reshaped.empty:
                existing = pd.concat([existing, reshaped], ignore_index=True)
                batches_since_checkpoint += 1

        # Periodic checkpoint so a crash loses at most ~5 batches of work.
        # Written atomically so a crash mid-write cannot corrupt the file.
        if batches_since_checkpoint >= 5 and not existing.empty:
            _atomic_csv_write(existing, checkpoint_path)
            batches_since_checkpoint = 0
            logger.info(" Checkpoint saved (%d rows, %d tickers).",
                        len(existing), existing["Ticker"].nunique())

    if existing.empty:
        logger.warning("No price data downloaded.")
        return pd.DataFrame()

    # Normalize column names by stripping stray whitespace (defensive; the
    # checkpoint round-trip and yfinance output should already be clean).
    col_map = {c: c.strip() for c in existing.columns}
    result = existing.rename(columns=col_map)

    # Drop junk rows: delisted or invalid symbols come back with NaN Close.
    before_len = len(result)
    result = result.dropna(subset=["Close"])
    dropped = before_len - len(result)
    if dropped > 0:
        logger.info("Filtered %d rows with NaN Close (kept %d).", dropped, len(result))

    # Report tickers that yielded no usable rows at all (failed downloads
    # or all-NaN data) against the full requested universe.
    valid_tickers = result["Ticker"].nunique()
    all_tickers_set = set(tickers)
    tickers_in_result = set(result["Ticker"].unique())
    missing = all_tickers_set - tickers_in_result
    if missing:
        logger.info("%d tickers had no valid price data: %s",
                    len(missing), sorted(missing))

    _atomic_csv_write(result, out_path)
    logger.info("Saved daily prices (%d rows, %d tickers) to %s",
                len(result), valid_tickers, out_path)

    # Final output landed successfully -> the checkpoint is now redundant.
    if checkpoint_path.exists():
        checkpoint_path.unlink()

    return result
|
|