# MacroLens — code/collect_fundamentals.py
"""Step 2: Collect company fundamentals.
For each ticker in the universe: yfinance .info (PE, EPS, margins, ROE,
ROA, market cap, revenue, EBITDA) and quarterly financial statements
(quarterly_income_stmt, quarterly_balance_sheet, quarterly_cashflow).
Processes tickers sequentially with a mandatory delay between requests
to stay under yfinance rate limits. Includes retry with exponential
backoff on rate-limit errors.
Output:
data/fundamentals/company_info.csv -- summary info per ticker
data/fundamentals/{TICKER}_income.csv -- quarterly income statement
data/fundamentals/{TICKER}_balance.csv -- quarterly balance sheet
data/fundamentals/{TICKER}_cashflow.csv -- quarterly cash flow statement
"""
from __future__ import annotations
import logging
import time
from pathlib import Path
from typing import Any
import pandas as pd
import yfinance as yf
from . import config
logger = logging.getLogger(__name__)
# Fields to pull from yfinance Ticker.info (order preserved in output CSV)
INFO_FIELDS = [
    # Valuation
    "marketCap", "trailingPE", "forwardPE",
    "trailingEps", "forwardEps",
    "priceToSalesTrailing12Months", "priceToBook",
    "enterpriseValue", "enterpriseToRevenue", "enterpriseToEbitda",
    # Profitability
    "profitMargins", "operatingMargins", "grossMargins",
    "returnOnEquity", "returnOnAssets",
    # Balance sheet & cash flow
    "debtToEquity", "totalRevenue", "revenueGrowth", "ebitda",
    "totalDebt", "totalCash", "freeCashflow", "operatingCashflow",
    # Classification
    "sector", "industry", "fullTimeEmployees",
]
def _collect_single_ticker(ticker: str, out_dir: Path, max_retries: int = 3) -> dict[str, Any] | None:
    """Collect .info fields and quarterly statements for one ticker.

    Args:
        ticker: Symbol to collect.
        out_dir: Directory for per-ticker CSVs and the done-flag file.
        max_retries: Attempts before giving up on a rate-limited .info call.

    Returns:
        A row dict of INFO_FIELDS values (plus "ticker"), or None when the
        ticker was already collected, permanently failed, or stayed
        rate-limited across all retries.
    """
    done_flag = out_dir / f"{ticker}_info_done.flag"
    if done_flag.exists():
        return None  # already collected (resume-safe; see run())

    # Retry .info with exponential backoff on rate-limit errors only;
    # any other error is treated as permanent for this ticker.
    t: yf.Ticker | None = None
    info: dict[str, Any] | None = None
    for attempt in range(max_retries):
        try:
            t = yf.Ticker(ticker)
            info = t.info
            break
        except Exception as exc:
            err_str = str(exc)
            if "Too Many Requests" in err_str or "Rate" in err_str:
                # Don't sleep after the final attempt — we give up anyway.
                if attempt < max_retries - 1:
                    time.sleep(2 ** attempt * 5)  # 5s, 10s, ...
                continue
            logger.warning("Skipping %s (.info failed): %s", ticker, exc)
            return None
    if info is None or t is None:
        logger.warning("Rate-limited for %s after %d retries", ticker, max_retries)
        return None

    row: dict[str, Any] = {"ticker": ticker}
    for field in INFO_FIELDS:
        row[field] = info.get(field)

    # Quarterly financial statements (native quarterly granularity).
    # Best-effort: a missing statement only logs at debug level.
    for attr, suffix in [
        ("quarterly_income_stmt", "income"),
        ("quarterly_balance_sheet", "balance"),
        ("quarterly_cashflow", "cashflow"),
    ]:
        try:
            stmt: pd.DataFrame = getattr(t, attr)
            if stmt is not None and not stmt.empty:
                stmt.to_csv(out_dir / f"{ticker}_{suffix}.csv")
        except Exception as exc:
            logger.debug("Could not get %s for %s: %s", attr, ticker, exc)

    # Flag written only after statements were attempted, so a crash before
    # this point leaves the ticker eligible for a retry on the next run.
    done_flag.write_text("done")
    return row
def _merge_and_save(rows: list[dict[str, Any]], out_path: Path) -> pd.DataFrame:
    """Merge *rows* with any existing CSV at *out_path*, dedupe by ticker, persist.

    Keeps the newest row per ticker ("last" wins) and writes the result
    sorted by ticker. Returns the combined DataFrame.
    """
    df = pd.DataFrame(rows)
    if out_path.exists():
        existing = pd.read_csv(out_path)
        df = pd.concat([existing, df]).drop_duplicates(subset="ticker", keep="last")
    df = df.sort_values("ticker")
    df.to_csv(out_path, index=False)
    return df


def run(tickers: list[str] | None = None) -> pd.DataFrame:
    """Execute Step 2 and return the company_info DataFrame.

    Args:
        tickers: Symbols to collect; defaults to the Step-1 universe CSV.

    Returns:
        The combined company_info DataFrame (possibly loaded from a
        previous run, or empty when nothing was collected).

    Raises:
        FileNotFoundError: When no tickers are given and the Step-1
            universe file does not exist.
    """
    config.FUNDAMENTALS_DIR.mkdir(parents=True, exist_ok=True)
    out_path = config.FUNDAMENTALS_DIR / "company_info.csv"
    if tickers is None:
        universe_path = config.UNIVERSE_DIR / "benchmark_universe.csv"
        if not universe_path.exists():
            raise FileNotFoundError(f"Run Step 1 first: {universe_path}")
        tickers = pd.read_csv(universe_path)["ticker"].tolist()

    # Filter to tickers not yet collected (resume-safe via flag files).
    already_done = {f.stem.replace("_info_done", "")
                    for f in config.FUNDAMENTALS_DIR.glob("*_info_done.flag")}
    remaining = [t for t in tickers if t not in already_done]
    logger.info("Collecting fundamentals for %d tickers (%d already done) ...",
                len(remaining), len(already_done))

    rows: list[dict[str, Any]] = []
    # Sequential with delay to avoid yfinance rate limits.
    for i, ticker in enumerate(remaining):
        result = _collect_single_ticker(ticker, config.FUNDAMENTALS_DIR)
        if result is not None:
            rows.append(result)
        # Checkpoint every 10 tickers (more frequent = less data loss on
        # crash; the flag file is written AFTER statements are saved, so
        # an un-flushed `rows` buffer is the only data-loss window).
        if (i + 1) % 10 == 0 and rows:
            _merge_and_save(rows, out_path)
            rows.clear()  # flushed — already persisted
        if (i + 1) % 50 == 0:
            logger.info("Fundamentals progress: %d / %d", i + 1, len(remaining))
        # Mandatory delay between requests; skipped after the last ticker.
        if i + 1 < len(remaining):
            time.sleep(1.5)

    if rows:
        combined = _merge_and_save(rows, out_path)
        logger.info("Saved company_info (%d rows) to %s", len(combined), out_path)
        return combined
    # Everything was flushed at a checkpoint (or nothing was new).
    if out_path.exists():
        return pd.read_csv(out_path)
    return pd.DataFrame()