# MacroLens/code/collect_universe.py
"""Step 1: Collect the small-cap ticker universe.
Universe definition: union of small-cap-and-below tickers from major
S&P/Russell/iShares ETFs:
- IWM: iShares Russell 2000 ETF (Russell 2000 small-caps)
- IJR: iShares Core S&P SmallCap ETF (S&P 600 small-caps)
- IWC: iShares Micro-Cap ETF (micro-caps below small-cap threshold)
Tickers exceeding the S&P 600 upper bound ($7.4B median market cap) are
filtered out downstream in preprocess.py via SMALL_CAP_MAX_MEDIAN_MCAP.
We do NOT filter on ETF holding value here because it does not correlate
with actual company market cap (mega-caps may have small ETF positions).
This satisfies Prof. Hwang's Requirement 1.1: "Collect R2K + small caps".
- Uses iShares CSV data directly for market value, sector, exchange.
- Normalises multi-class share tickers (e.g. BFA -> BF-A) so yfinance can find them.
- Removes duplicates, zero-price entries, and non-equity rows.
Output: data/universe/benchmark_universe.csv
"""
from __future__ import annotations
import io
import logging
import math
import os
import tempfile
import time
import httpx
import pandas as pd
from . import config
logger = logging.getLogger(__name__)
_MAX_HTTP_RETRIES = 3
# Sanity bounds for company market cap (USD).
# Anything outside this range is treated as an invalid lookup.
_MCAP_MIN_VALID = 1.0e5 # $100k — below this is almost certainly bad data
_MCAP_MAX_VALID = 1.0e13 # $10T — above this is impossible
# yfinance lookup pacing — pure serial.
#
# Empirically, ANY parallelism (even 4 workers × 0.3s delay = ~5 req/s)
# triggers Yahoo's per-IP rate limit on runs of >2000 tickers, dropping
# coverage to ~60%. Pure serial at ~3 req/s stays under the threshold and
# achieves ~99% coverage. For ~5,345 tickers this takes ~27 minutes — that
# is the minimum reliable wall time for this dataset size.
_MCAP_LOOKUP_DELAY_SEC = 0.3
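
# Back-of-envelope wall time (illustrative): each lookup costs the 0.3s sleep
# plus the request itself, so ~5,345 tickers * ~0.3s >= ~1,600s ~= 27 minutes
# for pass 1, which matches the figure quoted above.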
# iShares strips the dash from multi-class share tickers.
# This map restores the yfinance-compatible format.
_CLASS_SHARE_FIXES: dict[str, str] = {
"BFA": "BF-A",
"BFB": "BF-B",
"BRKB": "BRK-B",
"LENB": "LEN-B",
"MOGA": "MOG-A",
"MOGB": "MOG-B",
"GEFB": "GEF-B",
"CWENA": "CWEN-A",
"UHALB": "UHAL-B",
"CRDA": "CRD-A", # Crawford & Co Class A — non-voting
"CRDB": "CRD-B", # Crawford & Co Class B — voting
}
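# e.g. the raw iShares ticker "BRKB" is rewritten to "BRK-B", the form
# yfinance resolves for Berkshire Hathaway Class B shares.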
# NASDAQ Trader public symbol directory — authoritative source for ALL
# US-listed common equities (NASDAQ + NYSE + NYSE Mkt + AMEX). Used to
# populate Prof. Hwang's third universe component: small caps that are
# NOT in any major index (recent IPOs, between-rebalance additions,
# dropped-from-index small caps still trading).
_NASDAQ_LISTED_URL = "https://www.nasdaqtrader.com/dynamic/symdir/nasdaqlisted.txt"
_OTHER_LISTED_URL = "https://www.nasdaqtrader.com/dynamic/symdir/otherlisted.txt"
def _download_ishares_holdings(url: str) -> pd.DataFrame:
"""Download iShares ETF holdings CSV and return a cleaned DataFrame."""
for attempt in range(_MAX_HTTP_RETRIES):
try:
resp = httpx.get(url, follow_redirects=True, timeout=60)
resp.raise_for_status()
break
except Exception as exc:
if attempt < _MAX_HTTP_RETRIES - 1:
wait = 2 ** attempt * 5
logger.warning("iShares download failed (attempt %d/%d), retrying in %ds: %s",
attempt + 1, _MAX_HTTP_RETRIES, wait, exc)
time.sleep(wait)
else:
raise
text = resp.text
# iShares CSVs have metadata rows before the actual header.
lines = text.splitlines()
header_idx = 0
for i, line in enumerate(lines):
if line.strip().lower().startswith("ticker"):
header_idx = i
break
csv_text = "\n".join(lines[header_idx:])
df = pd.read_csv(io.StringIO(csv_text))
df.columns = [c.strip() for c in df.columns]
if "Ticker" in df.columns:
df = df[df["Ticker"].notna() & (df["Ticker"].str.strip() != "-") & (df["Ticker"].str.strip() != "")]
df["Ticker"] = df["Ticker"].str.strip().str.upper()
# Filter out junk rows (e.g. iShares copyright disclaimers parsed as tickers)
df = df[df["Ticker"].str.len() <= 10]
# Keep only equity instruments (remove futures, cash, CVRs, etc.)
if "Asset Class" in df.columns:
before = len(df)
df = df[df["Asset Class"].str.strip().str.lower() == "equity"]
dropped = before - len(df)
if dropped > 0:
logger.info("Filtered %d non-equity entries (kept %d equities).", dropped, len(df))
# Remove zero-price entries (CVRs, escrows, delisted, private vestings
# that iShares mislabels as Equity)
if "Price" in df.columns:
price_num = pd.to_numeric(df["Price"].astype(str).str.replace(",", ""), errors="coerce")
before = len(df)
df = df[price_num > 0]
dropped = before - len(df)
if dropped > 0:
logger.info("Filtered %d zero-price entries (CVRs/escrows/delisted).", dropped)
return df
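
# Illustrative iShares holdings CSV shape (the metadata rows and exact column
# set vary by fund; only the columns used above are relied on, and the data
# row below is made up):
#
#   <fund metadata rows ...>
#   Ticker,Name,Sector,Asset Class,Market Value,...,Price,...,Exchange,...
#   ABCD,Example Corp,Industrials,Equity,"1,234,567",...,12.34,...,NASDAQ,...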
def _download_nasdaq_trader(url: str) -> pd.DataFrame:
"""Download a pipe-delimited NASDAQ Trader symbol directory file.
Both nasdaqlisted.txt and otherlisted.txt share the same format:
pipe-delimited, one header row, last line is a 'File Creation Time'
footer that must be skipped.
"""
for attempt in range(_MAX_HTTP_RETRIES):
try:
resp = httpx.get(url, follow_redirects=True, timeout=60)
resp.raise_for_status()
break
except Exception as exc:
if attempt < _MAX_HTTP_RETRIES - 1:
wait = 2 ** attempt * 5
logger.warning("NASDAQ Trader download failed (attempt %d/%d), retrying in %ds: %s",
attempt + 1, _MAX_HTTP_RETRIES, wait, exc)
time.sleep(wait)
else:
raise
text = resp.text
# Drop the trailing "File Creation Time" footer line
lines = [ln for ln in text.splitlines() if ln and not ln.startswith("File Creation Time")]
df = pd.read_csv(io.StringIO("\n".join(lines)), sep="|")
df.columns = [c.strip() for c in df.columns]
return df
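
# Illustrative nasdaqlisted.txt shape (fields per the header documented in
# _collect_uncovered_smallcaps below; the data row is made up):
#
#   Symbol|Security Name|Market Category|Test Issue|Financial Status|Round Lot Size|ETF|NextShares
#   ABCD|Example Corp - Common Stock|Q|N|N|100|N|N
#   ...
#   File Creation Time: ...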
def _collect_uncovered_smallcaps(already_seen: set[str]) -> list[dict]:
    """Return candidate records for Prof. Hwang's third universe component:
    small caps listed on NYSE/NASDAQ that are NOT in any major index
    (specifically, not in the IWM/IJR/IWC ETF holdings already collected).

    Each record is a dict with keys: ticker, exchange, name. The exchange
    and security name come directly from the NASDAQ Trader symbol directory
    files (no extra API calls). Sector is filled later by collect_fundamentals.
    The mcap filter (≤ $7.4B) is applied later in run() via the same serial
    yfinance lookup pass; this function only produces the candidate set.

    Filtering rules:
    - Drop ETFs (ETF=Y in nasdaqlisted.txt)
    - Drop test issues (Test Issue=Y)
    - Drop tickers already in IWM/IJR/IWC (passed via `already_seen`)
    - Drop preferreds (tickers containing '$' or '.', which mark preferred classes)
    - Drop warrants, units, and rights (suffix W/U/R on tickers of 5+ characters)
    - Keep only common stock ("Common Stock" / "Common Shares" in the security name)
    """
logger.info("Downloading NASDAQ Trader symbol directories ...")
nas = _download_nasdaq_trader(_NASDAQ_LISTED_URL)
oth = _download_nasdaq_trader(_OTHER_LISTED_URL)
logger.info("nasdaqlisted: %d rows, otherlisted: %d rows", len(nas), len(oth))
candidates: list[tuple[str, str, str]] = [] # (ticker, exchange_code, security_name)
# ── nasdaqlisted.txt fields: Symbol|Security Name|Market Category|Test Issue|Financial Status|Round Lot Size|ETF|NextShares
if not nas.empty:
nas = nas[nas["Test Issue"].astype(str).str.upper() != "Y"]
nas = nas[nas["ETF"].astype(str).str.upper() != "Y"]
for _, row in nas.iterrows():
sym = str(row.get("Symbol", "")).strip().upper()
sec_name = str(row.get("Security Name", ""))
if not sym or sym == "NAN":
continue
candidates.append((sym, "NASDAQ", sec_name))
# ── otherlisted.txt fields: ACT Symbol|Security Name|Exchange|CQS Symbol|ETF|Round Lot Size|Test Issue|NASDAQ Symbol
# Exchange codes: A=NYSE Mkt (AMEX), N=NYSE, P=NYSE Arca, Z=BATS, V=IEX
if not oth.empty:
oth = oth[oth["Test Issue"].astype(str).str.upper() != "Y"]
oth = oth[oth["ETF"].astype(str).str.upper() != "Y"]
# Keep only NYSE-family exchanges
oth = oth[oth["Exchange"].astype(str).str.upper().isin(["N", "A"])]
for _, row in oth.iterrows():
sym = str(row.get("ACT Symbol", "")).strip().upper()
sec_name = str(row.get("Security Name", ""))
exch = "NYSE" if row.get("Exchange") == "N" else "NYSE_MKT"
if not sym or sym == "NAN":
continue
candidates.append((sym, exch, sec_name))
# Filter to common stock only (drop preferreds, warrants, units, notes,
# rights, depositary shares, etc.). Use security name keyword whitelist
# — most US-listed equities have "Common Stock" or "Common Shares".
common_kws = ("common stock", "common share", "ordinary share", "class a common",
"class b common", "class c common")
drop_kws = ("preferred", "warrant", "unit ", " unit", "% notes", "depositary",
"right ", " rights", "subordinate", "convertible", "trust preferred",
"% senior", "debenture", " etn ", "exchange-traded note")
# Build per-ticker dict (dedupe by ticker, prefer first occurrence)
by_ticker: dict[str, dict] = {}
for sym, exch, sec_name in candidates:
sn_low = sec_name.lower()
if any(k in sn_low for k in drop_kws):
continue
if not any(k in sn_low for k in common_kws):
continue
# Drop ticker symbols that look like preferred/warrant variants:
# tickers containing $ or . (preferred class markers like BAC.PA),
# 5-char tickers ending in W (warrant), U (unit), R (rights).
if "$" in sym or "." in sym:
continue
if len(sym) >= 5 and sym.endswith(("W", "U", "R")):
continue
if sym in by_ticker:
continue # first occurrence wins
# Strip the " - Common Stock" suffix from the security name for cleaner display
clean_name = sec_name
for suffix in (" - Common Stock", " - Common Shares", " - Class A Common Stock",
" - Class B Common Stock", " - Class C Common Stock"):
if clean_name.endswith(suffix):
clean_name = clean_name[: -len(suffix)]
break
by_ticker[sym] = {
"ticker": sym,
"exchange": exch,
"name": clean_name.strip(),
}
# Subtract already-known tickers (those in IWM/IJR/IWC)
new_records = [r for sym, r in sorted(by_ticker.items()) if sym not in already_seen]
overlap = sum(1 for sym in by_ticker if sym in already_seen)
logger.info("NASDAQ Trader common-stock candidates: %d (after subtracting "
"%d already-known tickers: %d)", len(by_ticker), overlap, len(new_records))
return new_records
def _fetch_one_market_cap(ticker: str) -> float | None:
"""Fetch a single ticker's company market cap from yfinance.
Uses ONLY `fast_info.market_cap` — a single fast network call. The
deliberately simple approach avoids the multi-fallback hangs that
occur when `tk.info` blocks for 30+ seconds on rate limits or bad
tickers. Tickers where fast_info fails are returned as None and
dropped from the universe per Option A (a small-cap benchmark
cannot include a ticker without a verified market cap).
Returns a float USD value in [_MCAP_MIN_VALID, _MCAP_MAX_VALID]
or None on any failure.
"""
import yfinance as yf # local import — yfinance is heavy
try:
mc = yf.Ticker(ticker).fast_info.market_cap
except Exception:
return None
try:
mcf = float(mc)
except (TypeError, ValueError):
return None
if not math.isfinite(mcf):
return None
if not (_MCAP_MIN_VALID <= mcf <= _MCAP_MAX_VALID):
return None
return mcf
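
# Illustrative behaviour: _fetch_one_market_cap("AAPL") should return a float
# within the sanity bounds, while a delisted or bogus symbol returns None.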
def _serial_fetch_pass(tickers: list[str], pass_label: str) -> dict[str, float | None]:
"""One serial pass over `tickers`. fast_info call + delay per ticker."""
results: dict[str, float | None] = {}
total = len(tickers)
if total == 0:
return results
logger.info("%s: %d tickers, serial, %.2fs delay ...",
pass_label, total, _MCAP_LOOKUP_DELAY_SEC)
t0 = time.time()
for i, t in enumerate(tickers, start=1):
results[t] = _fetch_one_market_cap(t)
time.sleep(_MCAP_LOOKUP_DELAY_SEC)
if i % 200 == 0 or i == total:
elapsed = time.time() - t0
ok = sum(1 for v in results.values() if v is not None)
rate = i / elapsed if elapsed > 0 else 0
eta = (total - i) / rate if rate > 0 else 0
logger.info(" %s progress: %d/%d (ok=%d) — %.0fs elapsed, ETA %.0fs",
pass_label, i, total, ok, elapsed, eta)
return results
def _fetch_market_caps(tickers: list[str]) -> dict[str, float | None]:
"""Fetch market caps via two serial passes for maximum coverage.
Pass 1: serial fast_info call for every ticker (~3 req/s, no rate limit).
Pass 2: serial retry of any tickers that returned None in pass 1 (catches
transient errors; permanent no-data tickers will fail again and
be dropped per Option A).
Pure serial avoids the per-IP rate limit that even 4 workers triggered.
Expected wall time for ~5,345 tickers: ~27 min pass 1 + ~3 min pass 2.
"""
t0 = time.time()
# ── Pass 1: serial over all tickers ──
results = _serial_fetch_pass(tickers, pass_label="Pass 1")
pass1_ok = sum(1 for v in results.values() if v is not None)
logger.info("Pass 1 complete: %d/%d resolved in %.0fs",
pass1_ok, len(tickers), time.time() - t0)
# ── Pass 2: serial retry of pass-1 failures ──
failed = [t for t in tickers if results.get(t) is None]
if failed:
retry_results = _serial_fetch_pass(failed, pass_label="Pass 2 (retry)")
recovered = 0
for t, mc in retry_results.items():
if mc is not None:
results[t] = mc
recovered += 1
logger.info("Pass 2 complete: recovered %d/%d failures",
recovered, len(failed))
    final_ok = sum(1 for v in results.values() if v is not None)
    total = len(tickers)
    # Avoid ZeroDivisionError on an empty ticker list.
    pct = 100 * final_ok / total if total else 0.0
    logger.info("Total market_cap coverage: %d/%d (%.1f%%) in %.0fs",
                final_ok, total, pct, time.time() - t0)
return results
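
# Example result shape (values illustrative): {"ABCD": 1.2e9, "WXYZ": None};
# None entries are dropped from the universe by run() below.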
def run() -> pd.DataFrame:
"""Execute Step 1 and return the universe DataFrame."""
config.UNIVERSE_DIR.mkdir(parents=True, exist_ok=True)
out_path = config.UNIVERSE_DIR / "benchmark_universe.csv"
if out_path.exists():
logger.info("Universe file already exists at %s, loading.", out_path)
return pd.read_csv(out_path)
    def _records_from_ishares(holdings_df: pd.DataFrame, source: str) -> list[dict]:
        """Convert an iShares holdings DataFrame into universe record dicts."""
records = []
for _, row in holdings_df.iterrows():
ticker = row["Ticker"]
mv_str = str(row.get("Market Value", "")).replace(",", "")
try:
market_value = float(mv_str)
except (ValueError, TypeError):
market_value = None
records.append({
"ticker": ticker,
"market_value": market_value,
"sector": row.get("Sector"),
"exchange": row.get("Exchange"),
"name": row.get("Name"),
"source": source,
})
return records
# ----- Russell 2000 from IWM -----
logger.info("Downloading IWM (Russell 2000) holdings ...")
iwm_df = _download_ishares_holdings(config.IWM_HOLDINGS_URL)
logger.info("IWM tickers: %d", len(iwm_df))
iwm_records = _records_from_ishares(iwm_df, source="IWM")
iwm_set = {r["ticker"] for r in iwm_records}
# ----- S&P SmallCap 600 from IJR -----
logger.info("Downloading IJR (S&P SmallCap 600) holdings ...")
ijr_df = _download_ishares_holdings(config.IJR_HOLDINGS_URL)
logger.info("IJR tickers: %d", len(ijr_df))
ijr_records = _records_from_ishares(ijr_df, source="IJR")
# Keep only IJR tickers not already in IWM
ijr_only = [r for r in ijr_records if r["ticker"] not in iwm_set]
logger.info("IJR-only tickers (not in IWM): %d", len(ijr_only))
# ----- Micro-cap from IWC -----
logger.info("Downloading IWC (Micro-Cap) holdings ...")
iwc_df = _download_ishares_holdings(config.IWC_HOLDINGS_URL)
iwc_records = _records_from_ishares(iwc_df, source="IWC")
seen = iwm_set | {r["ticker"] for r in ijr_only}
iwc_only = [r for r in iwc_records if r["ticker"] not in seen]
logger.info("IWC-only tickers (not in IWM or IJR): %d", len(iwc_only))
# ----- Uncovered NYSE/NASDAQ small caps (Prof. Hwang component 3) -----
# "those who are not even included in the index (small caps in NYSE or NASDAQ)"
# We pull the full NASDAQ Trader symbol directories, filter to common stock
# only, subtract everything already in IWM/IJR/IWC, and let the downstream
# mcap pass apply the $7.4B small-cap upper bound. The remainder is the
# set of small caps that are NOT in any major index (recent IPOs,
# between-rebalance additions, dropped-from-index small caps).
seen_for_uncovered = iwm_set | {r["ticker"] for r in ijr_only} | {r["ticker"] for r in iwc_only}
uncovered_seed = _collect_uncovered_smallcaps(seen_for_uncovered)
uncovered_records = [
{
"ticker": rec["ticker"],
"market_value": None, # iShares-only field; not applicable
"sector": None, # filled later by collect_fundamentals
"exchange": rec["exchange"], # populated from NASDAQ Trader directory
"name": rec["name"], # populated from NASDAQ Trader directory
"source": "UNCOVERED",
}
for rec in uncovered_seed
]
logger.info("UNCOVERED small-cap candidates (pre-mcap-filter): %d", len(uncovered_records))
# Build the set of ALL S&P 600 tickers (BEFORE the IJR-only subtraction
# against IWM). This is what `in_sp_smallcap_600` should reflect: an
# IJR ticker is an S&P 600 small-cap regardless of whether it ALSO
# happens to appear in IWM (they overlap by hundreds of names). The
# earlier `source` column does NOT capture this -- a ticker in both
# IWM and IJR carries source='IWM', losing the SP600 attestation.
all_ijr_tickers = {r["ticker"] for r in ijr_records}
# Combine: IWM + IJR-only + IWC-only + UNCOVERED
all_records = []
for r in iwm_records:
all_records.append({
**r,
"in_russell_2000": True,
"in_sp_smallcap_600": r["ticker"] in all_ijr_tickers,
"small_cap_outside": False,
})
for r in ijr_only:
all_records.append({
**r,
"in_russell_2000": False,
"in_sp_smallcap_600": True,
"small_cap_outside": True,
})
for r in iwc_only:
all_records.append({
**r,
"in_russell_2000": False,
"in_sp_smallcap_600": False,
"small_cap_outside": True,
})
for r in uncovered_records:
all_records.append({
**r,
"in_russell_2000": False,
"in_sp_smallcap_600": False,
"small_cap_outside": True,
})
df = pd.DataFrame(all_records)
logger.info("Combined raw universe: %d tickers (IWM=%d, IJR-only=%d, IWC-only=%d, UNCOVERED=%d)",
len(df), len(iwm_records), len(ijr_only), len(iwc_only), len(uncovered_records))
# Normalise multi-class share tickers (iShares strips the dash)
fixed = 0
for old, new in _CLASS_SHARE_FIXES.items():
mask = df["ticker"] == old
if mask.any():
df.loc[mask, "ticker"] = new
fixed += mask.sum()
if fixed:
logger.info("Normalised %d multi-class share tickers (e.g. BFA -> BF-A).", fixed)
# Remove exact duplicates (same ticker appearing as CVR + regular stock)
before = len(df)
df = df.drop_duplicates(subset="ticker", keep="first")
dupes = before - len(df)
if dupes:
logger.info("Removed %d duplicate tickers.", dupes)
# ── Fetch authoritative company market cap from yfinance ──────────────
# NOTE: iShares "market_value" is the ETF's holding value, NOT the
# company's market cap. We fetch the real market cap here so the saved
# universe file is the authoritative small-cap set from the start.
#
# Every ticker in the saved file MUST have a verified market_cap, or it
# is dropped (cannot honestly be classified as small-cap without knowing).
tickers = df["ticker"].tolist()
mcap_map = _fetch_market_caps(tickers)
df["market_cap"] = df["ticker"].map(mcap_map)
# Drop tickers with no reliable market cap (delisted, SPAC residue, ADR glitches)
invalid_mask = df["market_cap"].isna()
invalid_tickers = sorted(df.loc[invalid_mask, "ticker"].tolist())
if invalid_tickers:
logger.warning("Dropped %d tickers with no valid market_cap (showing first 30): %s",
len(invalid_tickers), invalid_tickers[:30])
df = df.loc[~invalid_mask].copy()
# Drop mega-caps from IWC and UNCOVERED sources.
#
# IWM (Russell 2000) and IJR (S&P SmallCap 600) constituents are
# index-designated small-caps by FTSE Russell / S&P Dow Jones methodology
# — we respect those classifications and do NOT filter them by current
# market cap (a few names may have drifted above $7.4B since the last
# index reconstitution, but they remain index-designated small-caps).
#
# IWC has known mega-cap leakage (iShares holds tiny tracking positions
# in NVDA/AAPL/etc. for index-fit reasons) and must be filtered.
#
# UNCOVERED tickers have no index attestation at all and so require
# an explicit small-cap upper bound. The S&P 600 SmallCap upper bound
# ($7.4B) is the official threshold per S&P Dow Jones methodology.
needs_filter = df["source"].isin(["IWC", "UNCOVERED"])
mega_mask = needs_filter & (df["market_cap"] > config.SMALL_CAP_MAX_MEDIAN_MCAP)
mega_rows = df.loc[mega_mask, ["ticker", "source", "market_cap"]].sort_values(
"market_cap", ascending=False
)
if not mega_rows.empty:
logger.warning(
"Dropped %d mega-caps from IWC/UNCOVERED (market_cap > $%.1fB). First 30:\n%s",
len(mega_rows),
config.SMALL_CAP_MAX_MEDIAN_MCAP / 1e9,
mega_rows.head(30).to_string(index=False),
)
df = df.loc[~mega_mask].copy()
logger.info(
"Universe after market-cap filtering: %d tickers (max mcap=$%.2fB, median=$%.2fB)",
len(df),
df["market_cap"].max() / 1e9,
df["market_cap"].median() / 1e9,
)
# Apply MAX_TICKERS cap if set
if config.MAX_TICKERS is not None:
df = df.head(config.MAX_TICKERS)
# Label lower-end by market value percentile (within Russell 2000 subset)
r2k = df[df["in_russell_2000"] & df["market_value"].notna()]
if not r2k.empty:
threshold = r2k["market_value"].quantile(config.LOWER_END_PERCENTILE / 100.0)
df["lower_end_russell2000"] = df["in_russell_2000"] & (df["market_value"] <= threshold)
logger.info("Lower-end R2K threshold: market_value <= %.0f (%d tickers)",
threshold, df["lower_end_russell2000"].sum())
else:
df["lower_end_russell2000"] = False
df = df.sort_values("ticker").reset_index(drop=True)
# Atomic write: write to temp file first, then rename
fd, tmp_path = tempfile.mkstemp(suffix=".csv", dir=out_path.parent)
try:
os.close(fd)
df.to_csv(tmp_path, index=False)
os.replace(tmp_path, out_path)
except BaseException:
try:
os.unlink(tmp_path)
except OSError:
pass
raise
logger.info("Saved universe (%d tickers) to %s", len(df), out_path)
return df
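
# Minimal manual-run sketch (assumption: the relative import above means this
# module must be executed inside its package, e.g.
# `python -m <package>.collect_universe`; the package name is not shown here).
if __name__ == "__main__":
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s %(levelname)s %(name)s: %(message)s",
    )
    run()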