| """Step 1: Collect the small-cap ticker universe. |
| |
| Universe definition: union of small-cap-and-below tickers from major |
| S&P/Russell/iShares ETFs: |
| |
| - IWM: iShares Russell 2000 ETF (Russell 2000 small-caps) |
| - IJR: iShares Core S&P SmallCap ETF (S&P 600 small-caps) |
| - IWC: iShares Micro-Cap ETF (micro-caps below small-cap threshold) |
| |
| Tickers exceeding the S&P 600 upper bound ($7.4B median market cap) are |
| filtered out downstream in preprocess.py via SMALL_CAP_MAX_MEDIAN_MCAP. |
| We do NOT filter on ETF holding value here because it does not correlate |
| with actual company market cap (mega-caps may have small ETF positions). |
| |
| This satisfies Prof. Hwang's Requirement 1.1: "Collect R2K + small caps". |
| |
| - Uses iShares CSV data directly for market value, sector, exchange. |
| - Normalises multi-class share tickers (e.g. BFA -> BF-A) so yfinance can find them. |
| - Removes duplicates, zero-price entries, and non-equity rows. |
| |
| Output: data/universe/benchmark_universe.csv |
| """ |

from __future__ import annotations

import io
import logging
import math
import os
import tempfile
import time

import httpx
import pandas as pd

from . import config

logger = logging.getLogger(__name__)

# Retry budget for the iShares / NASDAQ Trader downloads.
_MAX_HTTP_RETRIES = 3

# Sanity window for a fetched market cap: values below $100K or above $10T
# are treated as bad data and rejected.
_MCAP_MIN_VALID = 1.0e5
_MCAP_MAX_VALID = 1.0e13

# Delay between serial yfinance fast_info calls; ~3 requests/s stays under
# the per-IP rate limit (see _fetch_market_caps).
_MCAP_LOOKUP_DELAY_SEC = 0.3
| _CLASS_SHARE_FIXES: dict[str, str] = { |
| "BFA": "BF-A", |
| "BFB": "BF-B", |
| "BRKB": "BRK-B", |
| "LENB": "LEN-B", |
| "MOGA": "MOG-A", |
| "MOGB": "MOG-B", |
| "GEFB": "GEF-B", |
| "CWENA": "CWEN-A", |
| "UHALB": "UHAL-B", |
| "CRDA": "CRD-A", |
| "CRDB": "CRD-B", |
| } |
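
# The fix is a plain dict lookup on the "ticker" column (applied in run());
# unmapped tickers pass through unchanged. Illustrative:
#
#     >>> _CLASS_SHARE_FIXES.get("BFA", "BFA")
#     'BF-A'
#     >>> _CLASS_SHARE_FIXES.get("AAPL", "AAPL")
#     'AAPL'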

_NASDAQ_LISTED_URL = "https://www.nasdaqtrader.com/dynamic/symdir/nasdaqlisted.txt"
_OTHER_LISTED_URL = "https://www.nasdaqtrader.com/dynamic/symdir/otherlisted.txt"


def _download_ishares_holdings(url: str) -> pd.DataFrame:
    """Download an iShares ETF holdings CSV and return a cleaned DataFrame."""
    for attempt in range(_MAX_HTTP_RETRIES):
        try:
            resp = httpx.get(url, follow_redirects=True, timeout=60)
            resp.raise_for_status()
            break
        except Exception as exc:
            if attempt < _MAX_HTTP_RETRIES - 1:
                wait = 2 ** attempt * 5
                logger.warning("iShares download failed (attempt %d/%d), retrying in %ds: %s",
                               attempt + 1, _MAX_HTTP_RETRIES, wait, exc)
                time.sleep(wait)
            else:
                raise
    text = resp.text

    # iShares CSVs carry a fund-metadata preamble; the holdings table starts
    # at the first row whose first column is "Ticker".
    lines = text.splitlines()
    header_idx = 0
    for i, line in enumerate(lines):
        if line.strip().lower().startswith("ticker"):
            header_idx = i
            break

    csv_text = "\n".join(lines[header_idx:])
    df = pd.read_csv(io.StringIO(csv_text))
    df.columns = [c.strip() for c in df.columns]
    if "Ticker" in df.columns:
        df = df[df["Ticker"].notna()
                & (df["Ticker"].str.strip() != "-")
                & (df["Ticker"].str.strip() != "")]
        df["Ticker"] = df["Ticker"].str.strip().str.upper()
        # Defensive cap on symbol length: anything longer is not a real ticker.
        df = df[df["Ticker"].str.len() <= 10]

    if "Asset Class" in df.columns:
        before = len(df)
        df = df[df["Asset Class"].str.strip().str.lower() == "equity"]
        dropped = before - len(df)
        if dropped > 0:
            logger.info("Filtered %d non-equity entries (kept %d equities).", dropped, len(df))

    # Zero-price rows are CVRs, escrow shares, or delisted leftovers.
    if "Price" in df.columns:
        price_num = pd.to_numeric(df["Price"].astype(str).str.replace(",", ""), errors="coerce")
        before = len(df)
        df = df[price_num > 0]
        dropped = before - len(df)
        if dropped > 0:
            logger.info("Filtered %d zero-price entries (CVRs/escrows/delisted).", dropped)
    return df
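
# The header scan above on a synthetic preamble (illustrative; the real
# iShares layout and column set vary by fund):
#
#     >>> raw = "iShares Russell 2000 ETF\nHoldings as of ...\n\nTicker,Name,Price\nAAA,Aaa Corp,10"
#     >>> lines = raw.splitlines()
#     >>> next(i for i, ln in enumerate(lines) if ln.strip().lower().startswith("ticker"))
#     3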


def _download_nasdaq_trader(url: str) -> pd.DataFrame:
    """Download a pipe-delimited NASDAQ Trader symbol directory file.

    Both nasdaqlisted.txt and otherlisted.txt share the same format:
    pipe-delimited, one header row, last line is a 'File Creation Time'
    footer that must be skipped.
    """
    for attempt in range(_MAX_HTTP_RETRIES):
        try:
            resp = httpx.get(url, follow_redirects=True, timeout=60)
            resp.raise_for_status()
            break
        except Exception as exc:
            if attempt < _MAX_HTTP_RETRIES - 1:
                wait = 2 ** attempt * 5
                logger.warning("NASDAQ Trader download failed (attempt %d/%d), retrying in %ds: %s",
                               attempt + 1, _MAX_HTTP_RETRIES, wait, exc)
                time.sleep(wait)
            else:
                raise
    text = resp.text

    # Drop blank lines and the 'File Creation Time' footer before parsing.
    lines = [ln for ln in text.splitlines() if ln and not ln.startswith("File Creation Time")]
    df = pd.read_csv(io.StringIO("\n".join(lines)), sep="|")
    df.columns = [c.strip() for c in df.columns]
    return df
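
# Shape of the directory files handled above (synthetic excerpt; the real
# files carry more columns, e.g. Market Category and Round Lot Size):
#
#     >>> raw = "Symbol|Security Name|ETF|Test Issue\nABCD|Abcd Corp - Common Stock|N|N\nFile Creation Time: 0101202500:00|||"
#     >>> [ln for ln in raw.splitlines() if ln and not ln.startswith("File Creation Time")]
#     ['Symbol|Security Name|ETF|Test Issue', 'ABCD|Abcd Corp - Common Stock|N|N']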


def _collect_uncovered_smallcaps(already_seen: set[str]) -> list[dict]:
    """Return candidate records for Prof. Hwang's third universe component:
    small caps listed on NYSE/NASDAQ that are NOT in any major index
    (specifically not in the IWM/IJR/IWC ETF holdings already collected).

    Each record is a dict with keys: ticker, exchange, name. The exchange
    and security name come directly from the NASDAQ Trader symbol directory
    files (no extra API calls). Sector is filled later by collect_fundamentals.

    The mcap filter (≤ $7.4B) is applied later in run() via the same serial
    yfinance lookup pass; this function only produces the candidate set.

    Filtering rules (worked examples follow this list):
    - Drop ETFs (ETF=Y in nasdaqlisted.txt)
    - Drop test issues (Test Issue=Y)
    - Drop tickers already in IWM/IJR/IWC (passed via `already_seen`)
    - Drop preferreds ('$' or '.' in the symbol marks a preferred class)
    - Drop warrants, units, and rights (symbols of five or more characters
      ending in W/U/R)
    - Keep only common stock ("Common Stock" / "Common Shares" and similar
      phrases in the security name)
    """
| logger.info("Downloading NASDAQ Trader symbol directories ...") |
| nas = _download_nasdaq_trader(_NASDAQ_LISTED_URL) |
| oth = _download_nasdaq_trader(_OTHER_LISTED_URL) |
| logger.info("nasdaqlisted: %d rows, otherlisted: %d rows", len(nas), len(oth)) |
|
|
| candidates: list[tuple[str, str, str]] = [] |
|
|
| |
| if not nas.empty: |
| nas = nas[nas["Test Issue"].astype(str).str.upper() != "Y"] |
| nas = nas[nas["ETF"].astype(str).str.upper() != "Y"] |
| for _, row in nas.iterrows(): |
| sym = str(row.get("Symbol", "")).strip().upper() |
| sec_name = str(row.get("Security Name", "")) |
| if not sym or sym == "NAN": |
| continue |
| candidates.append((sym, "NASDAQ", sec_name)) |
|
|
| |
| |
| if not oth.empty: |
| oth = oth[oth["Test Issue"].astype(str).str.upper() != "Y"] |
| oth = oth[oth["ETF"].astype(str).str.upper() != "Y"] |
| |
| oth = oth[oth["Exchange"].astype(str).str.upper().isin(["N", "A"])] |
| for _, row in oth.iterrows(): |
| sym = str(row.get("ACT Symbol", "")).strip().upper() |
| sec_name = str(row.get("Security Name", "")) |
| exch = "NYSE" if row.get("Exchange") == "N" else "NYSE_MKT" |
| if not sym or sym == "NAN": |
| continue |
| candidates.append((sym, exch, sec_name)) |
|
|
| |
| |
| |
| common_kws = ("common stock", "common share", "ordinary share", "class a common", |
| "class b common", "class c common") |
| drop_kws = ("preferred", "warrant", "unit ", " unit", "% notes", "depositary", |
| "right ", " rights", "subordinate", "convertible", "trust preferred", |
| "% senior", "debenture", " etn ", "exchange-traded note") |
|
|
| |
| by_ticker: dict[str, dict] = {} |
| for sym, exch, sec_name in candidates: |
| sn_low = sec_name.lower() |
| if any(k in sn_low for k in drop_kws): |
| continue |
| if not any(k in sn_low for k in common_kws): |
| continue |
| |
| |
| |
| if "$" in sym or "." in sym: |
| continue |
| if len(sym) >= 5 and sym.endswith(("W", "U", "R")): |
| continue |
| if sym in by_ticker: |
| continue |
| |
| clean_name = sec_name |
| for suffix in (" - Common Stock", " - Common Shares", " - Class A Common Stock", |
| " - Class B Common Stock", " - Class C Common Stock"): |
| if clean_name.endswith(suffix): |
| clean_name = clean_name[: -len(suffix)] |
| break |
| by_ticker[sym] = { |
| "ticker": sym, |
| "exchange": exch, |
| "name": clean_name.strip(), |
| } |
|
|
| |
| new_records = [r for sym, r in sorted(by_ticker.items()) if sym not in already_seen] |
| overlap = sum(1 for sym in by_ticker if sym in already_seen) |
| logger.info("NASDAQ Trader common-stock candidates: %d (after subtracting " |
| "%d already-known tickers: %d)", len(by_ticker), overlap, len(new_records)) |
| return new_records |


def _fetch_one_market_cap(ticker: str) -> float | None:
    """Fetch a single ticker's company market cap from yfinance.

    Uses ONLY `fast_info.market_cap` — a single fast network call. The
    deliberately simple approach avoids the multi-fallback hangs that
    occur when `tk.info` blocks for 30+ seconds on rate limits or bad
    tickers. Tickers where fast_info fails are returned as None and
    dropped from the universe per Option A (a small-cap benchmark
    cannot include a ticker without a verified market cap).

    Returns a float USD value in [_MCAP_MIN_VALID, _MCAP_MAX_VALID],
    or None on any failure.
    """
    import yfinance as yf  # local import: only this lookup needs yfinance

    try:
        mc = yf.Ticker(ticker).fast_info.market_cap
    except Exception:
        return None
    try:
        mcf = float(mc)
    except (TypeError, ValueError):
        return None
    if not math.isfinite(mcf):
        return None
    if not (_MCAP_MIN_VALID <= mcf <= _MCAP_MAX_VALID):
        return None
    return mcf
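
# Hedged usage sketch (hits the network, so illustrative only; the ticker
# below is deliberately bogus and should resolve to None):
#
#     >>> _fetch_one_market_cap("NO-SUCH-TICKER-XYZ") is None
#     True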


def _serial_fetch_pass(tickers: list[str], pass_label: str) -> dict[str, float | None]:
    """One serial pass over `tickers`: one fast_info call plus a delay per ticker."""
    results: dict[str, float | None] = {}
    total = len(tickers)
    if total == 0:
        return results
    logger.info("%s: %d tickers, serial, %.2fs delay ...",
                pass_label, total, _MCAP_LOOKUP_DELAY_SEC)
    t0 = time.time()
    for i, t in enumerate(tickers, start=1):
        results[t] = _fetch_one_market_cap(t)
        time.sleep(_MCAP_LOOKUP_DELAY_SEC)
        if i % 200 == 0 or i == total:
            elapsed = time.time() - t0
            ok = sum(1 for v in results.values() if v is not None)
            rate = i / elapsed if elapsed > 0 else 0
            eta = (total - i) / rate if rate > 0 else 0
            logger.info("  %s progress: %d/%d (ok=%d) — %.0fs elapsed, ETA %.0fs",
                        pass_label, i, total, ok, elapsed, eta)
    return results


def _fetch_market_caps(tickers: list[str]) -> dict[str, float | None]:
    """Fetch market caps via two serial passes for maximum coverage.

    Pass 1: serial fast_info call for every ticker (~3 req/s, no rate limit).
    Pass 2: serial retry of any tickers that returned None in pass 1 (catches
            transient errors; permanent no-data tickers will fail again and
            be dropped per Option A).

    Pure serial avoids the per-IP rate limit that even 4 workers triggered.
    Expected wall time for ~5,345 tickers: ~27 min pass 1 + ~3 min pass 2.
    """
    t0 = time.time()

    # Pass 1: every ticker once.
    results = _serial_fetch_pass(tickers, pass_label="Pass 1")
    pass1_ok = sum(1 for v in results.values() if v is not None)
    logger.info("Pass 1 complete: %d/%d resolved in %.0fs",
                pass1_ok, len(tickers), time.time() - t0)

    # Pass 2: retry only the failures.
    failed = [t for t in tickers if results.get(t) is None]
    if failed:
        retry_results = _serial_fetch_pass(failed, pass_label="Pass 2 (retry)")
        recovered = 0
        for t, mc in retry_results.items():
            if mc is not None:
                results[t] = mc
                recovered += 1
        logger.info("Pass 2 complete: recovered %d/%d failures",
                    recovered, len(failed))

    final_ok = sum(1 for v in results.values() if v is not None)
    logger.info("Total market_cap coverage: %d/%d (%.1f%%) in %.0fs",
                final_ok, len(tickers), 100 * final_ok / len(tickers),
                time.time() - t0)
    return results


def run() -> pd.DataFrame:
    """Execute Step 1 and return the universe DataFrame."""
    config.UNIVERSE_DIR.mkdir(parents=True, exist_ok=True)
    out_path = config.UNIVERSE_DIR / "benchmark_universe.csv"

    # Idempotent: reuse a previously built universe if present.
    if out_path.exists():
        logger.info("Universe file already exists at %s, loading.", out_path)
        return pd.read_csv(out_path)

    def _records_from_ishares(holdings_df: pd.DataFrame, source: str) -> list[dict]:
        """Convert iShares holdings rows to universe record dicts."""
        records = []
        for _, row in holdings_df.iterrows():
            ticker = row["Ticker"]
            mv_str = str(row.get("Market Value", "")).replace(",", "")
            try:
                market_value = float(mv_str)
            except (ValueError, TypeError):
                market_value = None
            records.append({
                "ticker": ticker,
                "market_value": market_value,
                "sector": row.get("Sector"),
                "exchange": row.get("Exchange"),
                "name": row.get("Name"),
                "source": source,
            })
        return records

    # IWM (Russell 2000).
    logger.info("Downloading IWM (Russell 2000) holdings ...")
    iwm_df = _download_ishares_holdings(config.IWM_HOLDINGS_URL)
    logger.info("IWM tickers: %d", len(iwm_df))
    iwm_records = _records_from_ishares(iwm_df, source="IWM")
    iwm_set = {r["ticker"] for r in iwm_records}

    # IJR (S&P SmallCap 600), deduplicated against IWM.
    logger.info("Downloading IJR (S&P SmallCap 600) holdings ...")
    ijr_df = _download_ishares_holdings(config.IJR_HOLDINGS_URL)
    logger.info("IJR tickers: %d", len(ijr_df))
    ijr_records = _records_from_ishares(ijr_df, source="IJR")
    ijr_only = [r for r in ijr_records if r["ticker"] not in iwm_set]
    logger.info("IJR-only tickers (not in IWM): %d", len(ijr_only))

    # IWC (micro-caps), deduplicated against IWM and IJR.
    logger.info("Downloading IWC (Micro-Cap) holdings ...")
    iwc_df = _download_ishares_holdings(config.IWC_HOLDINGS_URL)
    iwc_records = _records_from_ishares(iwc_df, source="IWC")
    seen = iwm_set | {r["ticker"] for r in ijr_only}
    iwc_only = [r for r in iwc_records if r["ticker"] not in seen]
    logger.info("IWC-only tickers (not in IWM or IJR): %d", len(iwc_only))

    # NYSE/NASDAQ small caps covered by none of the three ETFs. Market value
    # and sector are unknown at this stage; the market-cap screen happens
    # after the yfinance lookup below.
    seen_for_uncovered = iwm_set | {r["ticker"] for r in ijr_only} | {r["ticker"] for r in iwc_only}
    uncovered_seed = _collect_uncovered_smallcaps(seen_for_uncovered)
    uncovered_records = [
        {
            "ticker": rec["ticker"],
            "market_value": None,
            "sector": None,
            "exchange": rec["exchange"],
            "name": rec["name"],
            "source": "UNCOVERED",
        }
        for rec in uncovered_seed
    ]
    logger.info("UNCOVERED small-cap candidates (pre-mcap-filter): %d", len(uncovered_records))

    # Index-membership flags. The IJR flag must use ALL IJR tickers (not just
    # ijr_only), since many IJR names are also in IWM.
    all_ijr_tickers = {r["ticker"] for r in ijr_records}

    all_records = []
    for r in iwm_records:
        all_records.append({
            **r,
            "in_russell_2000": True,
            "in_sp_smallcap_600": r["ticker"] in all_ijr_tickers,
            "small_cap_outside": False,
        })
    for r in ijr_only:
        all_records.append({
            **r,
            "in_russell_2000": False,
            "in_sp_smallcap_600": True,
            "small_cap_outside": True,
        })
    for r in iwc_only:
        all_records.append({
            **r,
            "in_russell_2000": False,
            "in_sp_smallcap_600": False,
            "small_cap_outside": True,
        })
    for r in uncovered_records:
        all_records.append({
            **r,
            "in_russell_2000": False,
            "in_sp_smallcap_600": False,
            "small_cap_outside": True,
        })
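
    # Shape of one record at this point (values illustrative):
    #   {"ticker": "ABCD", "market_value": 123456.0, "sector": "Industrials",
    #    "exchange": "NASDAQ", "name": "Abcd Corp", "source": "IWM",
    #    "in_russell_2000": True, "in_sp_smallcap_600": False,
    #    "small_cap_outside": False}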

    df = pd.DataFrame(all_records)
    logger.info("Combined raw universe: %d tickers (IWM=%d, IJR-only=%d, IWC-only=%d, UNCOVERED=%d)",
                len(df), len(iwm_records), len(ijr_only), len(iwc_only), len(uncovered_records))

    # Normalise multi-class share tickers to the dash form yfinance expects.
    fixed = 0
    for old, new in _CLASS_SHARE_FIXES.items():
        mask = df["ticker"] == old
        if mask.any():
            df.loc[mask, "ticker"] = new
            fixed += mask.sum()
    if fixed:
        logger.info("Normalised %d multi-class share tickers (e.g. BFA -> BF-A).", fixed)

    # Keep the first occurrence of each ticker (the append order above gives
    # IWM records priority).
    before = len(df)
    df = df.drop_duplicates(subset="ticker", keep="first")
    dupes = before - len(df)
    if dupes:
        logger.info("Removed %d duplicate tickers.", dupes)

    # Verify every ticker's company market cap via yfinance (two serial
    # passes; see _fetch_market_caps).
    tickers = df["ticker"].tolist()
    mcap_map = _fetch_market_caps(tickers)
    df["market_cap"] = df["ticker"].map(mcap_map)

    # Option A: a ticker without a verified market cap cannot stay in a
    # small-cap benchmark.
    invalid_mask = df["market_cap"].isna()
    invalid_tickers = sorted(df.loc[invalid_mask, "ticker"].tolist())
    if invalid_tickers:
        logger.warning("Dropped %d tickers with no valid market_cap (showing first 30): %s",
                       len(invalid_tickers), invalid_tickers[:30])
    df = df.loc[~invalid_mask].copy()

    # Enforce the small-cap ceiling on the non-index components: IWM/IJR
    # members are kept as index-defined, while IWC/UNCOVERED rows above
    # SMALL_CAP_MAX_MEDIAN_MCAP are dropped.
    needs_filter = df["source"].isin(["IWC", "UNCOVERED"])
    mega_mask = needs_filter & (df["market_cap"] > config.SMALL_CAP_MAX_MEDIAN_MCAP)
    mega_rows = df.loc[mega_mask, ["ticker", "source", "market_cap"]].sort_values(
        "market_cap", ascending=False
    )
    if not mega_rows.empty:
        logger.warning(
            "Dropped %d mega-caps from IWC/UNCOVERED (market_cap > $%.1fB). First 30:\n%s",
            len(mega_rows),
            config.SMALL_CAP_MAX_MEDIAN_MCAP / 1e9,
            mega_rows.head(30).to_string(index=False),
        )
    df = df.loc[~mega_mask].copy()

    logger.info(
        "Universe after market-cap filtering: %d tickers (max mcap=$%.2fB, median=$%.2fB)",
        len(df),
        df["market_cap"].max() / 1e9,
        df["market_cap"].median() / 1e9,
    )

    # Optional cap on universe size for fast dev runs.
    if config.MAX_TICKERS is not None:
        df = df.head(config.MAX_TICKERS)

    # Flag the lower end of the Russell 2000. Within the cap-weighted IWM,
    # position market value is proportional to (float-adjusted) market cap,
    # so a quantile on market_value ranks members by size.
    r2k = df[df["in_russell_2000"] & df["market_value"].notna()]
    if not r2k.empty:
        threshold = r2k["market_value"].quantile(config.LOWER_END_PERCENTILE / 100.0)
        df["lower_end_russell2000"] = df["in_russell_2000"] & (df["market_value"] <= threshold)
        logger.info("Lower-end R2K threshold: market_value <= %.0f (%d tickers)",
                    threshold, df["lower_end_russell2000"].sum())
    else:
        df["lower_end_russell2000"] = False

    # Atomic write: dump to a temp file in the target directory, then
    # os.replace over the final path (atomic within one filesystem).
    df = df.sort_values("ticker").reset_index(drop=True)
    fd, tmp_path = tempfile.mkstemp(suffix=".csv", dir=out_path.parent)
    try:
        os.close(fd)
        df.to_csv(tmp_path, index=False)
        os.replace(tmp_path, out_path)
    except BaseException:
        try:
            os.unlink(tmp_path)
        except OSError:
            pass
        raise
    logger.info("Saved universe (%d tickers) to %s", len(df), out_path)
    return df