#!/usr/bin/env python
"""Run the full MacroLens benchmark pipeline end-to-end.
Usage (foreground):
uv run python -m projects.agent_builder.scripts.whatif_bench.run_pipeline
Usage (background with log):
nohup uv run python -m projects.agent_builder.scripts.whatif_bench.run_pipeline \
> whatif_pipeline.log 2>&1 &
The script mirrors the notebook's steps across 3 layers (plus XBRL
ontology construction), running non-interactively with full logging.
Each step is resumable -- if it detects existing output files it will skip.
To force a full re-run, delete the data/ directory first.
"""
from __future__ import annotations
import asyncio
import logging
import os
import sys
import time
from pathlib import Path
# Ensure project root is importable so `projects.*` absolute imports resolve
# when this file is executed directly as a script (not via `-m`).
_ROOT = Path(__file__).resolve().parents[4]  # platform/
if str(_ROOT) not in sys.path:
    sys.path.insert(0, str(_ROOT))
# Load .env from project root (contains API keys for SEC, FRED, EIA, RentCast).
# override=False: variables already set in the environment win over .env values.
_ENV_FILE = _ROOT / ".env"
if _ENV_FILE.exists():
    from dotenv import load_dotenv
    load_dotenv(_ENV_FILE, override=False)
# Log to stdout so `nohup ... > whatif_pipeline.log 2>&1` captures one stream.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
    handlers=[logging.StreamHandler(sys.stdout)],
)
logger = logging.getLogger("whatif_pipeline")
def _elapsed(start: float) -> str:
secs = time.time() - start
if secs < 60:
return f"{secs:.1f}s"
mins = secs / 60
if mins < 60:
return f"{mins:.1f}m"
return f"{mins / 60:.1f}h"
def main() -> None:
    """Run every pipeline layer sequentially, logging per-step timings.

    Layer 1 collects raw data (universe, fundamentals, prices, SEC filings,
    macro, real estate) and builds the XBRL industry ontology; Layer 2
    preprocesses panels at each granularity; Layer 3 assembles benchmarks,
    generates scenarios, collects news, enriches the benchmark, and builds
    the valuation ground truth.  Step modules are imported lazily, right
    before first use, so a broken optional step only surfaces when reached.
    Steps are resumable (each skips work when its outputs already exist).
    """
    pipeline_start = time.time()
    logger.info("=" * 60)
    logger.info("MacroLens Benchmark Pipeline -- STARTING")
    logger.info("=" * 60)
    # ================================================================
    # Layer 1: Raw Data Collection
    # ================================================================
    # Step 1: Universe -- every later collector is keyed off this ticker list.
    logger.info("--- Step 1: Collecting ticker universe ---")
    t0 = time.time()
    from projects.agent_builder.scripts.whatif_bench import collect_universe
    universe_df = collect_universe.run()
    logger.info("Step 1 done in %s: %d tickers", _elapsed(t0), len(universe_df))
    tickers = universe_df["ticker"].tolist()
    # Step 2: Fundamentals
    logger.info("--- Step 2: Collecting fundamentals ---")
    t0 = time.time()
    from projects.agent_builder.scripts.whatif_bench import collect_fundamentals
    collect_fundamentals.run(tickers=tickers)
    logger.info("Step 2 done in %s", _elapsed(t0))
    # Step 3: Daily prices
    logger.info("--- Step 3: Collecting daily prices ---")
    t0 = time.time()
    from projects.agent_builder.scripts.whatif_bench import collect_prices
    collect_prices.run(tickers=tickers)
    logger.info("Step 3 done in %s", _elapsed(t0))
    # Step 4: SEC filings (async)
    logger.info("--- Step 4: Collecting SEC filings ---")
    t0 = time.time()
    from projects.agent_builder.scripts.whatif_bench import collect_filings
    asyncio.run(collect_filings.run_async(tickers=tickers))
    logger.info("Step 4 done in %s", _elapsed(t0))
    # Step 5: Macro data (async)
    logger.info("--- Step 5: Collecting macro data ---")
    t0 = time.time()
    from projects.agent_builder.scripts.whatif_bench import collect_macro
    asyncio.run(collect_macro.run_async())
    logger.info("Step 5 done in %s", _elapsed(t0))
    # Step 6: Real estate (async)
    logger.info("--- Step 6: Collecting real estate data ---")
    t0 = time.time()
    from projects.agent_builder.scripts.whatif_bench import collect_real_estate
    asyncio.run(collect_real_estate.run_async())
    logger.info("Step 6 done in %s", _elapsed(t0))
    # Step 4b: Build XBRL ontology (from XBRL facts collected in Step 4).
    # Runs after Step 6 so all raw collection finishes before derivation.
    logger.info("--- Step 4b: Building XBRL industry ontology ---")
    t0 = time.time()
    from projects.agent_builder.scripts.whatif_bench import build_ontology
    ontology_summary = build_ontology.run()
    logger.info("Step 4b done in %s: %s", _elapsed(t0), ontology_summary)
    # ================================================================
    # Layer 2: Preprocessing (all granularities)
    # ================================================================
    from projects.agent_builder.scripts.whatif_bench import preprocess
    _GRANULARITIES = ["daily", "weekly", "monthly"]
    for gran in _GRANULARITIES:
        logger.info("--- Step 7: Preprocessing (%s) ---", gran)
        t0 = time.time()
        panel_df = preprocess.run(granularity=gran)
        logger.info(
            "Step 7 done (%s) in %s: %d rows, %d columns",
            gran, _elapsed(t0), len(panel_df), len(panel_df.columns),
        )
    # ================================================================
    # Layer 3: Benchmark Construction (all granularities)
    # ================================================================
    from projects.agent_builder.scripts.whatif_bench import assemble_benchmark
    from projects.agent_builder.scripts.whatif_bench import generate_scenarios
    for gran in _GRANULARITIES:
        # Step 8: Assemble benchmark
        logger.info("--- Step 8: Assembling benchmark (%s) ---", gran)
        t0 = time.time()
        assemble_benchmark.run(granularity=gran)
        logger.info("Step 8 done (%s) in %s", gran, _elapsed(t0))
        # Step 9: Generate scenarios
        logger.info("--- Step 9: Generating scenarios (%s) ---", gran)
        t0 = time.time()
        generate_scenarios.run(granularity=gran)
        logger.info("Step 9 done (%s) in %s", gran, _elapsed(t0))
    # Step 10: News collection (after scenarios exist)
    logger.info("--- Step 10: Collecting news ---")
    t0 = time.time()
    from projects.agent_builder.scripts.whatif_bench import collect_news
    asyncio.run(collect_news.run_async(tickers=tickers))
    logger.info("Step 10 done in %s", _elapsed(t0))
    # Step 11: Enrich benchmark with news-derived features (all granularities)
    from projects.agent_builder.scripts.whatif_bench import enrich_benchmark
    for gran in _GRANULARITIES:
        logger.info("--- Step 11: Enriching benchmark (%s) ---", gran)
        t0 = time.time()
        enrich_benchmark.run(granularity=gran)
        logger.info("Step 11 done (%s) in %s", gran, _elapsed(t0))
    # Step 12: Build valuation benchmark ground truth (all granularities).
    # Import once here instead of re-importing inside the loop; an import
    # failure is still best-effort (logged, pipeline continues to "Done").
    logger.info("--- Step 12: Building valuation benchmark ---")
    try:
        from projects.agent_builder.scripts.whatif_bench.build_valuation_tasks import (
            build_valuation_benchmark,
        )
    except Exception:
        build_valuation_benchmark = None
        logger.warning("Step 12 skipped: build_valuation_tasks not importable", exc_info=True)
    if build_valuation_benchmark is not None:
        for gran in _GRANULARITIES:
            t0 = time.time()
            try:
                summary = build_valuation_benchmark(granularity=gran)
                logger.info("Step 12 done (%s) in %s: %s", gran, _elapsed(t0), summary)
            except Exception:
                logger.warning("Step 12 (%s) skipped or failed:", gran, exc_info=True)
    # ================================================================
    # Done
    # ================================================================
    logger.info("=" * 60)
    logger.info("Pipeline COMPLETE in %s", _elapsed(pipeline_start))
    logger.info("=" * 60)


if __name__ == "__main__":
    main()
|