# MacroLens / code / run_pipeline.py
#!/usr/bin/env python
"""Run the full MacroLens benchmark pipeline end-to-end.
Usage (foreground):
uv run python -m projects.agent_builder.scripts.whatif_bench.run_pipeline
Usage (background with log):
nohup uv run python -m projects.agent_builder.scripts.whatif_bench.run_pipeline \
> whatif_pipeline.log 2>&1 &
The script mirrors the notebook's steps across 3 layers (plus XBRL
ontology construction), running non-interactively with full logging.
Each step is resumable -- if it detects existing output files it will skip.
To force a full re-run, delete the data/ directory first.
"""
from __future__ import annotations
import asyncio
import logging
import os
import sys
import time
from pathlib import Path
# Ensure project root is importable
# (this file lives four levels below the repo root; parents[4] == platform/)
_ROOT = Path(__file__).resolve().parents[4]  # platform/
if str(_ROOT) not in sys.path:
    sys.path.insert(0, str(_ROOT))
# Load .env from project root (contains API keys for SEC, FRED, EIA, RentCast)
_ENV_FILE = _ROOT / ".env"
if _ENV_FILE.exists():
    # python-dotenv is only imported when a .env file actually exists;
    # override=False keeps already-exported environment variables
    # authoritative over values from the file.
    from dotenv import load_dotenv
    load_dotenv(_ENV_FILE, override=False)
# Log INFO+ to stdout so `nohup ... > whatif_pipeline.log` captures everything.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
    handlers=[logging.StreamHandler(sys.stdout)],
)
logger = logging.getLogger("whatif_pipeline")
def _elapsed(start: float) -> str:
secs = time.time() - start
if secs < 60:
return f"{secs:.1f}s"
mins = secs / 60
if mins < 60:
return f"{mins:.1f}m"
return f"{mins / 60:.1f}h"
# Panel frequencies produced by the pipeline; every per-granularity phase
# iterates this list in the same order.
_GRANULARITIES = ["daily", "weekly", "monthly"]


def _run_layer1() -> list:
    """Layer 1: collect all raw data, then build the XBRL ontology.

    Steps 1-6 pull the ticker universe, fundamentals, daily prices, SEC
    filings, macro series and real-estate data; Step 4b derives the XBRL
    industry ontology from the facts collected in Step 4, which is why it
    runs after the collection steps despite its "4b" label.

    Returns:
        The list of universe tickers consumed by the later steps.
    """
    # Step 1: Universe
    logger.info("--- Step 1: Collecting ticker universe ---")
    t0 = time.time()
    from projects.agent_builder.scripts.whatif_bench import collect_universe
    universe_df = collect_universe.run()
    logger.info("Step 1 done in %s: %d tickers", _elapsed(t0), len(universe_df))
    tickers = universe_df["ticker"].tolist()

    # Step 2: Fundamentals
    logger.info("--- Step 2: Collecting fundamentals ---")
    t0 = time.time()
    from projects.agent_builder.scripts.whatif_bench import collect_fundamentals
    collect_fundamentals.run(tickers=tickers)
    logger.info("Step 2 done in %s", _elapsed(t0))

    # Step 3: Daily prices
    logger.info("--- Step 3: Collecting daily prices ---")
    t0 = time.time()
    from projects.agent_builder.scripts.whatif_bench import collect_prices
    collect_prices.run(tickers=tickers)
    logger.info("Step 3 done in %s", _elapsed(t0))

    # Step 4: SEC filings (async)
    logger.info("--- Step 4: Collecting SEC filings ---")
    t0 = time.time()
    from projects.agent_builder.scripts.whatif_bench import collect_filings
    asyncio.run(collect_filings.run_async(tickers=tickers))
    logger.info("Step 4 done in %s", _elapsed(t0))

    # Step 5: Macro data (async)
    logger.info("--- Step 5: Collecting macro data ---")
    t0 = time.time()
    from projects.agent_builder.scripts.whatif_bench import collect_macro
    asyncio.run(collect_macro.run_async())
    logger.info("Step 5 done in %s", _elapsed(t0))

    # Step 6: Real estate (async)
    logger.info("--- Step 6: Collecting real estate data ---")
    t0 = time.time()
    from projects.agent_builder.scripts.whatif_bench import collect_real_estate
    asyncio.run(collect_real_estate.run_async())
    logger.info("Step 6 done in %s", _elapsed(t0))

    # Step 4b: Build XBRL ontology (from XBRL facts collected in Step 4)
    logger.info("--- Step 4b: Building XBRL industry ontology ---")
    t0 = time.time()
    from projects.agent_builder.scripts.whatif_bench import build_ontology
    ontology_summary = build_ontology.run()
    logger.info("Step 4b done in %s: %s", _elapsed(t0), ontology_summary)

    return tickers


def _run_layer2() -> None:
    """Layer 2: preprocess raw data into a panel at every granularity (Step 7)."""
    from projects.agent_builder.scripts.whatif_bench import preprocess
    for gran in _GRANULARITIES:
        logger.info("--- Step 7: Preprocessing (%s) ---", gran)
        t0 = time.time()
        panel_df = preprocess.run(granularity=gran)
        logger.info(
            "Step 7 done (%s) in %s: %d rows, %d columns",
            gran, _elapsed(t0), len(panel_df), len(panel_df.columns),
        )


def _run_layer3() -> None:
    """Layer 3: assemble the benchmark and generate scenarios (Steps 8-9)."""
    from projects.agent_builder.scripts.whatif_bench import assemble_benchmark
    from projects.agent_builder.scripts.whatif_bench import generate_scenarios
    for gran in _GRANULARITIES:
        # Step 8: Assemble benchmark
        logger.info("--- Step 8: Assembling benchmark (%s) ---", gran)
        t0 = time.time()
        assemble_benchmark.run(granularity=gran)
        logger.info("Step 8 done (%s) in %s", gran, _elapsed(t0))

        # Step 9: Generate scenarios (depends on the assembled benchmark)
        logger.info("--- Step 9: Generating scenarios (%s) ---", gran)
        t0 = time.time()
        generate_scenarios.run(granularity=gran)
        logger.info("Step 9 done (%s) in %s", gran, _elapsed(t0))


def _run_news_and_valuation(tickers) -> None:
    """Steps 10-12: collect news, enrich the benchmark, build valuation tasks.

    Args:
        tickers: Universe tickers returned by ``_run_layer1``.
    """
    # Step 10: News collection -- must run after scenarios exist.
    logger.info("--- Step 10: Collecting news ---")
    t0 = time.time()
    from projects.agent_builder.scripts.whatif_bench import collect_news
    asyncio.run(collect_news.run_async(tickers=tickers))
    logger.info("Step 10 done in %s", _elapsed(t0))

    # Step 11: Enrich benchmark with news-derived features (all granularities)
    from projects.agent_builder.scripts.whatif_bench import enrich_benchmark
    for gran in _GRANULARITIES:
        logger.info("--- Step 11: Enriching benchmark (%s) ---", gran)
        t0 = time.time()
        enrich_benchmark.run(granularity=gran)
        logger.info("Step 11 done (%s) in %s", gran, _elapsed(t0))

    # Step 12: Build valuation benchmark ground truth (all granularities).
    # Best-effort: the import sits inside the try so a missing/broken
    # module is logged and skipped instead of aborting the pipeline.
    logger.info("--- Step 12: Building valuation benchmark ---")
    for gran in _GRANULARITIES:
        t0 = time.time()
        try:
            from projects.agent_builder.scripts.whatif_bench.build_valuation_tasks import (
                build_valuation_benchmark,
            )
            summary = build_valuation_benchmark(granularity=gran)
            logger.info("Step 12 done (%s) in %s: %s", gran, _elapsed(t0), summary)
        except Exception:
            logger.warning("Step 12 (%s) skipped or failed:", gran, exc_info=True)


def main() -> None:
    """Run the full MacroLens benchmark pipeline end-to-end.

    Executes the three layers (raw collection, preprocessing, benchmark
    construction) plus news enrichment and valuation ground truth, with
    per-step wall-clock logging. The underlying step modules skip work
    whose outputs already exist, so re-running is cheap.
    """
    pipeline_start = time.time()
    logger.info("=" * 60)
    logger.info("MacroLens Benchmark Pipeline -- STARTING")
    logger.info("=" * 60)

    tickers = _run_layer1()
    _run_layer2()
    _run_layer3()
    _run_news_and_valuation(tickers)

    logger.info("=" * 60)
    logger.info("Pipeline COMPLETE in %s", _elapsed(pipeline_start))
    logger.info("=" * 60)
# Script entry point (also reachable via `python -m ...run_pipeline`).
if __name__ == "__main__":
    main()