# MacroLens/code/collect_macro.py
"""Step 5: Collect macro-economic context data.
Uses:
- FredClient from projects.tools.finance.fred (interest rates, indices, dollar index)
- EIAClient from projects.tools.commodity.eia (crude oil, natural gas)
Resume logic:
- FRED: per-series file check + freshness validation.
- EIA: per-file freshness check (not per-category!).
If any processed CSV is stale (max date > STALE_DAYS behind END_DATE),
it is deleted and re-fetched.
Output:
data/macro/fred_{SERIES_ID}.csv
data/macro/crude_oil/{name}_raw.csv + {name}.csv
data/macro/natural_gas/{name}_raw.csv + {name}.csv
"""
from __future__ import annotations

import asyncio
import logging
import os
import tempfile
from pathlib import Path

import pandas as pd

from projects.tools.commodity.eia import EIAClient
from projects.tools.finance.fred import FredClient

from . import config

logger = logging.getLogger(__name__)
_MAX_RETRIES = 3

# A processed CSV is considered stale if its latest date is more than
# _STALE_DAYS before config.END_DATE.
_STALE_DAYS = 90
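
# Expected attributes on the sibling ``config`` module, inferred from usage
# below (values shown are illustrative assumptions, not the project's actual
# settings):
#   config.START_DATE / config.END_DATE -- date strings accepted by the API
#       clients and by pd.Timestamp, e.g. "2015-01-01" / "2024-12-31"
#   config.MACRO_DIR                    -- pathlib.Path of the data/macro dir
#   config.FRED_SERIES                  -- mapping of {series_id: description},
#       e.g. {"DGS10": "10-Year Treasury Constant Maturity Rate"}
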
async def _retry_async(coro_factory, description: str, retries: int = _MAX_RETRIES):
"""Call *coro_factory()* up to *retries* times with exponential backoff."""
for attempt in range(retries):
try:
return await coro_factory()
except Exception as exc:
if attempt < retries - 1:
wait = 2 ** attempt * 3 # 3s, 6s, 12s
logger.warning("%s failed (attempt %d/%d), retrying in %ds: %s",
description, attempt + 1, retries, wait, exc)
await asyncio.sleep(wait)
else:
raise
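
# Usage sketch (mirrors the FRED call site below). A factory is required
# because a coroutine object can only be awaited once; the lambda builds a
# fresh awaitable for each retry attempt:
#   df = await _retry_async(lambda: client.fetch_series_data(...), "FRED X")
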
def _atomic_csv_write(df: pd.DataFrame, dest: Path) -> None:
"""Write a CSV atomically: write to temp file first, then rename."""
dest.parent.mkdir(parents=True, exist_ok=True)
fd, tmp_path = tempfile.mkstemp(suffix=".csv", dir=dest.parent)
try:
os.close(fd)
df.to_csv(tmp_path, index=False)
os.replace(tmp_path, dest)
except BaseException:
try:
os.unlink(tmp_path)
except OSError:
pass
raise
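
# Design note: the temp file is created in dest.parent so the final
# os.replace() stays on one filesystem, where it is atomic on both POSIX and
# Windows. Readers (and the resume logic below) therefore never observe a
# half-written CSV; they see either the old file or the complete new one.
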
def _is_stale(csv_path: Path) -> bool:
"""Check if a CSV's latest date is too far behind config.END_DATE."""
if not csv_path.exists():
return True # missing = stale
try:
df = pd.read_csv(csv_path, nrows=0)
date_col = next(
(c for c in df.columns if "date" in c.lower()
or "period" in c.lower() or "time" in c.lower()),
None,
)
if date_col is None:
return False # can't determine, assume OK
df = pd.read_csv(csv_path, usecols=[date_col])
df[date_col] = pd.to_datetime(df[date_col], errors="coerce")
max_date = df[date_col].max()
if pd.isna(max_date):
return True
cutoff = pd.Timestamp(config.END_DATE) - pd.Timedelta(days=_STALE_DAYS)
if max_date < cutoff:
logger.warning(
"STALE: %s latest date is %s (cutoff %s, %d days behind)",
csv_path.name, max_date.date(), cutoff.date(),
(pd.Timestamp(config.END_DATE) - max_date).days,
)
return True
return False
except Exception as exc:
logger.warning("Could not check freshness of %s (treating as stale): %s", csv_path.name, exc)
return True # corrupt / unreadable → treat as stale so it gets re-fetched
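
# Worked example (dates are illustrative): with config.END_DATE = "2024-12-31"
# and _STALE_DAYS = 90, the cutoff is 2024-10-02. A CSV whose newest row is
# 2024-09-15 is stale (107 days behind) and gets deleted and re-fetched; one
# ending 2024-11-30 is fresh and skipped.
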
# ---------------------------------------------------------------------------
# FRED collection (per-series resume + freshness)
# ---------------------------------------------------------------------------
async def _collect_fred(client: FredClient) -> None:
"""Fetch every FRED series defined in config."""
fred_dir = config.MACRO_DIR
fred_dir.mkdir(parents=True, exist_ok=True)
for series_id, description in config.FRED_SERIES.items():
out_path = fred_dir / f"fred_{series_id}.csv"
if out_path.exists() and not _is_stale(out_path):
logger.info("FRED %s already exists and is fresh, skipping.", series_id)
continue
reason = "stale" if out_path.exists() else "missing"
logger.info("Fetching FRED %s (%s) [%s] ...", series_id, description, reason)
try:
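            # The default argument (sid=series_id) binds the loop variable at
            # definition time, so every retry of this factory fetches the
            # intended series rather than whatever series_id holds last.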
df = await _retry_async(
lambda sid=series_id: client.fetch_series_data(
series_id=sid,
start_date=config.START_DATE,
end_date=config.END_DATE,
),
description=f"FRED {series_id}",
)
_atomic_csv_write(df, out_path)
logger.info("Saved FRED %s (%d rows).", series_id, len(df))
except Exception as exc:
logger.warning("FRED %s failed after retries: %s", series_id, exc)
# ---------------------------------------------------------------------------
# EIA collection (per-file freshness, NOT per-category!)
# ---------------------------------------------------------------------------
async def _collect_eia_category(
client: EIAClient,
category: str,
out_dir: Path,
fetch_fn,
) -> None:
"""Fetch an EIA category, re-downloading only missing or stale files."""
out_dir.mkdir(parents=True, exist_ok=True)
# Inventory existing processed files
existing = {f.stem: f for f in out_dir.glob("*.csv") if "_raw" not in f.stem}
stale_files = [name for name, path in existing.items() if _is_stale(path)]
fresh_count = len(existing) - len(stale_files)
if stale_files:
logger.info(
"EIA %s: %d fresh files, %d stale to re-fetch: %s",
category, fresh_count, len(stale_files), stale_files,
)
# Delete stale files so they get re-written
for name in stale_files:
for suffix in ["", "_raw"]:
p = out_dir / f"{name}{suffix}.csv"
if p.exists():
p.unlink()
logger.info(" Deleted stale %s", p.name)
    elif existing:
        # NOTE: endpoints that have never been fetched are only discovered by
        # calling the API, so an all-fresh directory is assumed complete.
        logger.info("EIA %s: all %d files are fresh, skipping.", category, len(existing))
        return
# Fetch all data from the API (EIA client returns all endpoints at once)
logger.info("Fetching EIA %s data ...", category)
try:
results = await _retry_async(fetch_fn, description=f"EIA {category}")
if not results:
logger.warning("EIA %s: all endpoints returned empty (check API key / network).",
category)
return
for name, raw_df, processed_df in results:
processed_path = out_dir / f"{name}.csv"
raw_path = out_dir / f"{name}_raw.csv"
# Only write if the file is missing or was stale
if not processed_path.exists() or name in stale_files:
_atomic_csv_write(raw_df, raw_path)
_atomic_csv_write(processed_df, processed_path)
logger.info(" Saved %s %s (%d raw, %d processed rows).",
category, name, len(raw_df), len(processed_df))
else:
logger.info(" %s %s already fresh, not overwriting.", category, name)
except Exception as exc:
logger.error("EIA %s collection failed after retries: %s: %s",
category, type(exc).__name__, exc, exc_info=True)
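
# Design note: because the EIA client returns a whole category per call, a
# single stale file still triggers a full-category fetch; the per-file check
# above only avoids overwriting files that are still fresh. Finer-grained
# fetching would need per-endpoint methods on EIAClient (not assumed to exist).
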
async def _collect_eia(client: EIAClient) -> None:
"""Fetch crude oil and natural gas data from EIA (per-file freshness)."""
await _collect_eia_category(
client,
category="crude_oil",
out_dir=config.MACRO_DIR / "crude_oil",
fetch_fn=lambda: client.get_all_crude_oil_data(),
)
await _collect_eia_category(
client,
category="natural_gas",
out_dir=config.MACRO_DIR / "natural_gas",
fetch_fn=lambda: client.get_all_natural_gas_data(),
)

async def run_async() -> None:
    """Execute Step 5 (async)."""
    fred_key = os.getenv("FRED_API_KEY")
    if not fred_key:
        raise ValueError("Set the FRED_API_KEY environment variable.")
    eia_key = os.getenv("EIA_API_KEY")
    if not eia_key:
        raise ValueError("Set the EIA_API_KEY environment variable.")
fred_client = FredClient(api_key=fred_key)
eia_client = EIAClient(api_key=eia_key)
await _collect_fred(fred_client)
await _collect_eia(eia_client)
logger.info("Macro data collection complete.")

def run() -> None:
    """Sync wrapper around the async implementation."""
    asyncio.run(run_async())
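
if __name__ == "__main__":
    # Standalone entry point -- an assumption, not part of the original
    # pipeline wiring, which presumably calls run() itself. Because of the
    # relative "from . import config", invoke this file as a module, e.g.:
    #   python -m <package>.collect_macro
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s %(levelname)s %(name)s: %(message)s",
    )
    run()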