# quantarena-artifacts / tools / build_release_bundle.py
# NIPS26Repo — Initial anonymized artifact release (commit 149513d, verified)
#!/usr/bin/env python3
"""Build the artifact release bundle.
Copies all paper-referenced run artifacts into release_data/ with a
human-readable directory structure and produces:
- release_data/manifest.json
- release_data/derived/all_trades.csv
- release_data/derived/all_metrics.csv
By default, the script infers the repository root from its installed
location under release_data/tools/. For nonstandard layouts, set
QUANTARENA_REPO or QUANTARENA_INGEST_DIR explicitly.
"""
from __future__ import annotations
import csv
import json
import os
import shutil
from pathlib import Path
from typing import Iterable
# Path configuration, resolved once at import time.
SCRIPT_DIR = Path(__file__).resolve().parent
# Repo root; default assumes this script lives at <repo>/release_data/tools/
# (two levels below the root).  Override with QUANTARENA_REPO otherwise.
REPO = Path(os.environ.get("QUANTARENA_REPO", SCRIPT_DIR.parents[1])).resolve()
# Source locations for run artifacts and supporting material.
SRC_BACKTEST = REPO / "reports" / "backtest"
SRC_MP = REPO / "reports" / "multi_personality"
# Pre-aggregated ingest data; overridable via QUANTARENA_INGEST_DIR.
SRC_INGEST = Path(
    os.environ.get("QUANTARENA_INGEST_DIR", REPO / "latex" / "data" / "artifact_ingest")
).resolve()
SRC_DOCS = REPO / "latex" / "docs"
SRC_AUDIT = REPO / "latex" / "audit"
# Output bundle root; wiped and rebuilt from scratch by main().
RELEASE = Path(os.environ.get("QUANTARENA_RELEASE_DIR", REPO / "release_data")).resolve()
# Run-ID tables mapping each paper experiment to its source run directories
# under SRC_BACKTEST.  Tuple layout: (display_name, mandate_dirname, run_id)
# unless noted otherwise.
# Exp 1: six-month main case study, US market.
EXP1_US = [
    ("Fundamental Value", "fundamental_value", "mp_fundamental_value_20260409_165131_876468"),
    ("Macro Tactical", "macro_tactical", "mp_macro_tactical_20260409_165131_876505"),
    ("Behavioral Momentum", "behavioral_momentum", "mp_behavioral_momentum_20260409_165131_876499"),
    ("Low-Volatility", "low_volatility", "mp_smart_beta_passive_20260409_165131_876510"),
    ("Equal-Weight", "equal_weight", "mp_equal_weight_index_20260409_165131_876514"),
]
# Exp 1: six-month main case study, CN A-share market.
EXP1_CN = [
    ("Fundamental Value", "fundamental_value", "mp_fundamental_value_20260413_163905_878961"),
    ("Macro Tactical", "macro_tactical", "mp_macro_tactical_20260413_163905_878979"),
    ("Behavioral Momentum", "behavioral_momentum", "mp_behavioral_momentum_20260413_163905_878974"),
    ("Low-Volatility", "low_volatility", "mp_smart_beta_passive_20260413_163905_878984"),
    ("Equal-Weight", "equal_weight", "mp_equal_weight_index_20260413_163905_878987"),
]
# Exp 2: independent US reproducibility re-run (Run 2), same config as Exp 1.
EXP2_US_R2 = [
    ("Fundamental Value", "fundamental_value", "mp_fundamental_value_20260425_035029_413185"),
    ("Macro Tactical", "macro_tactical", "mp_macro_tactical_20260425_035029_413208"),
    ("Behavioral Momentum", "behavioral_momentum", "mp_behavioral_momentum_20260425_035029_413202"),
    ("Low-Volatility", "low_volatility", "mp_smart_beta_passive_20260425_035029_413212"),
    ("Equal-Weight", "equal_weight", "mp_equal_weight_index_20260425_035029_413216"),
]
# Exp 3: mechanism ablation.
# Tuple layout: (display, mandate-variant dir, run_id, mandate, variant)
EXP3 = [
    ("FV Full", "fundamental_value_full", "20260417_160942", "fundamental_value", "full"),
    ("FV No filter", "fundamental_value_no_filter", "20260417_191443", "fundamental_value", "no_filter"),
    ("BM Full", "behavioral_momentum_full", "20260417_220721", "behavioral_momentum", "full"),
    ("BM No guardrails", "behavioral_momentum_no_guardrails","20260418_004927", "behavioral_momentum", "no_guardrails"),
    ("MT Full", "macro_tactical_full", "20260418_033940", "macro_tactical", "full"),
    ("MT No tilt", "macro_tactical_no_tilt", "20260418_072633", "macro_tactical", "no_tilt"),
    ("LV Reference", "low_volatility_reference", "20260418_110947", "low_volatility", "reference"),
    ("EqW Reference", "equal_weight_reference", "20260418_111210", "equal_weight", "reference"),
]
# Exp 4: backend robustness (GPT-5.4 backend).
EXP4 = [
    ("Fundamental Value", "fundamental_value", "20260423_124148"),
    ("Macro Tactical", "macro_tactical", "20260424_094955"),
    ("Behavioral Momentum", "behavioral_momentum", "20260424_132728_372104"),
    ("Low-Volatility", "low_volatility", "20260424_094956"),
    ("Equal-Weight", "equal_weight", "20260424_132723_271683"),
]
def copy_run(src_run_id: str, dst_dir: Path) -> dict:
    """Copy all files from one backtest run directory into *dst_dir*.

    Parameters:
        src_run_id: directory name under ``SRC_BACKTEST`` identifying the run.
        dst_dir: destination directory, created if missing.

    Returns a summary dict: ``{"status": "missing"}`` when the source run
    does not exist; otherwise ``{"status": "ok"}`` plus the copied file
    names and, when a metrics.json was among them, key run metadata.
    """
    src = SRC_BACKTEST / src_run_id
    if not src.exists():
        return {"src_run_id": src_run_id, "status": "missing"}
    dst_dir.mkdir(parents=True, exist_ok=True)
    files_copied = []
    for f in src.iterdir():
        if f.is_file():
            shutil.copy2(f, dst_dir / f.name)
            files_copied.append(f.name)
    # Enrich the summary with raw run metadata when metrics.json is present.
    metrics_file = dst_dir / "metrics.json"
    if metrics_file.exists():
        m = json.loads(metrics_file.read_text())
        # Tolerate a missing/partial "metrics" section instead of raising
        # KeyError (the original indexed m["metrics"] directly while every
        # other field already used .get).
        metrics = m.get("metrics", {})
        return {
            "src_run_id": src_run_id,
            "start_date": m.get("start_date"),
            "end_date": m.get("end_date"),
            "market": m.get("market"),
            "personality": m.get("config", {}).get("personality"),
            "total_return": metrics.get("total_return"),
            "total_trades": metrics.get("total_trades"),
            "files": files_copied,
            "status": "ok",
        }
    return {"src_run_id": src_run_id, "files": files_copied, "status": "ok"}
def build_manifest() -> dict:
    """Copy every paper-referenced artifact into RELEASE and build the manifest.

    Each experiment section copies its run directories via copy_run() and
    records a per-run summary dict under ``manifest["experiments"]``.
    Missing source directories are tolerated throughout (recorded as
    "missing" or simply skipped) so the script can run on partial checkouts.

    Returns the manifest dict, ready to be serialized to manifest.json.
    """
    manifest: dict = {
        "version": "1.0",
        "title": "QuantArena artifact bundle",
        "source_commit": "a4bf9e6 (sector matrix fix) / f2c9921 (audit manifest)",
        "experiments": {},
    }
    # --- Exp 1 US ---
    exp1_us = {"description": "Six-month main case study (US, Sep 1 2025 – Feb 28 2026, 124 trading days, $100,000 initial)", "runs": {}}
    for label, dirn, rid in EXP1_US:
        info = copy_run(rid, RELEASE / "runs" / "exp1_caseStudy_us_6m" / dirn)
        info["display_name"] = label
        info["bundle_path"] = f"runs/exp1_caseStudy_us_6m/{dirn}"
        exp1_us["runs"][dirn] = info
    manifest["experiments"]["exp1_caseStudy_us_6m"] = exp1_us
    # --- Exp 1 CN ---
    exp1_cn = {"description": "Six-month main case study (CN A-share, Sep 1 2025 – Feb 28 2026, 102 trading days, $100,000 initial)", "runs": {}}
    for label, dirn, rid in EXP1_CN:
        info = copy_run(rid, RELEASE / "runs" / "exp1_caseStudy_cn_6m" / dirn)
        info["display_name"] = label
        info["bundle_path"] = f"runs/exp1_caseStudy_cn_6m/{dirn}"
        exp1_cn["runs"][dirn] = info
    manifest["experiments"]["exp1_caseStudy_cn_6m"] = exp1_cn
    # --- Exp 2 reproducibility ---
    exp2 = {"description": "US 6M reproducibility re-run (Run 2). Independent re-run of all five mandates with identical config to Exp 1.", "runs": {}}
    for label, dirn, rid in EXP2_US_R2:
        info = copy_run(rid, RELEASE / "runs" / "exp2_reproducibility_us_6m_run2" / dirn)
        info["display_name"] = label
        info["bundle_path"] = f"runs/exp2_reproducibility_us_6m_run2/{dirn}"
        exp2["runs"][dirn] = info
    manifest["experiments"]["exp2_reproducibility_us_6m_run2"] = exp2
    # --- Exp 3 mechanism ablation ---
    # EXP3 tuples additionally carry (mandate, variant) for the ablation grid.
    exp3 = {"description": "US 3M mechanism ablation (Dec 1 2025 – Feb 28 2026). For each LLM-conditioned mandate, a Full variant and one Ablated variant.", "runs": {}}
    for label, dirn, rid, mandate, variant in EXP3:
        info = copy_run(rid, RELEASE / "runs" / "exp3_mechanism_ablation_us_3m" / dirn)
        info["display_name"] = label
        info["mandate"] = mandate
        info["variant"] = variant
        info["bundle_path"] = f"runs/exp3_mechanism_ablation_us_3m/{dirn}"
        exp3["runs"][dirn] = info
    manifest["experiments"]["exp3_mechanism_ablation_us_3m"] = exp3
    # --- Exp 4 backend robustness ---
    exp4 = {"description": "US 3M backend robustness: GPT-5.4 replaces DeepSeek-V3.2 reasoning backend; everything else fixed.", "runs": {}}
    for label, dirn, rid in EXP4:
        info = copy_run(rid, RELEASE / "runs" / "exp4_backend_robustness_us_3m_gpt54" / dirn)
        info["display_name"] = label
        info["bundle_path"] = f"runs/exp4_backend_robustness_us_3m_gpt54/{dirn}"
        exp4["runs"][dirn] = info
    manifest["experiments"]["exp4_backend_robustness_us_3m_gpt54"] = exp4
    # --- Exp 5: docs only (no run artifacts are redistributed) ---
    exp5_dir = RELEASE / "exp5_efficiency_ablation_cn_10t_6m"
    exp5_dir.mkdir(parents=True, exist_ok=True)
    src_md = SRC_DOCS / "2026-03-23-cn-10tickers-6m-efficiency-ablation.md"
    if src_md.exists():
        shutil.copy2(src_md, exp5_dir / "source_doc.md")
    (exp5_dir / "README.md").write_text(
        "# Exp 5 — Execution efficiency ablation (CN, 10 tickers, 6M)\n\n"
        "This experiment compares three execution paths (E0 baseline / E1 cold-cache / E2 warm-cache) "
        "to demonstrate runtime/cost reductions enabled by shared phase-one preprocessing.\n\n"
        "The original run artifacts are not redistributed in this bundle. "
        "All numbers cited in the paper come from `source_doc.md` (copied here verbatim from "
        "`latex/docs/2026-03-23-cn-10tickers-6m-efficiency-ablation.md`).\n"
    )
    manifest["experiments"]["exp5_efficiency_ablation_cn_10t_6m"] = {
        "description": "Execution efficiency ablation (E0/E1/E2). Documented only — run artifacts not redistributed.",
        "source_doc": "exp5_efficiency_ablation_cn_10t_6m/source_doc.md",
    }
    # --- Comparison bundles ---
    comp_us = SRC_MP / "20260409_165121_176781"
    comp_cn = SRC_MP / "20260413_163854_715331"
    for label, src in [("us_6m", comp_us), ("cn_6m", comp_cn)]:
        dst = RELEASE / "comparisons" / label
        dst.mkdir(parents=True, exist_ok=True)
        # Guard against a missing comparison source dir, consistent with the
        # universe/gpt54/audit sections below; the original iterated
        # src.iterdir() unconditionally and raised FileNotFoundError.
        if src.exists():
            for f in src.iterdir():
                if f.is_file():
                    shutil.copy2(f, dst / f.name)
    manifest["comparisons"] = {
        "us_6m": {
            "description": "US 6M main case-study cross-mandate comparison bundle",
            "source": "reports/multi_personality/20260409_165121_176781",
            "bundle_path": "comparisons/us_6m",
        },
        "cn_6m": {
            "description": "CN 6M main case-study cross-mandate comparison bundle",
            "source": "reports/multi_personality/20260413_163854_715331",
            "bundle_path": "comparisons/cn_6m",
        },
    }
    # --- Universe ---
    universe_dst = RELEASE / "universe"
    universe_dst.mkdir(parents=True, exist_ok=True)
    universe_src = SRC_INGEST / "universe" / "sector_style_universe.csv"
    if universe_src.exists():
        shutil.copy2(universe_src, universe_dst / "sector_style_universe.csv")
    manifest["universe"] = {
        "description": "5x4 sector/style universe (20 US + 20 CN tickers)",
        "bundle_path": "universe/sector_style_universe.csv",
    }
    # --- GPT-5.4 robustness CSVs ---
    gpt54_src = SRC_INGEST / "gpt54_robustness"
    gpt54_dst = RELEASE / "derived" / "gpt54_robustness"
    gpt54_dst.mkdir(parents=True, exist_ok=True)
    if gpt54_src.exists():
        for f in gpt54_src.iterdir():
            if f.is_file():
                shutil.copy2(f, gpt54_dst / f.name)
    manifest["derived"] = {
        "gpt54_robustness": "Pre-aggregated CSVs for the GPT-5.4 backend robustness analysis",
    }
    # --- Audit (mirror) ---
    audit_dst = RELEASE / "audit"
    audit_dst.mkdir(parents=True, exist_ok=True)
    if SRC_AUDIT.exists():
        for f in SRC_AUDIT.iterdir():
            if f.is_file():
                shutil.copy2(f, audit_dst / f.name)
    manifest["audit"] = "Reproducibility manifest (mirror of latex/audit/)"
    return manifest
def write_all_metrics(manifest: dict) -> int:
    """Write derived/all_metrics.csv as a long-format flat table.

    Walks every copied run referenced by *manifest*, flattens its
    metrics.json into one row per run, and writes a single CSV under
    ``RELEASE / "derived"``.

    Returns the number of data rows written (0 when no metrics.json was
    found, in which case no file is created).
    """
    metric_keys = [
        "total_return", "annualized_return", "max_drawdown", "max_drawdown_duration",
        "sharpe_ratio", "sortino_ratio", "volatility", "total_trades", "trading_days",
        "avg_position_days", "win_rate", "initial_cash", "final_value", "final_cash",
        "benchmark_total_return", "excess_return", "tracking_error", "information_ratio",
        "beta", "alpha", "avg_cash_ratio", "avg_gross_exposure",
        "value_filter_pass_rate", "value_consistency_score",
    ]
    rows = []
    for exp_name, exp in manifest["experiments"].items():
        if "runs" not in exp:
            continue  # docs-only experiments (e.g. Exp 5) have no runs
        for run_dirname, info in exp["runs"].items():
            metrics_path = RELEASE / info["bundle_path"] / "metrics.json"
            if not metrics_path.exists():
                continue
            m = json.loads(metrics_path.read_text())
            base = {
                "experiment": exp_name,
                "mandate_dir": run_dirname,
                "display_name": info.get("display_name"),
                "src_run_id": info.get("src_run_id"),
                "market": m.get("market"),
                "start_date": m.get("start_date"),
                "end_date": m.get("end_date"),
                "personality": m.get("config", {}).get("personality"),
            }
            # Ablation runs (Exp 3) carry extra mandate/variant columns.
            mandate = info.get("mandate")
            variant = info.get("variant")
            if mandate is not None:
                base["mandate"] = mandate
            if variant is not None:
                base["variant"] = variant
            # Tolerate a missing "metrics" section rather than KeyError.
            metrics = m.get("metrics", {})
            for k in metric_keys:
                base[k] = metrics.get(k)
            rows.append(base)
    if not rows:
        return 0
    out = RELEASE / "derived" / "all_metrics.csv"
    out.parent.mkdir(parents=True, exist_ok=True)
    # Stable column order: identity columns first, then metrics; keep only
    # columns that actually occur in at least one row.  (The original also
    # computed a sorted key list that was immediately discarded.)
    priority = ["experiment", "market", "mandate_dir", "display_name", "mandate", "variant",
                "src_run_id", "personality", "start_date", "end_date"] + metric_keys
    fieldnames = [k for k in priority if any(k in r for r in rows)]
    # newline="" is required by the csv module to avoid blank rows on Windows.
    with out.open("w", newline="") as f:
        w = csv.DictWriter(f, fieldnames=fieldnames, extrasaction="ignore")
        w.writeheader()
        w.writerows(rows)
    return len(rows)
def write_all_trades(manifest: dict) -> int:
    """Concatenate every run's trades.csv into one flat table.

    Each trade row is prefixed with experiment/run identity columns so the
    combined file is self-describing.  Runs without a trades.csv are skipped.

    Returns the number of trade rows written (0 when none were found, in
    which case no file is created).
    """
    rows = []
    for exp_name, exp in manifest["experiments"].items():
        if "runs" not in exp:
            continue  # docs-only experiments have no runs
        for run_dirname, info in exp["runs"].items():
            trades_path = RELEASE / info["bundle_path"] / "trades.csv"
            if not trades_path.exists():
                continue
            # newline="" per csv-module docs for both reading and writing.
            with trades_path.open(newline="") as f:
                for r in csv.DictReader(f):
                    rows.append({
                        "experiment": exp_name,
                        "mandate_dir": run_dirname,
                        "display_name": info.get("display_name"),
                        "src_run_id": info.get("src_run_id"),
                        **r,
                    })
    if not rows:
        return 0
    out = RELEASE / "derived" / "all_trades.csv"
    out.parent.mkdir(parents=True, exist_ok=True)
    fieldnames = ["experiment", "mandate_dir", "display_name", "src_run_id",
                  "date", "ticker", "action", "shares", "price", "value", "justification"]
    with out.open("w", newline="") as f:
        w = csv.DictWriter(f, fieldnames=fieldnames, extrasaction="ignore")
        w.writeheader()
        w.writerows(rows)
    return len(rows)
def main() -> None:
    """Rebuild the release bundle from scratch.

    Wipes RELEASE, copies every artifact, writes manifest.json plus the two
    derived CSVs, prints a file-count/size summary, and finally runs the
    companion strip script (when present) to enforce the v1 metric schema.
    """
    # Function-local imports: only needed for the optional post-processing
    # step (original buried these mid-function as _sp/_sys aliases).
    import subprocess
    import sys

    if RELEASE.exists():
        print(f"Cleaning existing {RELEASE}")
        shutil.rmtree(RELEASE)
    RELEASE.mkdir(parents=True, exist_ok=True)
    print("Building manifest and copying artifacts...")
    manifest = build_manifest()
    # default=str covers any Path values that end up in the manifest.
    (RELEASE / "manifest.json").write_text(json.dumps(manifest, indent=2, default=str))
    print("Building derived/all_metrics.csv...")
    n_metrics = write_all_metrics(manifest)
    print(f" -> {n_metrics} rows")
    print("Building derived/all_trades.csv...")
    n_trades = write_all_trades(manifest)
    print(f" -> {n_trades} rows")
    # Final tree summary.
    print("\nFinal layout:")
    total_size = 0
    total_files = 0
    for p in sorted(RELEASE.rglob("*")):
        if p.is_file():
            total_files += 1
            total_size += p.stat().st_size
    print(f" {total_files} files, {total_size/1024/1024:.2f} MB")
    # v1 intentionally excludes benchmark-relative fields; see audit/README.md
    # "Metric scope (v1)".
    strip_script = SCRIPT_DIR / "strip_benchmark_relative_metrics.py"
    if strip_script.exists():
        print()
        print("Running strip_benchmark_relative_metrics.py to enforce v1 schema...")
        subprocess.run([sys.executable, str(strip_script)], check=True)


if __name__ == "__main__":
    main()