# MacroLens / code / build_valuation_tasks.py
# (Hugging Face upload-page header retained as comments so the module parses:
#  itouchz -- "Add files using upload-large-folder tool", commit 5995ef5, verified)
"""Valuation benchmark: task definitions and ground-truth construction.
Produces the artifacts required for the benchmark tasks T2-T7:
T2 - Company Valuation Accuracy (public company, all observables)
T3 - Financial Statement Generation Quality
T4 - Scenario-Conditioned Forecasting (ground truth only; scenarios
themselves are produced by `generate_scenarios.py`)
T5 - Private Company Valuation (PE simulation, financials + sector only)
T6 - Generator Evaluation (NL description -> XBRL fields)
T7 - Real Estate Valuation
Called from `assemble_benchmark.py` as the final Layer-3 build step.
Lives at the top level of `whatif_bench/` -- a peer of the other
benchmark builders (`assemble_benchmark.py`, `generate_scenarios.py`,
`enrich_benchmark.py`, `build_ontology.py`). NOT under `agents/`:
agents USE the benchmark, they don't BUILD it.
Usage:
from projects.agent_builder.scripts.whatif_bench.build_valuation_tasks import (
build_valuation_benchmark,
)
summary = build_valuation_benchmark(granularity="daily")
"""
from __future__ import annotations
import json
import logging
from pathlib import Path
from typing import Any
import numpy as np
import pandas as pd
from . import config
logger = logging.getLogger(__name__)
# Columns that are algebraically equivalent to (or directly reveal)
# market capitalisation. These MUST be excluded from any valuation-task
# input whose target is actual_market_cap, otherwise the task degenerates
# into trivial recovery. Used by Task A (and Task D's broader strip).
#   derived_pe            = market_cap / earnings
#   derived_ev            = market_cap + debt - cash
#   derived_ev_to_revenue = ev / revenue (reveals mcap)
#   derived_ev_to_ebitda  = ev / ebitda (reveals mcap)
#   derived_pb            = market_cap / book_equity
#   derived_price_to_book = alias for derived_pb
#   derived_fcf_yield     = fcf / market_cap
# frozenset: immutable on purpose -- this is a contract, not a working set.
_MARKET_CAP_LEAKAGE_COLS: frozenset[str] = frozenset({
    "derived_market_cap",
    "derived_pe",
    "derived_ev",
    "derived_ev_to_revenue",
    "derived_ev_to_ebitda",
    "derived_pb",
    "derived_price_to_book",
    "derived_fcf_yield",
})
# ===================================================================
# Task A: Company Valuation Ground Truth
# ===================================================================
def _build_task_a(
    panel: pd.DataFrame,
    company_info: pd.DataFrame,
    holdout_tickers: list[str],
    output_dir: Path,
) -> dict[str, Any]:
    """Build Task A: estimate intrinsic value of public companies.

    One task instance per holdout ticker per quarter boundary:
    - input: sector/industry plus all observable derived_*/stmt_* columns
      (minus anything in _MARKET_CAP_LEAKAGE_COLS, which would reveal the
      target algebraically);
    - target: the actual market cap at that quarter end (kept hidden).

    Writes ``valuation_inputs.parquet`` / ``valuation_ground_truth.parquet``
    into *output_dir* and returns summary stats (or an ``error`` dict).
    """
    if "derived_market_cap" not in panel.columns:
        logger.warning("derived_market_cap not in panel; skipping Task A")
        return {"error": "No market cap data"}
    # Down-sample the panel to quarterly frequency: last row per
    # ticker-quarter after a chronological sort.
    qtr = panel.copy()
    qtr["date"] = pd.to_datetime(qtr["date"])
    qtr["quarter"] = qtr["date"].dt.to_period("Q")
    qtr = qtr.sort_values("date").drop_duplicates(
        subset=["ticker", "quarter"], keep="last",
    )
    # Holdout tickers form the evaluation set; everything else is context.
    holdout = qtr[qtr["ticker"].isin(holdout_tickers)].copy()
    if holdout.empty:
        return {"error": "No holdout tickers found in panel"}
    # Observable inputs: identity/classification columns plus every
    # derived_*/stmt_* column that does not leak the target.
    observable = ["ticker", "date", "sector", "industry"]
    observable += [
        col for col in qtr.columns
        if (col.startswith("derived_") or col.startswith("stmt_"))
        and col not in _MARKET_CAP_LEAKAGE_COLS
    ]
    observable = [col for col in observable if col in holdout.columns]
    inputs = holdout[observable].copy()
    # Hidden ground truth: the market cap itself.
    gt = (
        holdout[["ticker", "date", "derived_market_cap"]]
        .rename(columns={"derived_market_cap": "actual_market_cap"})
        .dropna(subset=["actual_market_cap"])
    )
    inputs.to_parquet(output_dir / "valuation_inputs.parquet", index=False)
    gt.to_parquet(output_dir / "valuation_ground_truth.parquet", index=False)
    return {
        "n_tickers": gt["ticker"].nunique(),
        "n_instances": len(gt),
        "date_range": [str(gt["date"].min()), str(gt["date"].max())],
    }
# ===================================================================
# Task B: Financial Statement Generation Ground Truth
# ===================================================================
def _build_task_b(
    company_info: pd.DataFrame,
    holdout_tickers: list[str],
    output_dir: Path,
) -> dict[str, Any]:
    """Build Task B: generate plausible financial statements.

    For holdout tickers, the latest XBRL filings serve as ground truth.
    Input: company profile (sector, industry, size description).
    Target: actual XBRL financial statements (one row per field/value).

    Writes ``generation_inputs.parquet`` / ``generation_ground_truth.parquet``
    into *output_dir*; returns summary stats or an ``error`` dict.
    """
    # Load XBRL company tags (latest values)
    tags_path = config.XBRL_DIR / "parsed" / "company_tags.parquet"
    if not tags_path.exists():
        logger.warning("XBRL company_tags not found; skipping Task B")
        return {"error": "No XBRL data"}
    tags = pd.read_parquet(tags_path)
    holdout_tags = tags[tags["ticker"].isin(holdout_tickers)]
    if holdout_tags.empty:
        return {"error": "No XBRL tags for holdout tickers"}
    # Input: company descriptions. Guard the column access: the caller
    # passes an empty, columnless frame when company_info.csv is missing,
    # and company_info["ticker"] on that frame would raise KeyError.
    if "ticker" in company_info.columns:
        inputs = company_info[company_info["ticker"].isin(holdout_tickers)].copy()
    else:
        inputs = pd.DataFrame()
    if inputs.empty:
        inputs = pd.DataFrame({"ticker": holdout_tickers})
    # Ground truth: XBRL (field/value) pairs. Built column-wise rather
    # than via a per-row iterrows() loop: identical rows, O(columns)
    # Python-level work instead of O(rows). Optional columns fall back
    # to the same defaults row.get() used before.
    src = holdout_tags.reset_index(drop=True)
    gt = pd.DataFrame({
        "ticker": src["ticker"],
        "field": src["tag"],
        "value": src["value"],
        "taxonomy": src["taxonomy"] if "taxonomy" in src.columns else "",
        "unit": src["unit"] if "unit" in src.columns else "",
        "fiscal_year": src["fiscal_year"] if "fiscal_year" in src.columns else None,
    })
    # Save
    inputs.to_parquet(output_dir / "generation_inputs.parquet", index=False)
    gt.to_parquet(output_dir / "generation_ground_truth.parquet", index=False)
    return {
        "n_tickers": gt["ticker"].nunique(),
        "n_fields": gt["field"].nunique(),
        "n_instances": len(gt),
    }
# ===================================================================
# Task C: Scenario-Conditioned Forecasting Ground Truth
# ===================================================================
def _build_task_c(
    panel: pd.DataFrame,
    scenarios: pd.DataFrame,
    output_dir: Path,
) -> dict[str, Any]:
    """Build Task C: forecast financial impact of what-if scenarios.

    Extends existing scenarios with actual post-event changes in:
    - price return (already in scenarios)
    - revenue change (from panel statements)
    - market cap change
    Vectorised: pre-groups panel by ticker, then uses numpy searchsorted
    to avoid O(scenarios × tickers × rows) repeated DataFrame filtering.
    """
    # NOTE(review): despite the docstring, only price return and market-cap
    # change are emitted below -- no revenue change is computed. Confirm
    # whether that bullet is aspirational or a dropped feature.
    if scenarios.empty:
        return {"error": "No scenarios"}
    panel = panel.copy()
    panel["date"] = pd.to_datetime(panel["date"])
    has_mcap = "derived_market_cap" in panel.columns
    # Pre-group: store sorted int64-nanosecond date arrays + aligned price
    # arrays per ticker, so the inner loop is pure numpy (no filtering).
    ticker_arrays: dict[str, dict] = {}
    for ticker, grp in panel.groupby("ticker", sort=False):
        grp = grp.sort_values("date")
        td = {
            "dates": grp["date"].values.astype("int64"),
            "close": grp["close"].values,
        }
        if has_mcap:
            td["mcap"] = grp["derived_market_cap"].values
        ticker_arrays[ticker] = td
    # Pre-extract scenario arrays (all converted to int64 ns so they are
    # directly comparable with the per-ticker date arrays above).
    n_sc = len(scenarios)
    sc_ids = scenarios["scenario_id"].values
    sc_types = scenarios["event_type"].values if "event_type" in scenarios.columns else [""] * n_sc
    sc_event_dates = pd.to_datetime(scenarios["event_date"]).values.astype("int64")
    # Missing window columns fall back to the event date itself, which
    # collapses the window to zero width (no pre/post observation found).
    sc_pre_starts = pd.to_datetime(
        scenarios.get("pre_window_start", scenarios["event_date"])
    ).values.astype("int64")
    sc_post_ends = pd.to_datetime(
        scenarios.get("post_window_end", scenarios["event_date"])
    ).values.astype("int64")
    gt_rows = []
    for ticker, td in ticker_arrays.items():
        dates = td["dates"]
        close = td["close"]
        mcap = td.get("mcap")  # None when panel has no market-cap column
        for i in range(n_sc):
            ev_ns = sc_event_dates[i]
            pre_ns = sc_pre_starts[i]
            post_ns = sc_post_ends[i]
            # Pre: last index where pre_start <= date < event_date
            pre_lo = np.searchsorted(dates, pre_ns, side="left")
            pre_hi = np.searchsorted(dates, ev_ns, side="left")
            if pre_hi <= pre_lo:
                continue  # no trading day inside the pre window
            pre_idx = pre_hi - 1
            # Post: last index where event_date < date <= post_end
            post_lo = np.searchsorted(dates, ev_ns, side="right")
            post_hi = np.searchsorted(dates, post_ns, side="right")
            if post_hi <= post_lo:
                continue  # no trading day inside the post window
            post_idx = post_hi - 1
            pre_price = float(close[pre_idx])
            post_price = float(close[post_idx])
            # Exclude penny-stock data points (pre-event price < $0.50): a
            # one-cent move at $0.001 produces a 1000% "return" that's
            # float noise rather than scenario response. The cutoff matches
            # the SEC's penny-stock threshold and removes ~145 of 4.1M rows
            # that account for all returns >|10000%|.
            if pre_price < 0.50:
                continue
            price_return = (post_price / pre_price - 1) * 100
            mcap_return = np.nan
            if mcap is not None:
                pre_m = float(mcap[pre_idx])
                post_m = float(mcap[post_idx])
                if not np.isnan(pre_m) and pre_m > 0:
                    mcap_return = (post_m / pre_m - 1) * 100
            gt_rows.append({
                "scenario_id": sc_ids[i],
                "event_type": sc_types[i],
                "event_date": str(pd.Timestamp(ev_ns).date()),
                "ticker": ticker,
                "actual_return_pct": round(price_return, 3) if not np.isnan(price_return) else None,
                "actual_mcap_change_pct": round(mcap_return, 3) if not np.isnan(mcap_return) else None,
                "pre_price": round(pre_price, 2),
                "post_price": round(post_price, 2),
            })
    if not gt_rows:
        return {"error": "No scenario × ticker pairs with data"}
    gt = pd.DataFrame(gt_rows)
    gt.to_parquet(output_dir / "scenario_forecast_ground_truth.parquet", index=False)
    return {
        "n_scenarios": gt["scenario_id"].nunique(),
        "n_tickers": gt["ticker"].nunique(),
        "n_instances": len(gt),
        "event_types": gt["event_type"].value_counts().to_dict(),
    }
# ===================================================================
# Task E: Generator Evaluation (Financial Generation Quality)
# ===================================================================
# Maps Generator output columns to XBRL ground-truth tag names.
# The Generator produces columns like "revenue", "net_income", etc.
# while XBRL ground truth uses US-GAAP tag names like "Revenues",
# "NetIncomeLoss", etc. This mapping bridges the two.
# List order encodes priority: Task E uses the FIRST tag present in a
# company's filing and skips the rest (see the `break` in _build_task_e).
_GENERATOR_TO_XBRL: dict[str, list[str]] = {
    "revenue": ["Revenues", "RevenueFromContractWithCustomerExcludingAssessedTax", "SalesRevenueNet"],
    "net_income": ["NetIncomeLoss"],
    "gross_profit": ["GrossProfit"],
    "operating_income": ["OperatingIncomeLoss"],
    "total_assets": ["Assets"],
    "total_equity": ["StockholdersEquity", "StockholdersEquityIncludingPortionAttributableToNoncontrollingInterest"],
    "total_debt": ["LongTermDebt", "LongTermDebtNoncurrent"],
    "cash_and_equivalents": ["CashAndCashEquivalentsAtCarryingValue", "CashCashEquivalentsRestrictedCashAndRestrictedCashEquivalents"],
    "operating_cash_flow": ["NetCashProvidedByUsedInOperatingActivities"],
    "capital_expenditure": ["PaymentsToAcquirePropertyPlantAndEquipment"],
    "interest_expense": ["InterestExpense"],
    "ebitda": ["EBITDA"],  # often not a direct XBRL tag; may need derivation
}
def _build_task_e(
    company_info: pd.DataFrame,
    holdout_tickers: list[str],
    output_dir: Path,
) -> dict[str, Any]:
    """Build Task E: evaluate financial generation quality.

    For each holdout ticker, the task is: given only the company's sector,
    industry, and a text description -> generate plausible financial
    statements. Ground truth comes from actual XBRL filings.

    The difference from Task B: Task B evaluates any model's field-level
    predictions using raw XBRL tags. Task E provides inputs in the format
    the Generator agent expects (company description, sector) and maps its
    output columns to XBRL ground truth via _GENERATOR_TO_XBRL, so the
    Generator agent can be directly evaluated (per-field MAPE against the
    latest actual filings).
    """
    # Load XBRL company tags (latest values)
    tags_path = config.XBRL_DIR / "parsed" / "company_tags.parquet"
    if not tags_path.exists():
        logger.warning("XBRL company_tags not found; skipping Task E")
        return {"error": "No XBRL data"}
    tags = pd.read_parquet(tags_path)
    holdout_tags = tags[tags["ticker"].isin(holdout_tickers)]
    if holdout_tags.empty:
        return {"error": "No XBRL tags for holdout tickers"}
    # Index company_info ONCE: O(1) profile lookup per ticker instead of a
    # boolean filter per ticker, and no KeyError when company_info is the
    # empty, columnless frame produced when company_info.csv is missing.
    info_by_ticker: dict[Any, pd.Series] = {}
    if "ticker" in company_info.columns:
        for _, row in company_info.iterrows():
            info_by_ticker.setdefault(row["ticker"], row)  # keep first match
    # Build inputs: company profile in Generator-compatible format
    input_rows = []
    for ticker in holdout_tickers:
        profile = info_by_ticker.get(ticker)
        if profile is None:
            sector = "Unknown"
            industry = "Unknown"
            description = f"A company with ticker {ticker}"
        else:
            sector = str(profile.get("sector", "Unknown"))
            industry = str(profile.get("industry", "Unknown"))
            employees = profile.get("fullTimeEmployees", "")
            description = (
                f"A {sector} company in the {industry} industry"
                + (f" with approximately {int(employees)} employees" if employees and not pd.isna(employees) else "")
            )
        input_rows.append({
            "ticker": ticker,
            "sector": sector,
            "industry": industry,
            "company_description": description,
        })
    inputs = pd.DataFrame(input_rows)
    # Build ground truth: map XBRL tags to Generator column names.
    # Group once by ticker (and, inside, by tag) instead of re-filtering
    # holdout_tags with boolean masks inside the triple loop.
    tags_by_ticker = {t: g for t, g in holdout_tags.groupby("ticker")}
    gt_rows = []
    for ticker in holdout_tickers:
        tk_tags = tags_by_ticker.get(ticker)
        if tk_tags is None:
            continue
        by_tag = {t: g for t, g in tk_tags.groupby("tag")}
        for gen_col, xbrl_tags in _GENERATOR_TO_XBRL.items():
            for xbrl_tag in xbrl_tags:
                match = by_tag.get(xbrl_tag)
                if match is not None:
                    # Take the latest value
                    latest = match.sort_values("fiscal_year", ascending=False).iloc[0]
                    gt_rows.append({
                        "ticker": ticker,
                        "generator_field": gen_col,
                        "xbrl_tag": xbrl_tag,
                        "value": latest["value"],
                        "fiscal_year": latest.get("fiscal_year"),
                        "unit": latest.get("unit", ""),
                    })
                    break  # take first matching XBRL tag (priority order)
    if not gt_rows:
        return {"error": "No matching XBRL tags for Generator fields"}
    gt = pd.DataFrame(gt_rows)
    # Save
    inputs.to_parquet(output_dir / "generator_eval_inputs.parquet", index=False)
    gt.to_parquet(output_dir / "generator_eval_ground_truth.parquet", index=False)
    logger.info(
        "Task E (Generator Eval): %d tickers, %d field-value pairs, %d unique fields",
        gt["ticker"].nunique(), len(gt), gt["generator_field"].nunique(),
    )
    return {
        "n_tickers": gt["ticker"].nunique(),
        "n_field_value_pairs": len(gt),
        "n_unique_fields": gt["generator_field"].nunique(),
        "fields": gt["generator_field"].value_counts().to_dict(),
    }
# ===================================================================
# Task F: Real Estate Valuation (Rent/Price Estimation)
# ===================================================================
def _normalise_address(addr: str) -> str:
"""Normalise an address string for matching: lowercase, strip whitespace."""
if not isinstance(addr, str):
return ""
return " ".join(addr.lower().strip().split())
def _merge_rentals(
    props: pd.DataFrame,
    rentals: pd.DataFrame,
) -> pd.DataFrame:
    """Merge rental data into properties by normalised address or lat/lon proximity.

    For each property row, attempt to find a matching rental listing.
    Match strategy:
      1. Exact normalised address match.
      2. Lat/lon proximity (< 0.0005 degrees, roughly 50 m) for unmatched rows
         that share the same zip code.

    Returns the properties DataFrame with an added ``rent`` column.
    """
    # --- Prepare normalised keys ---
    # Work on copies so the caller's frames are never mutated.
    props = props.copy()
    rentals = rentals.copy()
    # Identify the address column in each DataFrame. First match wins;
    # the for/else assigns an empty key when no address column exists.
    for col in ("formatted_address", "addressLine1", "addressFull", "address"):
        if col in props.columns:
            props["_norm_addr"] = props[col].apply(_normalise_address)
            break
    else:
        props["_norm_addr"] = ""
    for col in ("formatted_address", "addressLine1", "addressFull", "address"):
        if col in rentals.columns:
            rentals["_norm_addr"] = rentals[col].apply(_normalise_address)
            break
    else:
        rentals["_norm_addr"] = ""
    # Rename the rentals price column to rent
    rent_price_col = "price"  # rentals.csv uses "price" for monthly rent
    if rent_price_col not in rentals.columns:
        # Best-effort: no rent data at all, return props with an all-NaN rent.
        logger.warning("rentals.csv has no 'price' column; no rent data to merge")
        props["rent"] = np.nan
        props.drop(columns=["_norm_addr"], inplace=True)
        return props
    rentals["rent"] = pd.to_numeric(rentals[rent_price_col], errors="coerce")
    # De-duplicate rentals: keep first per normalised address.
    # NOTE(review): treating "first" as the latest listing assumes
    # rentals.csv is ordered newest-first -- confirm against the collector.
    rentals_dedup = (
        rentals[rentals["_norm_addr"] != ""]
        .drop_duplicates(subset=["_norm_addr"], keep="first")
    )
    # --- Strategy 1: exact normalised address merge ---
    rent_lookup = rentals_dedup.set_index("_norm_addr")["rent"]
    props["rent"] = props["_norm_addr"].map(rent_lookup)
    n_addr_matched = props["rent"].notna().sum()
    logger.info("Task F rent merge: %d/%d matched by address", n_addr_matched, len(props))
    # --- Strategy 2: lat/lon proximity for unmatched rows ---
    unmatched_mask = props["rent"].isna()
    # .get with an empty-Series default: presumably this yields all-False
    # after notna()/alignment when the coordinate column is missing --
    # TODO(review) confirm the alignment semantics on the target pandas.
    has_coords_props = (
        unmatched_mask
        & props.get("latitude", pd.Series(dtype=float)).notna()
        & props.get("longitude", pd.Series(dtype=float)).notna()
    )
    if has_coords_props.any() and "latitude" in rentals.columns and "longitude" in rentals.columns:
        # Build a lookup of rentals by zip for faster spatial matching
        zip_col_r = "zip_code" if "zip_code" in rentals.columns else None
        zip_col_p = "zip_code" if "zip_code" in props.columns else None
        rentals_with_coords = rentals[
            rentals["latitude"].notna() & rentals["longitude"].notna() & rentals["rent"].notna()
        ].copy()
        if not rentals_with_coords.empty and zip_col_r and zip_col_p:
            rental_groups = {
                z: grp[["latitude", "longitude", "rent"]].values
                for z, grp in rentals_with_coords.groupby(zip_col_r)
            }
            proximity_threshold = 0.0005  # ~50 m
            for idx in props.index[has_coords_props]:
                z = props.at[idx, zip_col_p] if zip_col_p else None
                if z not in rental_groups:
                    continue
                candidates = rental_groups[z]  # shape (N, 3): lat, lon, rent
                # Flat Euclidean distance in degrees -- adequate at ~50 m scale.
                dlat = candidates[:, 0] - props.at[idx, "latitude"]
                dlon = candidates[:, 1] - props.at[idx, "longitude"]
                dist = np.sqrt(dlat ** 2 + dlon ** 2)
                best = np.argmin(dist)
                if dist[best] < proximity_threshold:
                    props.at[idx, "rent"] = candidates[best, 2]
        n_geo_matched = props["rent"].notna().sum() - n_addr_matched
        logger.info("Task F rent merge: %d additional matched by lat/lon proximity", n_geo_matched)
    props.drop(columns=["_norm_addr"], inplace=True)
    return props
def _build_task_f(
    output_dir: Path,
) -> dict[str, Any]:
    """Build Task F: evaluate real estate rent and price estimation.

    Uses collected RentCast data as ground truth. Loads both
    ``properties.csv`` (sale prices) and ``rentals.csv`` (monthly rents),
    merges them by normalised address (with lat/lon proximity fallback),
    and produces a combined dataset with both ``price`` and ``rent``
    target columns.

    For each property the task is: given location (metro), property type,
    size (sqft, beds, baths), and year built, predict rent and/or price.
    The holdout is a random 30 % of combined properties (seeded).
    Training properties serve as the comps database.
    """
    properties_path = config.REAL_ESTATE_DIR / "properties.csv"
    rentals_path = config.REAL_ESTATE_DIR / "rentals.csv"
    if not properties_path.exists() and not rentals_path.exists():
        logger.warning("Neither properties.csv nor rentals.csv found; skipping Task F")
        return {"error": "No real estate data"}
    # ------------------------------------------------------------------
    # 1. Load and standardise properties (sale price data)
    # ------------------------------------------------------------------
    if properties_path.exists():
        props = pd.read_csv(properties_path)
    else:
        props = pd.DataFrame()
    # Source CSVs mix snake_case and camelCase; map both onto canonical
    # names. A rename only fires when the canonical name is absent, so
    # earlier entries in this list take priority.
    _rename_priority = [
        ("square_footage", "sqft"),
        ("squareFootage", "sqft"),
        ("propertyType", "property_type"),
        ("yearBuilt", "year_built"),
        ("last_sale_price", "price"),
        ("lastSalePrice", "price"),
        ("zipCode", "zip_code"),
        ("lotSize", "lot_size"),
        # Address: prefer formatted_address > addressLine1
        ("formatted_address", "address"),
        ("addressLine1", "address"),
        ("addressFull", "address"),
    ]
    for old_name, new_name in _rename_priority:
        if old_name in props.columns and new_name not in props.columns:
            props = props.rename(columns={old_name: new_name})
    # Ensure numeric price
    if "price" in props.columns:
        props["price"] = pd.to_numeric(props["price"], errors="coerce")
        # Remove non-positive prices (data errors). NaN compares False in
        # `<= 0`, so missing prices survive (they may still gain a rent).
        neg_price = props["price"] <= 0
        if neg_price.any():
            logger.info("Task F: removing %d rows with non-positive price", neg_price.sum())
            props = props[~neg_price | props["price"].isna()]
    # Deduplicate by address (keep first occurrence)
    if "address" in props.columns:
        before = len(props)
        props = props.drop_duplicates(subset=["address"], keep="first")
        deduped = before - len(props)
        if deduped > 0:
            logger.info("Task F: deduplicated %d rows by address", deduped)
    # ------------------------------------------------------------------
    # 2. Load rentals and merge rent into properties
    # ------------------------------------------------------------------
    if rentals_path.exists():
        rentals_raw = pd.read_csv(rentals_path)
        if not rentals_raw.empty:
            # Standardise rental column names the same way
            for old_name, new_name in _rename_priority:
                if old_name in rentals_raw.columns and new_name not in rentals_raw.columns:
                    rentals_raw = rentals_raw.rename(columns={old_name: new_name})
            if not props.empty:
                props = _merge_rentals(props, rentals_raw)
            else:
                # No properties file -- use rentals as the base
                props = rentals_raw.copy()
                props["rent"] = pd.to_numeric(props.get("price", pd.Series(dtype=float)), errors="coerce")
                props["price"] = np.nan  # no sale price available
            # Append rental-only rows (addresses not already in props).
            # When props itself came from rentals_raw, every address is
            # already present and this appends nothing.
            if not props.empty and "address" in props.columns:
                existing_addrs = set(props["address"].apply(_normalise_address))
                if "address" in rentals_raw.columns:
                    rentals_raw["_norm_addr"] = rentals_raw["address"].apply(_normalise_address)
                    new_rentals = rentals_raw[~rentals_raw["_norm_addr"].isin(existing_addrs)].copy()
                    new_rentals.drop(columns=["_norm_addr"], inplace=True)
                    if not new_rentals.empty:
                        new_rentals["rent"] = pd.to_numeric(
                            new_rentals.get("price", pd.Series(dtype=float)), errors="coerce",
                        )
                        # Avoid column clash: rentals "price" is rent, not sale price
                        if "price" in new_rentals.columns:
                            new_rentals = new_rentals.drop(columns=["price"])
                        new_rentals["price"] = np.nan  # no sale price for rental-only rows
                        props = pd.concat([props, new_rentals], ignore_index=True)
                        logger.info("Task F: appended %d rental-only rows", len(new_rentals))
    else:
        # No rentals file -- price-only (existing behaviour)
        props["rent"] = np.nan
    if props.empty:
        return {"error": "Empty real estate data after merge"}
    # Ensure rent column exists
    if "rent" not in props.columns:
        props["rent"] = np.nan
    # ------------------------------------------------------------------
    # 3. Filter to properties with at least one target (rent or price)
    # ------------------------------------------------------------------
    # NOTE(review): dropna assumes a "price" column exists by now; that
    # holds on every path above except a properties.csv with no
    # recognisable price column at all -- confirm that input never occurs.
    props = props.dropna(subset=["price", "rent"], how="all")
    if len(props) < 10:
        return {"error": f"Too few properties with rent/price data ({len(props)})"}
    # ------------------------------------------------------------------
    # 3b. Per-property TIME-AXIS features
    # ------------------------------------------------------------------
    # Each property in the RentCast snapshot carries a `last_sale_date`
    # (when it last changed hands). This timestamp is the per-property
    # historical observation that gives T7 a time axis even though the
    # train/test split itself is geographic (by address). Methods can use
    # `last_sale_date` and `years_since_last_sale` as features alongside
    # static attributes.
    # NOTE(review): hard-coded scrape date -- confirm it matches the actual
    # RentCast collection date; a stale value shifts years_since_last_sale
    # uniformly.
    SCRAPE_DATE = pd.Timestamp("2026-04-11", tz="UTC")
    if "last_sale_date" in props.columns:
        props["last_sale_date"] = pd.to_datetime(
            props["last_sale_date"], errors="coerce", utc=True,
        )
        props["years_since_last_sale"] = (
            (SCRAPE_DATE - props["last_sale_date"]).dt.total_seconds() / (365.25 * 86400)
        )
    # ------------------------------------------------------------------
    # 4. Address-holdout 70/30 split (seeded). T7 is a static valuation
    #    task -- the OOD signal is across properties, not across time --
    #    so the train/test cutoff is geographic. The time axis lives in
    #    the per-property features added in step 3b. (Rows were
    #    deduplicated by address in step 1, so this random ROW split is
    #    effectively a split by address.)
    # ------------------------------------------------------------------
    rng = np.random.RandomState(config.BENCHMARK_SEED)
    holdout_mask = rng.random(len(props)) < 0.3
    train_props = props[~holdout_mask].copy()
    test_props = props[holdout_mask].copy()
    # ------------------------------------------------------------------
    # 5. Build inputs and ground truth
    # ------------------------------------------------------------------
    input_candidates = [
        "address", "city", "state", "zip_code",
        "property_type", "bedrooms", "bathrooms", "sqft",
        "lotSize", "lot_size", "year_built", "county",
        "latitude", "longitude",
        # Per-property time-axis features (Option B: time-aware features
        # alongside the static attributes; address-holdout split):
        "last_sale_date", "years_since_last_sale",
    ]
    input_cols = [c for c in input_candidates if c in test_props.columns]
    inputs = test_props[input_cols].copy()
    # Ground truth: address + both targets
    gt_cols = []
    if "address" in test_props.columns:
        gt_cols.append("address")
    gt_cols.extend(["price", "rent"])
    gt = test_props[gt_cols].copy()
    # ------------------------------------------------------------------
    # 6. Save
    # ------------------------------------------------------------------
    train_props.to_parquet(output_dir / "re_train_properties.parquet", index=False)
    inputs.to_parquet(output_dir / "re_eval_inputs.parquet", index=False)
    gt.to_parquet(output_dir / "re_eval_ground_truth.parquet", index=False)
    n_price = gt["price"].notna().sum()
    n_rent = gt["rent"].notna().sum()
    n_both = (gt["price"].notna() & gt["rent"].notna()).sum()
    logger.info(
        "Task F (RE Eval): %d train, %d test; price=%d, rent=%d, both=%d",
        len(train_props), len(test_props), n_price, n_rent, n_both,
    )
    return {
        "n_train": len(train_props),
        "n_test": len(test_props),
        "n_price": int(n_price),
        "n_rent": int(n_rent),
        "n_both": int(n_both),
        "target_cols": ["price", "rent"],
        "input_cols": input_cols,
    }
# ===================================================================
# Task D: Private Company Valuation (PE Simulation)
# ===================================================================
# Columns derived from market price — must be stripped for private-company
# simulation because a PE analyst would not have access to market data.
_PRICE_DERIVED_COLS = {
"derived_market_cap", "derived_pe", "derived_ev", "derived_ev_to_revenue",
"derived_ev_to_ebitda", "derived_fcf_yield", "derived_pb",
"derived_price_to_book", "derived_debt_to_equity",
"close", "open", "high", "low", "volume", "adj_close",
"shares_outstanding",
}
def _build_task_d(
    panel: pd.DataFrame,
    company_info: pd.DataFrame,
    holdout_tickers: list[str],
    output_dir: Path,
) -> dict[str, Any]:
    """Build Task D: value an unseen company as if it were private.

    PE simulation: training may see public-market data, but the test
    inputs carry only financial statements plus sector/industry --
    every price-derived column is stripped. The holdout tickers and the
    ground truth match Task A; only the input columns differ.
    """
    if "derived_market_cap" not in panel.columns:
        logger.warning("derived_market_cap not in panel; skipping Task D")
        return {"error": "No market cap data"}
    # Quarterly down-sampling: last observation per ticker-quarter.
    qtr = panel.copy()
    qtr["date"] = pd.to_datetime(qtr["date"])
    qtr["quarter"] = qtr["date"].dt.to_period("Q")
    qtr = qtr.sort_values("date").drop_duplicates(
        subset=["ticker", "quarter"], keep="last",
    )
    holdout = qtr[qtr["ticker"].isin(holdout_tickers)].copy()
    if holdout.empty:
        return {"error": "No holdout tickers found in panel"}
    # A PE analyst sees statements + classification, never market data.
    # Fundamentals like derived_effective_tax_rate / derived_cost_of_debt
    # would be computable without prices, but derived_beta / derived_wacc
    # need stock returns, so the whole price-derived set is stripped for a
    # clean simulation.
    observable = ["ticker", "date", "sector", "industry"]
    observable += [c for c in qtr.columns if c.startswith("stmt_")]
    observable = [
        c for c in observable
        if c in holdout.columns and c not in _PRICE_DERIVED_COLS
    ]
    inputs = holdout[observable].copy()
    # Ground truth -- identical construction to Task A.
    gt = (
        holdout[["ticker", "date", "derived_market_cap"]]
        .rename(columns={"derived_market_cap": "actual_market_cap"})
        .dropna(subset=["actual_market_cap"])
    )
    inputs.to_parquet(output_dir / "private_valuation_inputs.parquet", index=False)
    gt.to_parquet(output_dir / "private_valuation_ground_truth.parquet", index=False)
    logger.info(
        "Task D (Private Valuation): %d tickers, %d instances, %d input cols (no price data)",
        gt["ticker"].nunique(), len(gt), len(observable),
    )
    return {
        "n_tickers": gt["ticker"].nunique(),
        "n_instances": len(gt),
        "n_input_cols": len(observable),
        "input_cols": observable,
        "date_range": [str(gt["date"].min()), str(gt["date"].max())],
    }
# ===================================================================
# Main entry point
# ===================================================================
def build_valuation_benchmark(
    granularity: str = "daily",
) -> dict[str, Any]:
    """Build all valuation benchmark artifacts for a given granularity.

    Reads from existing processed panel and benchmark data.
    Writes to ``benchmark/{granularity}/``.

    Args:
        granularity: Panel frequency key understood by the ``config``
            path helpers (e.g. ``"daily"``).

    Returns:
        Summary dict with per-task statistics. Tasks that could not be
        built carry an ``{"error": ...}`` entry instead of stats; a
        missing panel aborts the whole build with a top-level error.
    """
    bench_dir = config.get_benchmark_dir(granularity)
    bench_dir.mkdir(parents=True, exist_ok=True)
    # Load existing data
    proc_dir = config.get_processed_dir(granularity)
    panel_path = proc_dir / "panel.parquet"
    if not panel_path.exists():
        # Try CSV fallback
        panel_path = proc_dir / "panel.csv"
        if not panel_path.exists():
            return {"error": f"No panel data at {proc_dir}"}
    panel = pd.read_parquet(panel_path) if panel_path.suffix == ".parquet" else pd.read_csv(panel_path)
    # Company info (optional -- downstream tasks degrade to ticker-only
    # inputs when this file is missing)
    info_path = config.FUNDAMENTALS_DIR / "company_info.csv"
    company_info = pd.read_csv(info_path) if info_path.exists() else pd.DataFrame()
    # Scenarios (produced upstream by generate_scenarios.py; optional)
    scenarios_path = bench_dir / "scenarios.parquet"
    scenarios = pd.read_parquet(scenarios_path) if scenarios_path.exists() else pd.DataFrame()
    # Holdout tickers (random subset, seeded for reproducibility; the
    # sort makes the draw deterministic across runs and platforms)
    all_tickers = sorted(panel["ticker"].unique().tolist())
    rng = np.random.RandomState(config.BENCHMARK_SEED)
    n_holdout = max(1, int(len(all_tickers) * config.VALUATION_HOLDOUT_RATIO))
    holdout_tickers = rng.choice(all_tickers, size=n_holdout, replace=False).tolist()
    logger.info(
        "Building valuation benchmark: %d total tickers, %d holdout",
        len(all_tickers), len(holdout_tickers),
    )
    # Build each task (Tasks A/B/D/E share the same holdout tickers;
    # Task C is scenario-driven and Task F is real-estate-only)
    summary: dict[str, Any] = {
        "granularity": granularity,
        "n_tickers_total": len(all_tickers),
        "n_holdout": len(holdout_tickers),
        "holdout_tickers": holdout_tickers,
    }
    summary["task_a"] = _build_task_a(panel, company_info, holdout_tickers, bench_dir)
    summary["task_b"] = _build_task_b(company_info, holdout_tickers, bench_dir)
    summary["task_c"] = _build_task_c(panel, scenarios, bench_dir)
    summary["task_d"] = _build_task_d(panel, company_info, holdout_tickers, bench_dir)
    summary["task_e"] = _build_task_e(company_info, holdout_tickers, bench_dir)
    summary["task_f"] = _build_task_f(bench_dir)
    # Task definition JSON: the machine-readable manifest evaluators load
    # to locate each task's artifacts and metrics
    task_def = {
        "benchmark_name": "whatif_valuation_v1",
        "tasks": {
            "A_valuation_accuracy": {
                "description": "Estimate intrinsic equity value of public companies",
                "input": "valuation_inputs.parquet",
                "ground_truth": "valuation_ground_truth.parquet",
                "metrics": ["MAPE", "median_APE", "rank_correlation", "directional_accuracy"],
                "primary_metric": "MAPE",
                "target_col": "actual_market_cap",
            },
            "B_statement_generation": {
                "description": "Generate plausible financial statements from company description",
                "input": "generation_inputs.parquet",
                "ground_truth": "generation_ground_truth.parquet",
                "metrics": ["per_field_MAPE", "balance_equation_accuracy", "ontology_compliance"],
                "primary_metric": "per_field_MAPE",
            },
            "C_scenario_forecast": {
                "description": "Forecast financial impact of what-if scenarios",
                "input": "scenarios.parquet",
                "ground_truth": "scenario_forecast_ground_truth.parquet",
                "metrics": ["return_MAE", "directional_accuracy", "CI_calibration"],
                "primary_metric": "return_MAE",
            },
            "D_private_valuation": {
                "description": "Value an unseen company using only financials + sector (PE simulation)",
                "input": "private_valuation_inputs.parquet",
                "ground_truth": "private_valuation_ground_truth.parquet",
                "metrics": ["MAPE", "median_APE", "rank_correlation", "directional_accuracy"],
                "primary_metric": "median_APE",
                "target_col": "actual_market_cap",
                "note": "Same holdout tickers as Task A but all price-derived columns stripped from inputs",
            },
            "E_generator_evaluation": {
                "description": "Generate financial statements for unseen companies and compare to actual XBRL filings",
                "input": "generator_eval_inputs.parquet",
                "ground_truth": "generator_eval_ground_truth.parquet",
                "metrics": ["per_field_MAPE", "balance_equation_accuracy"],
                "primary_metric": "per_field_MAPE",
                "note": "Evaluates the Generator agent's output against actual company financials",
            },
            "F_real_estate_valuation": {
                "description": "Estimate rent and price for unseen properties given location and features",
                "input": "re_eval_inputs.parquet",
                "ground_truth": "re_eval_ground_truth.parquet",
                "train_data": "re_train_properties.parquet",
                "metrics": ["rent_MAPE", "price_MAPE"],
                "primary_metric": "rent_MAPE",
                "note": "70/30 random split of RentCast properties; train set serves as comps database",
            },
        },
        "holdout_tickers": holdout_tickers,
    }
    # default=str covers non-JSON-native values (numpy scalars, Timestamps)
    (bench_dir / "valuation_tasks.json").write_text(
        json.dumps(task_def, indent=2, default=str),
    )
    logger.info("Valuation benchmark complete: %s", summary)
    return summary