| """Valuation benchmark: task definitions and ground-truth construction. |
| |
| Produces the artifacts required for the benchmark tasks T2-T7: |
| T2 - Company Valuation Accuracy (public company, all observables) |
| T3 - Financial Statement Generation Quality |
| T4 - Scenario-Conditioned Forecasting (ground truth only; scenarios |
| themselves are produced by `generate_scenarios.py`) |
| T5 - Private Company Valuation (PE simulation, financials + sector only) |
| T6 - Generator Evaluation (NL description -> XBRL fields) |
| T7 - Real Estate Valuation |
| |
| Called from `assemble_benchmark.py` as the final Layer-3 build step. |
| |
| Lives at the top level of `whatif_bench/` -- a peer of the other |
| benchmark builders (`assemble_benchmark.py`, `generate_scenarios.py`, |
| `enrich_benchmark.py`, `build_ontology.py`). NOT under `agents/`: |
| agents USE the benchmark, they don't BUILD it. |
| |
| Usage: |
| from projects.agent_builder.scripts.whatif_bench.build_valuation_tasks import ( |
| build_valuation_benchmark, |
| ) |
| summary = build_valuation_benchmark(granularity="daily") |
| """ |

from __future__ import annotations

import json
import logging
from pathlib import Path
from typing import Any

import numpy as np
import pandas as pd

from . import config

logger = logging.getLogger(__name__)


_MARKET_CAP_LEAKAGE_COLS: frozenset[str] = frozenset({
    "derived_market_cap",
    "derived_pe",
    "derived_ev",
    "derived_ev_to_revenue",
    "derived_ev_to_ebitda",
    "derived_pb",
    "derived_price_to_book",
    "derived_fcf_yield",
})
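# Rationale: each excluded column either is the target (derived_market_cap)
# or lets it be recovered algebraically, e.g. market cap = derived_pe
# × net income, or derived_pb × book equity.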


def _build_task_a(
    panel: pd.DataFrame,
    company_info: pd.DataFrame,
    holdout_tickers: list[str],
    output_dir: Path,
) -> dict[str, Any]:
    """Build Task A: estimate intrinsic value of public companies.

    For each quarterly boundary × ticker, create:
    - input: company description, sector, industry, recent financials
    - target: actual market cap (hidden)
    """
    if "derived_market_cap" not in panel.columns:
        logger.warning("derived_market_cap not in panel; skipping Task A")
        return {"error": "No market cap data"}

    quarterly = panel.copy()
    quarterly["date"] = pd.to_datetime(quarterly["date"])
    quarterly["quarter"] = quarterly["date"].dt.to_period("Q")

    # Keep the last observation per ticker-quarter.
    quarterly = quarterly.sort_values("date").drop_duplicates(
        subset=["ticker", "quarter"], keep="last",
    )

    eval_mask = quarterly["ticker"].isin(holdout_tickers)
    eval_df = quarterly[eval_mask].copy()

    if eval_df.empty:
        return {"error": "No holdout tickers found in panel"}

    # Inputs: statement and derived columns, minus anything that trivially
    # leaks the target (see _MARKET_CAP_LEAKAGE_COLS above).
    input_cols = ["ticker", "date", "sector", "industry"]
    for c in quarterly.columns:
        if c in _MARKET_CAP_LEAKAGE_COLS:
            continue
        if c.startswith("derived_") or c.startswith("stmt_"):
            input_cols.append(c)
    input_cols = [c for c in input_cols if c in eval_df.columns]
    inputs = eval_df[input_cols].copy()

    # Ground truth: actual market cap at each quarter boundary.
    gt = eval_df[["ticker", "date", "derived_market_cap"]].copy()
    gt = gt.rename(columns={"derived_market_cap": "actual_market_cap"})
    gt = gt.dropna(subset=["actual_market_cap"])

    inputs.to_parquet(output_dir / "valuation_inputs.parquet", index=False)
    gt.to_parquet(output_dir / "valuation_ground_truth.parquet", index=False)

    return {
        "n_tickers": gt["ticker"].nunique(),
        "n_instances": len(gt),
        "date_range": [str(gt["date"].min()), str(gt["date"].max())],
    }
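

# Illustrative sketch (not part of the build): how an evaluator might score
# Task A predictions against the ground truth written above. ``preds`` is a
# hypothetical frame with columns ["ticker", "date", "predicted_market_cap"].
def _example_score_task_a(preds: pd.DataFrame, bench_dir: Path) -> dict[str, float]:
    gt = pd.read_parquet(bench_dir / "valuation_ground_truth.parquet")
    merged = gt.merge(preds, on=["ticker", "date"], how="inner")
    ape = (
        (merged["predicted_market_cap"] - merged["actual_market_cap"]).abs()
        / merged["actual_market_cap"]
    )
    return {"MAPE": float(ape.mean() * 100), "median_APE": float(ape.median() * 100)}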


def _build_task_b(
    company_info: pd.DataFrame,
    holdout_tickers: list[str],
    output_dir: Path,
) -> dict[str, Any]:
    """Build Task B: generate plausible financial statements.

    For holdout tickers, the latest XBRL filings serve as ground truth.
    Input: company profile (sector, industry, size description).
    Target: actual XBRL financial statements.
    """
    tags_path = config.XBRL_DIR / "parsed" / "company_tags.parquet"
    if not tags_path.exists():
        logger.warning("XBRL company_tags not found; skipping Task B")
        return {"error": "No XBRL data"}

    tags = pd.read_parquet(tags_path)
    holdout_tags = tags[tags["ticker"].isin(holdout_tickers)]

    if holdout_tags.empty:
        return {"error": "No XBRL tags for holdout tickers"}

    # Inputs: company profiles; fall back to bare tickers if no profile
    # data is available.
    inputs = company_info[company_info["ticker"].isin(holdout_tickers)].copy()
    if inputs.empty:
        inputs = pd.DataFrame({"ticker": holdout_tickers})

    # Ground truth: one row per (ticker, XBRL tag) observation.
    gt_rows = []
    for _, row in holdout_tags.iterrows():
        gt_rows.append({
            "ticker": row["ticker"],
            "field": row["tag"],
            "value": row["value"],
            "taxonomy": row.get("taxonomy", ""),
            "unit": row.get("unit", ""),
            "fiscal_year": row.get("fiscal_year"),
        })

    gt = pd.DataFrame(gt_rows)

    inputs.to_parquet(output_dir / "generation_inputs.parquet", index=False)
    gt.to_parquet(output_dir / "generation_ground_truth.parquet", index=False)

    return {
        "n_tickers": gt["ticker"].nunique(),
        "n_fields": gt["field"].nunique(),
        "n_instances": len(gt),
    }
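

# Illustrative sketch (not part of the build): reshaping the long-format
# Task B ground truth into one wide row per ticker, keeping the latest
# fiscal year per (ticker, field).
def _example_pivot_generation_gt(gt: pd.DataFrame) -> pd.DataFrame:
    latest = (
        gt.sort_values("fiscal_year")
        .drop_duplicates(subset=["ticker", "field"], keep="last")
    )
    return latest.pivot(index="ticker", columns="field", values="value")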


def _build_task_c(
    panel: pd.DataFrame,
    scenarios: pd.DataFrame,
    output_dir: Path,
) -> dict[str, Any]:
    """Build Task C: forecast financial impact of what-if scenarios.

    Extends existing scenarios with actual post-event changes:
    - price return (close-to-close across the event window)
    - market cap change (where derived_market_cap is available)

    Vectorised: pre-groups panel by ticker, then uses numpy searchsorted
    to avoid O(scenarios × tickers × rows) repeated DataFrame filtering.
    """
    if scenarios.empty:
        return {"error": "No scenarios"}

    panel = panel.copy()
    panel["date"] = pd.to_datetime(panel["date"])
    has_mcap = "derived_market_cap" in panel.columns

    # Pre-group the panel into date-sorted per-ticker arrays so window
    # lookups below are plain searchsorted calls.
    ticker_arrays: dict[str, dict] = {}
    for ticker, grp in panel.groupby("ticker", sort=False):
        grp = grp.sort_values("date")
        td = {
            "dates": grp["date"].values.astype("int64"),
            "close": grp["close"].values,
        }
        if has_mcap:
            td["mcap"] = grp["derived_market_cap"].values
        ticker_arrays[ticker] = td

    # Scenario windows as int64 nanosecond timestamps, matching `dates`.
    n_sc = len(scenarios)
    sc_ids = scenarios["scenario_id"].values
    if "event_type" in scenarios.columns:
        sc_types = scenarios["event_type"].values
    else:
        sc_types = [""] * n_sc
    sc_event_dates = pd.to_datetime(scenarios["event_date"]).values.astype("int64")
    sc_pre_starts = pd.to_datetime(
        scenarios.get("pre_window_start", scenarios["event_date"])
    ).values.astype("int64")
    sc_post_ends = pd.to_datetime(
        scenarios.get("post_window_end", scenarios["event_date"])
    ).values.astype("int64")

    gt_rows = []
    for ticker, td in ticker_arrays.items():
        dates = td["dates"]
        close = td["close"]
        mcap = td.get("mcap")

        for i in range(n_sc):
            ev_ns = sc_event_dates[i]
            pre_ns = sc_pre_starts[i]
            post_ns = sc_post_ends[i]

            # Pre window [pre_start, event): take the last row strictly
            # before the event date.
            pre_lo = np.searchsorted(dates, pre_ns, side="left")
            pre_hi = np.searchsorted(dates, ev_ns, side="left")
            if pre_hi <= pre_lo:
                continue
            pre_idx = pre_hi - 1

            # Post window (event, post_end]: take the last row at or
            # before the window end, strictly after the event date.
            post_lo = np.searchsorted(dates, ev_ns, side="right")
            post_hi = np.searchsorted(dates, post_ns, side="right")
            if post_hi <= post_lo:
                continue
            post_idx = post_hi - 1

            pre_price = float(close[pre_idx])
            post_price = float(close[post_idx])

            # Skip penny-stock prices: tiny denominators turn ordinary
            # moves into absurd percentage returns.
            if pre_price < 0.50:
                continue
            price_return = (post_price / pre_price - 1) * 100

            mcap_return = np.nan
            if mcap is not None:
                pre_m = float(mcap[pre_idx])
                post_m = float(mcap[post_idx])
                if not np.isnan(pre_m) and pre_m > 0:
                    mcap_return = (post_m / pre_m - 1) * 100

            gt_rows.append({
                "scenario_id": sc_ids[i],
                "event_type": sc_types[i],
                "event_date": str(pd.Timestamp(ev_ns).date()),
                "ticker": ticker,
                "actual_return_pct": round(price_return, 3) if not np.isnan(price_return) else None,
                "actual_mcap_change_pct": round(mcap_return, 3) if not np.isnan(mcap_return) else None,
                "pre_price": round(pre_price, 2),
                "post_price": round(post_price, 2),
            })

    if not gt_rows:
        return {"error": "No scenario × ticker pairs with data"}

    gt = pd.DataFrame(gt_rows)
    gt.to_parquet(output_dir / "scenario_forecast_ground_truth.parquet", index=False)

    return {
        "n_scenarios": gt["scenario_id"].nunique(),
        "n_tickers": gt["ticker"].nunique(),
        "n_instances": len(gt),
        "event_types": gt["event_type"].value_counts().to_dict(),
    }
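

# Illustrative sketch (not part of the build): the window selection above on
# a toy series. With rows on Jan 2, 3, 6 and 7 and an event on Jan 5, the pre
# index is the last row strictly before the event and the post index the last
# row inside (event, post_end].
def _example_window_selection() -> tuple[int, int]:
    dates = pd.to_datetime(
        ["2024-01-02", "2024-01-03", "2024-01-06", "2024-01-07"]
    ).values.astype("int64")
    ev = pd.Timestamp("2024-01-05").value
    post_end = pd.Timestamp("2024-01-07").value
    pre_idx = np.searchsorted(dates, ev, side="left") - 1          # 1 -> Jan 3
    post_idx = np.searchsorted(dates, post_end, side="right") - 1  # 3 -> Jan 7
    return int(pre_idx), int(post_idx)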


# Maps each field the Generator agent emits to candidate XBRL tags, listed
# in priority order; Task E takes the first tag present in a filing.
_GENERATOR_TO_XBRL: dict[str, list[str]] = {
    "revenue": ["Revenues", "RevenueFromContractWithCustomerExcludingAssessedTax", "SalesRevenueNet"],
    "net_income": ["NetIncomeLoss"],
    "gross_profit": ["GrossProfit"],
    "operating_income": ["OperatingIncomeLoss"],
    "total_assets": ["Assets"],
    "total_equity": ["StockholdersEquity", "StockholdersEquityIncludingPortionAttributableToNoncontrollingInterest"],
    "total_debt": ["LongTermDebt", "LongTermDebtNoncurrent"],
    "cash_and_equivalents": ["CashAndCashEquivalentsAtCarryingValue", "CashCashEquivalentsRestrictedCashAndRestrictedCashEquivalents"],
    "operating_cash_flow": ["NetCashProvidedByUsedInOperatingActivities"],
    "capital_expenditure": ["PaymentsToAcquirePropertyPlantAndEquipment"],
    "interest_expense": ["InterestExpense"],
    "ebitda": ["EBITDA"],
}
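

# Illustrative sketch (not part of the build): resolving a Generator field to
# the first of its candidate XBRL tags present in a filing, the same priority
# rule _build_task_e applies below.
def _example_resolve_xbrl_tag(field: str, available_tags: set[str]) -> str | None:
    for tag in _GENERATOR_TO_XBRL.get(field, []):
        if tag in available_tags:
            return tag
    return None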


def _build_task_e(
    company_info: pd.DataFrame,
    holdout_tickers: list[str],
    output_dir: Path,
) -> dict[str, Any]:
    """Build Task E: evaluate financial generation quality.

    For each holdout ticker, the task is: given only the company's sector,
    industry, and a text description, generate plausible financial
    statements. Ground truth comes from actual XBRL filings.

    This task evaluates the Generator agent's ability to produce realistic
    financials for an unseen company, measured by per-field MAPE against
    the latest actual filings.

    The difference from Task B: Task B evaluates any model's field-level
    predictions using raw XBRL tags. Task E specifically provides inputs
    in the format the Generator agent expects (company description, sector)
    and maps its output columns to XBRL ground truth, so the Generator
    agent can be directly evaluated.
    """
    tags_path = config.XBRL_DIR / "parsed" / "company_tags.parquet"
    if not tags_path.exists():
        logger.warning("XBRL company_tags not found; skipping Task E")
        return {"error": "No XBRL data"}

    tags = pd.read_parquet(tags_path)
    holdout_tags = tags[tags["ticker"].isin(holdout_tickers)]

    if holdout_tags.empty:
        return {"error": "No XBRL tags for holdout tickers"}

    # Inputs: a short natural-language profile per holdout ticker.
    input_rows = []
    for ticker in holdout_tickers:
        info_row = company_info[company_info["ticker"] == ticker]
        if info_row.empty:
            sector = "Unknown"
            industry = "Unknown"
            description = f"A company with ticker {ticker}"
        else:
            r = info_row.iloc[0]
            sector = str(r.get("sector", "Unknown"))
            industry = str(r.get("industry", "Unknown"))
            employees = r.get("fullTimeEmployees", "")
            description = f"A {sector} company in the {industry} industry"
            if employees and not pd.isna(employees):
                description += f" with approximately {int(employees)} employees"

        input_rows.append({
            "ticker": ticker,
            "sector": sector,
            "industry": industry,
            "company_description": description,
        })

    inputs = pd.DataFrame(input_rows)

    # Ground truth: for each Generator field, the first candidate XBRL tag
    # present for the ticker, taking the latest fiscal year.
    gt_rows = []
    for ticker in holdout_tickers:
        tk_tags = holdout_tags[holdout_tags["ticker"] == ticker]
        if tk_tags.empty:
            continue
        for gen_col, xbrl_tags in _GENERATOR_TO_XBRL.items():
            for xbrl_tag in xbrl_tags:
                match = tk_tags[tk_tags["tag"] == xbrl_tag]
                if not match.empty:
                    latest = match.sort_values("fiscal_year", ascending=False).iloc[0]
                    gt_rows.append({
                        "ticker": ticker,
                        "generator_field": gen_col,
                        "xbrl_tag": xbrl_tag,
                        "value": latest["value"],
                        "fiscal_year": latest.get("fiscal_year"),
                        "unit": latest.get("unit", ""),
                    })
                    break

    if not gt_rows:
        return {"error": "No matching XBRL tags for Generator fields"}

    gt = pd.DataFrame(gt_rows)

    inputs.to_parquet(output_dir / "generator_eval_inputs.parquet", index=False)
    gt.to_parquet(output_dir / "generator_eval_ground_truth.parquet", index=False)

    logger.info(
        "Task E (Generator Eval): %d tickers, %d field-value pairs, %d unique fields",
        gt["ticker"].nunique(), len(gt), gt["generator_field"].nunique(),
    )

    return {
        "n_tickers": gt["ticker"].nunique(),
        "n_field_value_pairs": len(gt),
        "n_unique_fields": gt["generator_field"].nunique(),
        "fields": gt["generator_field"].value_counts().to_dict(),
    }
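

# Illustrative sketch (not part of the build): per-field absolute percentage
# error for one ticker against the Task E ground truth. ``generated`` is a
# hypothetical mapping from Generator field name to predicted value.
def _example_generator_ape(
    generated: dict[str, float],
    gt_one_ticker: pd.DataFrame,
) -> dict[str, float]:
    apes: dict[str, float] = {}
    for _, row in gt_one_ticker.iterrows():
        field, actual = row["generator_field"], float(row["value"])
        if field in generated and actual != 0:
            apes[field] = abs(generated[field] - actual) / abs(actual) * 100
    return apes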


def _normalise_address(addr: str) -> str:
    """Normalise an address string for matching: lowercase, collapse whitespace."""
    if not isinstance(addr, str):
        return ""
    # e.g. "  123 Main St,\n  Unit 4  " -> "123 main st, unit 4"
    return " ".join(addr.lower().strip().split())


def _merge_rentals(
    props: pd.DataFrame,
    rentals: pd.DataFrame,
) -> pd.DataFrame:
    """Merge rental data into properties by normalised address or lat/lon proximity.

    For each property row, attempt to find a matching rental listing.
    Match strategy:
    1. Exact normalised address match.
    2. Lat/lon proximity (< 0.0005 degrees, roughly 50 m) for unmatched rows
       that share the same zip code.

    Returns the properties DataFrame with an added ``rent`` column.
    """
    props = props.copy()
    rentals = rentals.copy()

    # Normalise the first available address column on each side.
    for col in ("formatted_address", "addressLine1", "addressFull", "address"):
        if col in props.columns:
            props["_norm_addr"] = props[col].apply(_normalise_address)
            break
    else:
        props["_norm_addr"] = ""

    for col in ("formatted_address", "addressLine1", "addressFull", "address"):
        if col in rentals.columns:
            rentals["_norm_addr"] = rentals[col].apply(_normalise_address)
            break
    else:
        rentals["_norm_addr"] = ""

    # In rentals.csv the listing price IS the monthly rent.
    rent_price_col = "price"
    if rent_price_col not in rentals.columns:
        logger.warning("rentals.csv has no 'price' column; no rent data to merge")
        props["rent"] = np.nan
        props.drop(columns=["_norm_addr"], inplace=True)
        return props

    rentals["rent"] = pd.to_numeric(rentals[rent_price_col], errors="coerce")

    # Strategy 1: exact normalised-address lookup (first listing wins).
    rentals_dedup = (
        rentals[rentals["_norm_addr"] != ""]
        .drop_duplicates(subset=["_norm_addr"], keep="first")
    )

    rent_lookup = rentals_dedup.set_index("_norm_addr")["rent"]
    props["rent"] = props["_norm_addr"].map(rent_lookup)

    n_addr_matched = props["rent"].notna().sum()
    logger.info("Task F rent merge: %d/%d matched by address", n_addr_matched, len(props))

    # Strategy 2: lat/lon proximity within the same zip code for rows
    # still unmatched.
    unmatched_mask = props["rent"].isna()
    has_coords_props = (
        unmatched_mask
        & props.get("latitude", pd.Series(dtype=float)).notna()
        & props.get("longitude", pd.Series(dtype=float)).notna()
    )

    if has_coords_props.any() and "latitude" in rentals.columns and "longitude" in rentals.columns:
        zip_col_r = "zip_code" if "zip_code" in rentals.columns else None
        zip_col_p = "zip_code" if "zip_code" in props.columns else None

        rentals_with_coords = rentals[
            rentals["latitude"].notna() & rentals["longitude"].notna() & rentals["rent"].notna()
        ].copy()

        if not rentals_with_coords.empty and zip_col_r and zip_col_p:
            # Group candidate rentals by zip so each property only scans
            # listings in its own zip code.
            rental_groups = {
                z: grp[["latitude", "longitude", "rent"]].values
                for z, grp in rentals_with_coords.groupby(zip_col_r)
            }

            proximity_threshold = 0.0005  # degrees, roughly 50 m

            for idx in props.index[has_coords_props]:
                z = props.at[idx, zip_col_p]
                if z not in rental_groups:
                    continue
                candidates = rental_groups[z]
                dlat = candidates[:, 0] - props.at[idx, "latitude"]
                dlon = candidates[:, 1] - props.at[idx, "longitude"]
                dist = np.sqrt(dlat ** 2 + dlon ** 2)
                best = np.argmin(dist)
                if dist[best] < proximity_threshold:
                    props.at[idx, "rent"] = candidates[best, 2]

            n_geo_matched = props["rent"].notna().sum() - n_addr_matched
            logger.info("Task F rent merge: %d additional matched by lat/lon proximity", n_geo_matched)

    props.drop(columns=["_norm_addr"], inplace=True)
    return props
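

# Note (approximation caveat, not load-bearing): 0.0005 degrees of latitude is
# about 55 m, but a degree of longitude shrinks by cos(latitude), so the
# Euclidean degree distance above slightly over-weights north-south
# separation. A haversine distance in metres, if exactness ever mattered:
def _example_haversine_m(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    r = 6_371_000.0  # mean Earth radius in metres
    p1, p2 = np.radians(lat1), np.radians(lat2)
    dp = np.radians(lat2 - lat1)
    dl = np.radians(lon2 - lon1)
    a = np.sin(dp / 2) ** 2 + np.cos(p1) * np.cos(p2) * np.sin(dl / 2) ** 2
    return float(2 * r * np.arcsin(np.sqrt(a)))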


def _build_task_f(
    output_dir: Path,
) -> dict[str, Any]:
    """Build Task F: evaluate real estate rent and price estimation.

    Uses collected RentCast data as ground truth. Loads both
    ``properties.csv`` (sale prices) and ``rentals.csv`` (monthly rents),
    merges them by normalised address (with lat/lon proximity fallback),
    and produces a combined dataset with both ``price`` and ``rent``
    target columns.

    For each property the task is: given location (metro), property type,
    size (sqft, beds, baths), and year built, predict rent and/or price.

    The holdout is a random 30 % of combined properties (seeded).
    Training properties serve as the comps database.
    """
    properties_path = config.REAL_ESTATE_DIR / "properties.csv"
    rentals_path = config.REAL_ESTATE_DIR / "rentals.csv"

    if not properties_path.exists() and not rentals_path.exists():
        logger.warning("Neither properties.csv nor rentals.csv found; skipping Task F")
        return {"error": "No real estate data"}

    # Load sale properties; rentals alone can still build the rent half
    # of the task.
    if properties_path.exists():
        props = pd.read_csv(properties_path)
    else:
        props = pd.DataFrame()

    # Canonical column names. Aliases are tried in priority order and only
    # applied when the canonical name is not already present.
    _rename_priority = [
        ("square_footage", "sqft"),
        ("squareFootage", "sqft"),
        ("propertyType", "property_type"),
        ("yearBuilt", "year_built"),
        ("last_sale_price", "price"),
        ("lastSalePrice", "price"),
        ("zipCode", "zip_code"),
        ("lotSize", "lot_size"),
        ("formatted_address", "address"),
        ("addressLine1", "address"),
        ("addressFull", "address"),
    ]
    for old_name, new_name in _rename_priority:
        if old_name in props.columns and new_name not in props.columns:
            props = props.rename(columns={old_name: new_name})

    # Clean prices: coerce to numeric and drop non-positive values.
    if "price" in props.columns:
        props["price"] = pd.to_numeric(props["price"], errors="coerce")
        neg_price = props["price"] <= 0
        if neg_price.any():
            logger.info("Task F: removing %d rows with non-positive price", neg_price.sum())
            props = props[~neg_price | props["price"].isna()]

    # One row per address.
    if "address" in props.columns:
        before = len(props)
        props = props.drop_duplicates(subset=["address"], keep="first")
        deduped = before - len(props)
        if deduped > 0:
            logger.info("Task F: deduplicated %d rows by address", deduped)

    # Merge rents: match rentals onto properties where possible, then
    # append rental-only listings as price-less rows.
    if rentals_path.exists():
        rentals_raw = pd.read_csv(rentals_path)
        if not rentals_raw.empty:
            for old_name, new_name in _rename_priority:
                if old_name in rentals_raw.columns and new_name not in rentals_raw.columns:
                    rentals_raw = rentals_raw.rename(columns={old_name: new_name})

            if not props.empty:
                props = _merge_rentals(props, rentals_raw)
            else:
                # Rentals only: the listing price is the monthly rent.
                props = rentals_raw.copy()
                props["rent"] = pd.to_numeric(props.get("price", pd.Series(dtype=float)), errors="coerce")
                props["price"] = np.nan

            # Append rentals whose address did not match any property.
            if not props.empty and "address" in props.columns:
                existing_addrs = set(props["address"].apply(_normalise_address))
                if "address" in rentals_raw.columns:
                    rentals_raw["_norm_addr"] = rentals_raw["address"].apply(_normalise_address)
                    new_rentals = rentals_raw[~rentals_raw["_norm_addr"].isin(existing_addrs)].copy()
                    new_rentals.drop(columns=["_norm_addr"], inplace=True)
                    if not new_rentals.empty:
                        new_rentals["rent"] = pd.to_numeric(
                            new_rentals.get("price", pd.Series(dtype=float)), errors="coerce",
                        )
                        # For rental listings "price" is the rent, not a
                        # sale price; blank it out after copying to rent.
                        if "price" in new_rentals.columns:
                            new_rentals = new_rentals.drop(columns=["price"])
                        new_rentals["price"] = np.nan
                        props = pd.concat([props, new_rentals], ignore_index=True)
                        logger.info("Task F: appended %d rental-only rows", len(new_rentals))
    else:
        # No rentals data at all.
        props["rent"] = np.nan

    if props.empty:
        return {"error": "Empty real estate data after merge"}

    # Guarantee both target columns exist (e.g. a properties.csv without a
    # price-like column would otherwise make the dropna below raise).
    for col in ("price", "rent"):
        if col not in props.columns:
            props[col] = np.nan

    # Keep only rows with at least one target.
    props = props.dropna(subset=["price", "rent"], how="all")

    if len(props) < 10:
        return {"error": f"Too few properties with rent/price data ({len(props)})"}

    # Recency feature: years since last sale, anchored at the data
    # collection date so the feature is stable across rebuilds.
    SCRAPE_DATE = pd.Timestamp("2026-04-11", tz="UTC")
    if "last_sale_date" in props.columns:
        props["last_sale_date"] = pd.to_datetime(
            props["last_sale_date"], errors="coerce", utc=True,
        )
        props["years_since_last_sale"] = (
            (SCRAPE_DATE - props["last_sale_date"]).dt.total_seconds() / (365.25 * 86400)
        )

    # Seeded 70/30 split: train rows double as the comps database.
    rng = np.random.RandomState(config.BENCHMARK_SEED)
    holdout_mask = rng.random(len(props)) < 0.3
    train_props = props[~holdout_mask].copy()
    test_props = props[holdout_mask].copy()

    # Evaluation inputs: location and physical features only, no targets.
    input_candidates = [
        "address", "city", "state", "zip_code",
        "property_type", "bedrooms", "bathrooms", "sqft",
        "lotSize", "lot_size", "year_built", "county",
        "latitude", "longitude",
        "last_sale_date", "years_since_last_sale",
    ]
    input_cols = [c for c in input_candidates if c in test_props.columns]
    inputs = test_props[input_cols].copy()

    # Ground truth keyed by address where available.
    gt_cols = []
    if "address" in test_props.columns:
        gt_cols.append("address")
    gt_cols.extend(["price", "rent"])
    gt = test_props[gt_cols].copy()

    train_props.to_parquet(output_dir / "re_train_properties.parquet", index=False)
    inputs.to_parquet(output_dir / "re_eval_inputs.parquet", index=False)
    gt.to_parquet(output_dir / "re_eval_ground_truth.parquet", index=False)

    n_price = gt["price"].notna().sum()
    n_rent = gt["rent"].notna().sum()
    n_both = (gt["price"].notna() & gt["rent"].notna()).sum()

    logger.info(
        "Task F (RE Eval): %d train, %d test; price=%d, rent=%d, both=%d",
        len(train_props), len(test_props), n_price, n_rent, n_both,
    )

    return {
        "n_train": len(train_props),
        "n_test": len(test_props),
        "n_price": int(n_price),
        "n_rent": int(n_rent),
        "n_both": int(n_both),
        "target_cols": ["price", "rent"],
        "input_cols": input_cols,
    }
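

# Illustrative sketch (not part of the build): a naive comps baseline for
# Task F that predicts the median price and rent of same-zip training
# properties. Assumes ``zip_code`` survived the renames above.
def _example_comps_baseline(train: pd.DataFrame, test: pd.DataFrame) -> pd.DataFrame:
    medians = train.groupby("zip_code")[["price", "rent"]].median()
    preds = test[["zip_code"]].join(medians, on="zip_code")
    return preds.rename(columns={"price": "pred_price", "rent": "pred_rent"})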


# Price-derived (or otherwise market-only) columns stripped from the Task D
# test inputs; see _build_task_d below.
_PRICE_DERIVED_COLS = {
    "derived_market_cap", "derived_pe", "derived_ev", "derived_ev_to_revenue",
    "derived_ev_to_ebitda", "derived_fcf_yield", "derived_pb",
    "derived_price_to_book", "derived_debt_to_equity",
    "close", "open", "high", "low", "volume", "adj_close",
    "shares_outstanding",
}


def _build_task_d(
    panel: pd.DataFrame,
    company_info: pd.DataFrame,
    holdout_tickers: list[str],
    output_dir: Path,
) -> dict[str, Any]:
    """Build Task D: value an unseen company as if it were private.

    Simulates the PE use case: the model trains on public companies where
    all data (including market price) is available, but at test time it
    receives ONLY what a PE analyst would have — financial statements,
    sector, and industry. All price-derived columns are stripped from
    the test inputs.

    Same holdout tickers and ground truth as Task A, different input
    columns.
    """
    if "derived_market_cap" not in panel.columns:
        logger.warning("derived_market_cap not in panel; skipping Task D")
        return {"error": "No market cap data"}

    quarterly = panel.copy()
    quarterly["date"] = pd.to_datetime(quarterly["date"])
    quarterly["quarter"] = quarterly["date"].dt.to_period("Q")

    # Keep the last observation per ticker-quarter, as in Task A.
    quarterly = quarterly.sort_values("date").drop_duplicates(
        subset=["ticker", "quarter"], keep="last",
    )

    eval_mask = quarterly["ticker"].isin(holdout_tickers)
    eval_df = quarterly[eval_mask].copy()

    if eval_df.empty:
        return {"error": "No holdout tickers found in panel"}

    # Inputs: statement columns only, plus identifiers and sector/industry.
    input_cols = ["ticker", "date", "sector", "industry"]
    for c in quarterly.columns:
        if c.startswith("stmt_"):
            input_cols.append(c)

    # Defensive second pass: drop anything price-derived that slipped in.
    input_cols = [c for c in input_cols if c in eval_df.columns
                  and c not in _PRICE_DERIVED_COLS]
    inputs = eval_df[input_cols].copy()

    # Ground truth is identical in form to Task A's.
    gt = eval_df[["ticker", "date", "derived_market_cap"]].copy()
    gt = gt.rename(columns={"derived_market_cap": "actual_market_cap"})
    gt = gt.dropna(subset=["actual_market_cap"])

    inputs.to_parquet(output_dir / "private_valuation_inputs.parquet", index=False)
    gt.to_parquet(output_dir / "private_valuation_ground_truth.parquet", index=False)

    logger.info(
        "Task D (Private Valuation): %d tickers, %d instances, %d input cols (no price data)",
        gt["ticker"].nunique(), len(gt), len(input_cols),
    )

    return {
        "n_tickers": gt["ticker"].nunique(),
        "n_instances": len(gt),
        "n_input_cols": len(input_cols),
        "input_cols": input_cols,
        "date_range": [str(gt["date"].min()), str(gt["date"].max())],
    }
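

# Illustrative sketch (not part of the build): audit that the Task D inputs
# written above really contain no price-derived columns.
def _example_audit_task_d(bench_dir: Path) -> bool:
    inputs = pd.read_parquet(bench_dir / "private_valuation_inputs.parquet")
    return not (set(inputs.columns) & _PRICE_DERIVED_COLS)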


def build_valuation_benchmark(
    granularity: str = "daily",
) -> dict[str, Any]:
    """Build all valuation benchmark artifacts for a given granularity.

    Reads from existing processed panel and benchmark data.
    Writes to ``benchmark/{granularity}/``.

    Returns summary dict with per-task statistics.
    """
    bench_dir = config.get_benchmark_dir(granularity)
    bench_dir.mkdir(parents=True, exist_ok=True)

    # Panel: prefer parquet, fall back to CSV.
    proc_dir = config.get_processed_dir(granularity)
    panel_path = proc_dir / "panel.parquet"
    if not panel_path.exists():
        panel_path = proc_dir / "panel.csv"
        if not panel_path.exists():
            return {"error": f"No panel data at {proc_dir}"}

    panel = pd.read_parquet(panel_path) if panel_path.suffix == ".parquet" else pd.read_csv(panel_path)

    # Company profiles (optional).
    info_path = config.FUNDAMENTALS_DIR / "company_info.csv"
    company_info = pd.read_csv(info_path) if info_path.exists() else pd.DataFrame()

    # Scenarios from generate_scenarios.py (optional; Task C skips if absent).
    scenarios_path = bench_dir / "scenarios.parquet"
    scenarios = pd.read_parquet(scenarios_path) if scenarios_path.exists() else pd.DataFrame()

    # Seeded holdout: the same tickers are held out across Tasks A, B, D, E.
    all_tickers = sorted(panel["ticker"].unique().tolist())
    rng = np.random.RandomState(config.BENCHMARK_SEED)
    n_holdout = max(1, int(len(all_tickers) * config.VALUATION_HOLDOUT_RATIO))
    holdout_tickers = rng.choice(all_tickers, size=n_holdout, replace=False).tolist()

    logger.info(
        "Building valuation benchmark: %d total tickers, %d holdout",
        len(all_tickers), len(holdout_tickers),
    )

    summary: dict[str, Any] = {
        "granularity": granularity,
        "n_tickers_total": len(all_tickers),
        "n_holdout": len(holdout_tickers),
        "holdout_tickers": holdout_tickers,
    }

    summary["task_a"] = _build_task_a(panel, company_info, holdout_tickers, bench_dir)
    summary["task_b"] = _build_task_b(company_info, holdout_tickers, bench_dir)
    summary["task_c"] = _build_task_c(panel, scenarios, bench_dir)
    summary["task_d"] = _build_task_d(panel, company_info, holdout_tickers, bench_dir)
    summary["task_e"] = _build_task_e(company_info, holdout_tickers, bench_dir)
    summary["task_f"] = _build_task_f(bench_dir)

    # Machine-readable task manifest for evaluation harnesses.
    task_def = {
        "benchmark_name": "whatif_valuation_v1",
        "tasks": {
            "A_valuation_accuracy": {
                "description": "Estimate intrinsic equity value of public companies",
                "input": "valuation_inputs.parquet",
                "ground_truth": "valuation_ground_truth.parquet",
                "metrics": ["MAPE", "median_APE", "rank_correlation", "directional_accuracy"],
                "primary_metric": "MAPE",
                "target_col": "actual_market_cap",
            },
            "B_statement_generation": {
                "description": "Generate plausible financial statements from company description",
                "input": "generation_inputs.parquet",
                "ground_truth": "generation_ground_truth.parquet",
                "metrics": ["per_field_MAPE", "balance_equation_accuracy", "ontology_compliance"],
                "primary_metric": "per_field_MAPE",
            },
            "C_scenario_forecast": {
                "description": "Forecast financial impact of what-if scenarios",
                "input": "scenarios.parquet",
                "ground_truth": "scenario_forecast_ground_truth.parquet",
                "metrics": ["return_MAE", "directional_accuracy", "CI_calibration"],
                "primary_metric": "return_MAE",
            },
            "D_private_valuation": {
                "description": "Value an unseen company using only financials + sector (PE simulation)",
                "input": "private_valuation_inputs.parquet",
                "ground_truth": "private_valuation_ground_truth.parquet",
                "metrics": ["MAPE", "median_APE", "rank_correlation", "directional_accuracy"],
                "primary_metric": "median_APE",
                "target_col": "actual_market_cap",
                "note": "Same holdout tickers as Task A but all price-derived columns stripped from inputs",
            },
            "E_generator_evaluation": {
                "description": "Generate financial statements for unseen companies and compare to actual XBRL filings",
                "input": "generator_eval_inputs.parquet",
                "ground_truth": "generator_eval_ground_truth.parquet",
                "metrics": ["per_field_MAPE", "balance_equation_accuracy"],
                "primary_metric": "per_field_MAPE",
                "note": "Evaluates the Generator agent's output against actual company financials",
            },
            "F_real_estate_valuation": {
                "description": "Estimate rent and price for unseen properties given location and features",
                "input": "re_eval_inputs.parquet",
                "ground_truth": "re_eval_ground_truth.parquet",
                "train_data": "re_train_properties.parquet",
                "metrics": ["rent_MAPE", "price_MAPE"],
                "primary_metric": "rent_MAPE",
                "note": "70/30 random split of RentCast properties; train set serves as comps database",
            },
        },
        "holdout_tickers": holdout_tickers,
    }
    (bench_dir / "valuation_tasks.json").write_text(
        json.dumps(task_def, indent=2, default=str),
    )

    logger.info("Valuation benchmark complete: %s", summary)
    return summary
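

# Illustrative sketch (not part of the build): a consumer walking the manifest
# written above to locate each task's input and ground-truth artifacts.
def _example_iter_tasks(bench_dir: Path):
    manifest = json.loads((bench_dir / "valuation_tasks.json").read_text())
    for name, spec in manifest["tasks"].items():
        yield name, bench_dir / spec["input"], bench_dir / spec["ground_truth"]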