# emotion-engine / export_to_parquet.py
# (Hugging Face page header, preserved from upload metadata)
# Uploaded by PremC1F — Initial release: 204,520 agent-step records,
# 5 conditions, 6 scenarios — commit e7e1778 (verified)
"""
export_to_parquet.py
====================
Converts raw Ghost Town simulation outputs into clean Parquet files
ready for upload to Hugging Face Datasets.
Usage:
python export_to_parquet.py \
--source path/to/outputs/proof_all_scenarios \
--out path/to/emotion-engine-dataset/data
Output:
data/
baseline_0.parquet
condition_a.parquet
condition_b.parquet
condition_c.parquet
condition_d.parquet
Each Parquet file is one condition split (~40k rows).
Total size after compression: ~30-50 MB.
"""
import argparse
import json
import os
import glob
import pandas as pd
from pathlib import Path
# The five experimental condition splits; each becomes one Parquet file.
# NOTE(review): export() matches run-folder names via startswith() against
# these entries, so they must remain unique prefixes of the folder names.
CONDITION_NAMES = [
    "baseline_0",
    "condition_a",
    "condition_b",
    "condition_c",
    "condition_d",
]
def flatten_record(r: dict) -> dict:
    """Flatten one JSONL record into a single-level dict.

    Every nested section (observation, social context, affect, latent,
    predictions, rewards, metadata) is mapped onto prefixed scalar columns.
    Missing fields fall back to neutral defaults so all rows share one schema.
    """
    observation = r.get("observation", {})
    social = r.get("social_context", {})
    affect = r.get("affect", r.get("affect_vector", {}))
    latent = r.get("latent", [0.0] * 8)
    preds = r.get("predictions", {})
    rewards = r.get("reward_components", {})
    meta = r.get("metadata", {})

    # Pad (or truncate) the latent vector to exactly 8 float columns.
    latent_values = [float(v) for v in latent[:8]]
    latent_values += [0.0] * (8 - len(latent_values))

    # Identity columns.
    row = {
        "run_id": r.get("run_id", ""),
        "step": int(r.get("step", 0)),
        "agent_id": r.get("agent_id", ""),
        "condition": r.get("condition", ""),
        "seed": int(r.get("seed", 0)),
        # Scenario lives in metadata for newer records; fall back to top level.
        "scenario": meta.get("scenario", r.get("scenario", "")),
    }

    # Observation (14 dims) — 12 from the observation dict, 2 social-graph.
    row.update({
        "obs_visible_ghosts": int(observation.get("visible_ghosts", 0)),
        "obs_visible_deaths": int(observation.get("visible_deaths", 0)),
        "obs_nearby_allies": int(observation.get("nearby_allies", 0)),
        "obs_trusted_allies": int(observation.get("trusted_allies", 0)),
        "obs_supplies_seen": int(observation.get("supplies_seen", 0)),
        "obs_in_shelter": bool(observation.get("in_shelter", False)),
        "obs_nearest_refuge_distance": float(observation.get("nearest_refuge_distance", 0)),
        "obs_steps_since_ghost_seen": int(observation.get("steps_since_ghost_seen", 20)),
        "obs_steps_since_ally_died": int(observation.get("steps_since_ally_died", 20)),
        "obs_steps_since_betrayal": int(observation.get("steps_since_betrayal", 20)),
        "obs_ally_deaths_witnessed": int(observation.get("ally_deaths_witnessed", 0)),
        "obs_betrayals_received": int(observation.get("betrayals_received", 0)),
        "obs_average_trust": float(social.get("average_trust", 0.0)),
        "obs_graph_tension": float(social.get("graph_tension", 0.0)),
    })

    # Affect / emotion vector.
    row.update({
        "affect_fear": float(affect.get("fear", 0.0)),
        "affect_grief": float(affect.get("grief", 0.0)),
        "affect_trust": float(affect.get("trust", 0.0)),
        "affect_stress": float(affect.get("stress", 0.0)),
        "affect_relief": float(affect.get("relief", 0.0)),
        "affect_suspicion": float(affect.get("suspicion", 0.0)),
    })

    # Latent representation (8 dims).
    for i, value in enumerate(latent_values):
        row[f"latent_{i}"] = value

    # Future-event predictions (Condition D).
    row.update({
        "pred_ghost_nearby_t3": float(preds.get("ghost_nearby_t3", 0.0)),
        "pred_my_death_t5": float(preds.get("my_death_t5", 0.0)),
        "pred_health_drop_t3": float(preds.get("health_drop_t3", 0.0)),
        "pred_nearby_death_t5": float(preds.get("nearby_death_t5", 0.0)),
        "pred_help_success_t5": float(preds.get("help_success_t5", 0.0)),
        "pred_refusal_received_t5": float(preds.get("refusal_received_t5", 0.0)),
        "pred_tie_increase_t5": float(preds.get("tie_increase_t5", 0.0)),
        "pred_shelter_achieved_t2": float(preds.get("shelter_achieved_t2", 0.0)),
        "pred_storm_onset_t3": float(preds.get("storm_onset_t3", 0.0)),
        "pred_scarcity_t5": float(preds.get("scarcity_t5", 0.0)),
        # Newer records use the *_increase_* key; older ones the short key.
        "pred_graph_tension_t3": float(preds.get("graph_tension_increase_t3",
                                                 preds.get("graph_tension_t3", 0.0))),
        "pred_valence_t5": float(preds.get("valence_t5", 0.0)),
    })

    # Action & outcome.
    row.update({
        "action": r.get("action", ""),
        "goal": r.get("goal", ""),
        "reward_survival": float(rewards.get("survival", 0.0)),
        "reward_shelter": float(rewards.get("shelter", 0.0)),
        "reward_health": float(rewards.get("health", 0.0)),
        "reward_social": float(rewards.get("social", 0.0)),
        "total_reward": float(r.get("total_reward", 0.0)),
        "alive": bool(meta.get("alive", True)),
        "health": float(meta.get("health", 100.0)),
        "sheltered": bool(meta.get("sheltered", False)),
        "time_of_day": meta.get("time_of_day", ""),
    })
    return row
def load_run(jsonl_path: str) -> list[dict]:
    """Read one run's JSONL file and return its flattened rows.

    Malformed or unparsable lines are reported and skipped (best-effort)
    so a single corrupt record never aborts a whole export.
    """
    rows: list[dict] = []
    with open(jsonl_path, encoding="utf-8") as handle:
        for raw_line in handle:
            stripped = raw_line.strip()
            if not stripped:
                continue  # ignore blank lines
            try:
                rows.append(flatten_record(json.loads(stripped)))
            except Exception as e:
                print(f" Warning: skipped malformed record in {jsonl_path}: {e}")
    return rows
def export(source_dir: str, out_dir: str):
    """Convert every run under *source_dir* into per-condition Parquet files.

    Runs are grouped by matching the run-folder name against the known
    condition prefixes; one snappy-compressed Parquet file is written per
    non-empty condition into *out_dir*.
    """
    src = Path(source_dir)
    dest = Path(out_dir)
    dest.mkdir(parents=True, exist_ok=True)

    # Every run folder is expected to contain a training_records.jsonl.
    run_files = sorted(src.glob("*/training_records.jsonl"))
    if not run_files:
        raise FileNotFoundError(
            f"No training_records.jsonl found under {src}.\n"
            "Make sure --source points to the batch output directory."
        )

    # Group run folders by condition prefix.
    grouped: dict[str, list] = {name: [] for name in CONDITION_NAMES}
    for jsonl_path in run_files:
        # e.g. condition_d_standard_night_seed0_12-agent
        run_name = jsonl_path.parent.name
        match = next((c for c in CONDITION_NAMES if run_name.startswith(c)), None)
        if match is None:
            print(f" Skipping unrecognised run: {run_name}")
            continue
        print(f" Loading {run_name} …", end=" ", flush=True)
        rows = load_run(str(jsonl_path))
        grouped[match].extend(rows)
        print(f"{len(rows)} records")
    print()

    # Write one Parquet file per non-empty condition split.
    for name, rows in grouped.items():
        if not rows:
            print(f" No records for {name} — skipping")
            continue
        frame = pd.DataFrame(rows)
        target = dest / f"{name}.parquet"
        frame.to_parquet(target, index=False, compression="snappy")
        size_mb = target.stat().st_size / 1e6
        print(f" {name:15s} {len(frame):>7,} records -> {target.name} ({size_mb:.1f} MB)")
    print(f"\nAll splits written to {str(dest)}/")
if __name__ == "__main__":
    # CLI entry point: parse the source/destination paths and run the export.
    cli = argparse.ArgumentParser(
        description="Export Ghost Town outputs to Parquet for HuggingFace."
    )
    cli.add_argument(
        "--source",
        required=True,
        help="Path to batch output directory (e.g. outputs/proof_all_scenarios)",
    )
    cli.add_argument(
        "--out",
        default="data",
        help="Output directory for Parquet files (default: data/)",
    )
    cli_args = cli.parse_args()
    export(cli_args.source, cli_args.out)