| """ |
| emotion_engine.py — HuggingFace Datasets loading script |
| |
| Registered as the dataset builder for the emotion-engine HF repo. |
| Supports loading by condition name or "all" for the full dataset. |
| |
| Usage: |
| from datasets import load_dataset |
| |
| # Single condition |
| ds = load_dataset("PremC1F/emotion-engine", "condition_d") |
| |
| # All conditions concatenated |
| ds = load_dataset("PremC1F/emotion-engine", "all") |
| """ |
|
|
| import datasets |
|
|
# BibTeX entry surfaced through DatasetInfo.citation (see _info below).
_CITATION = """
@article{kanaparthi2026emotion,
author = {Kanaparthi, Prem Babu},
title = {Emergent Emotional Appraisal in Generative Agents
via Predictive World Modeling},
year = {2026},
}
"""
|
|
# Human-readable dataset card text surfaced through DatasetInfo.description.
_DESCRIPTION = """
204,520 agent-step records from a five-condition controlled experiment comparing
emotion-engine architectures in a 12-agent ghost-survival simulation (Ghost Town).

Five conditions:
baseline_0 — no emotion system
condition_a — hand-coded OCC appraisal rules
condition_b — behavioral cloning from condition_a
condition_c — emotion dynamics model
condition_d — predictive world modeling (proposed)

Six scenarios per condition:
standard_night, high_ghost_pressure, storm_scarcity,
ally_death, betrayal_refusal, crowded_shelter

8 random seeds per scenario.

Each record contains: full 14-dim observation, 6-dim emotion vector,
8-dim latent representation, 12 future-event predictions (Condition D),
action taken, and outcome metadata.
"""
|
|
# Project metadata surfaced through DatasetInfo.
_HOMEPAGE = "https://github.com/PremC1F/emotion-engine"
_LICENSE = "MIT"


# The five experimental conditions. Each name doubles as a builder-config
# name and as the stem of its parquet shard (data/<condition>.parquet).
_CONDITIONS = [
"baseline_0",
"condition_a",
"condition_b",
"condition_c",
"condition_d",
]
|
|
# Flat per-record schema: run metadata, the 14-dim observation, the 6-dim
# emotion vector, the 8-dim latent, the 12 prediction heads (Condition D),
# and the action/outcome fields. The repetitive float32 groups are generated
# so field families stay visibly in sync with the dims described above.
_FEATURES = datasets.Features({
# Run metadata.
"run_id": datasets.Value("string"),
"step": datasets.Value("int32"),
"agent_id": datasets.Value("string"),
"condition": datasets.Value("string"),
"seed": datasets.Value("int32"),
"scenario": datasets.Value("string"),

# Full 14-dim observation.
**{
field: datasets.Value(dtype)
for field, dtype in (
("obs_visible_ghosts", "int32"),
("obs_visible_deaths", "int32"),
("obs_nearby_allies", "int32"),
("obs_trusted_allies", "int32"),
("obs_supplies_seen", "int32"),
("obs_in_shelter", "bool"),
("obs_nearest_refuge_distance", "float32"),
("obs_steps_since_ghost_seen", "int32"),
("obs_steps_since_ally_died", "int32"),
("obs_steps_since_betrayal", "int32"),
("obs_ally_deaths_witnessed", "int32"),
("obs_betrayals_received", "int32"),
("obs_average_trust", "float32"),
("obs_graph_tension", "float32"),
)
},

# 6-dim emotion vector.
**{
f"affect_{name}": datasets.Value("float32")
for name in ("fear", "grief", "trust", "stress", "relief", "suspicion")
},

# 8-dim latent representation.
**{f"latent_{i}": datasets.Value("float32") for i in range(8)},

# 12 future-event prediction heads.
**{
f"pred_{name}": datasets.Value("float32")
for name in (
"ghost_nearby_t3",
"my_death_t5",
"health_drop_t3",
"nearby_death_t5",
"help_success_t5",
"refusal_received_t5",
"tie_increase_t5",
"shelter_achieved_t2",
"storm_onset_t3",
"scarcity_t5",
"graph_tension_t3",
"valence_t5",
)
},

# Action taken and outcome metadata.
"action": datasets.Value("string"),
"goal": datasets.Value("string"),
"reward_survival": datasets.Value("float32"),
"reward_shelter": datasets.Value("float32"),
"reward_health": datasets.Value("float32"),
"reward_social": datasets.Value("float32"),
"total_reward": datasets.Value("float32"),
"alive": datasets.Value("bool"),
"health": datasets.Value("float32"),
"sheltered": datasets.Value("bool"),
"time_of_day": datasets.Value("string"),
})
|
|
|
|
class EmotionEngineConfig(datasets.BuilderConfig):
    """BuilderConfig that selects which experimental condition to load.

    ``condition`` is either one of the names in ``_CONDITIONS`` or ``"all"``
    to concatenate every condition into a single split.
    """

    def __init__(self, condition: str = "all", **kwargs):
        super().__init__(**kwargs)
        # Read by the builder's _split_generators to pick the parquet shard(s).
        self.condition = condition
|
|
|
|
class EmotionEngine(datasets.GeneratorBasedBuilder):
    """Ghost Town emotion engine dataset — 204,520 agent-step records.

    Each config loads either a single experimental condition or, with the
    "all" config, every condition concatenated into one TRAIN split. The
    data lives as one parquet shard per condition at data/<condition>.parquet
    in the dataset repository.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIG_CLASS = EmotionEngineConfig

    # "all" plus one config per condition; names match _CONDITIONS so
    # load_dataset(repo, "<condition>") works directly.
    BUILDER_CONFIGS = [
        EmotionEngineConfig(
            name="all",
            version=VERSION,
            description="All five conditions concatenated (204,520 records)",
            condition="all",
        ),
    ] + [
        EmotionEngineConfig(
            name=c,
            version=VERSION,
            description=f"Condition {c} only (~40k records, 6 scenarios, 8 seeds)",
            condition=c,
        )
        for c in _CONDITIONS
    ]

    # The proposed architecture (Condition D) is the headline config.
    DEFAULT_CONFIG_NAME = "condition_d"

    def _info(self):
        """Return dataset metadata (schema, citation, license, homepage)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=_FEATURES,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the parquet shard(s) for the configured condition.

        Returns a single TRAIN split; the "all" config downloads every
        condition's shard in _CONDITIONS order.

        Raises:
            ValueError: If the config names an unknown condition (previously
                this surfaced only as an opaque download failure).
        """
        if self.config.condition == "all":
            conditions = _CONDITIONS
        elif self.config.condition in _CONDITIONS:
            conditions = [self.config.condition]
        else:
            raise ValueError(
                f"Unknown condition {self.config.condition!r}; "
                f"expected 'all' or one of {_CONDITIONS}"
            )
        filepaths = [
            dl_manager.download(f"data/{c}.parquet") for c in conditions
        ]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepaths": filepaths},
            )
        ]

    def _generate_examples(self, filepaths):
        """Yield (key, example) pairs from each parquet shard in order.

        Uses DataFrame.to_dict(orient="records") instead of iterrows():
        iterrows() materializes each row as a Series whose single dtype is
        the common dtype across all columns, silently upcasting int32/bool
        values in this mixed schema; to_dict("records") preserves per-column
        types and avoids the per-row Series construction cost.
        """
        # Local import keeps the heavy dependency off the module import path,
        # matching the original loading-script convention.
        import pandas as pd

        key = 0
        for path in filepaths:
            df = pd.read_parquet(path)
            for record in df.to_dict(orient="records"):
                # Keys are globally unique across shards.
                yield key, record
                key += 1
|
|