Muqeeth committed
Commit 837d5a3 · verified · 1 Parent(s): ffe78fe

Add files using upload-large-folder tool

Files changed (50)
  1. .hydra/config.yaml +183 -0
  2. .hydra/hydra.yaml +154 -0
  3. .hydra/overrides.yaml +1 -0
  4. seed_42/Qwen/Qwen2.5-7B-Instruct/adapters/README.md +207 -0
  5. seed_42/Qwen/Qwen2.5-7B-Instruct/adapters/agent_adapter/adapter_config.json +42 -0
  6. seed_42/Qwen/Qwen2.5-7B-Instruct/adapters/critic_adapter/adapter_config.json +42 -0
  7. src_code_for_reproducibility/__init__.py +0 -0
  8. src_code_for_reproducibility/chat_utils/__pycache__/apply_template.cpython-312.pyc +0 -0
  9. src_code_for_reproducibility/chat_utils/__pycache__/chat_turn.cpython-312.pyc +0 -0
  10. src_code_for_reproducibility/chat_utils/__pycache__/template_specific.cpython-312.pyc +0 -0
  11. src_code_for_reproducibility/docs/source/contributing.rst +0 -0
  12. src_code_for_reproducibility/docs/source/environments/diplomacy.rst +459 -0
  13. src_code_for_reproducibility/docs/source/environments/dond.rst +410 -0
  14. src_code_for_reproducibility/docs/source/environments/ipd.rst +411 -0
  15. src_code_for_reproducibility/docs/source/launch.rst +0 -0
  16. src_code_for_reproducibility/docs/source/media/runbatch.png +0 -0
  17. src_code_for_reproducibility/docs/source/src.models.dummy_local_llm.rst +7 -0
  18. src_code_for_reproducibility/docs/source/src.models.hf_agent.rst +7 -0
  19. src_code_for_reproducibility/docs/source/src.models.local_llm.rst +7 -0
  20. src_code_for_reproducibility/docs/source/src.run.rst +7 -0
  21. src_code_for_reproducibility/docs/source/src.utils.common_imports.rst +7 -0
  22. src_code_for_reproducibility/docs/source/src.utils.extra_stats.rst +7 -0
  23. src_code_for_reproducibility/docs/source/src.utils.inherit_args.rst +7 -0
  24. src_code_for_reproducibility/docs/source/src.utils.log_gpu_usage.rst +7 -0
  25. src_code_for_reproducibility/docs/source/usage.rst +0 -0
  26. src_code_for_reproducibility/markov_games/__init__.py +0 -0
  27. src_code_for_reproducibility/markov_games/alternative_actions_runner.py +138 -0
  28. src_code_for_reproducibility/markov_games/group_timesteps.py +150 -0
  29. src_code_for_reproducibility/markov_games/markov_game.py +208 -0
  30. src_code_for_reproducibility/markov_games/mg_utils.py +89 -0
  31. src_code_for_reproducibility/markov_games/rollout_tree.py +86 -0
  32. src_code_for_reproducibility/markov_games/run_markov_games.py +24 -0
  33. src_code_for_reproducibility/markov_games/simulation.py +87 -0
  34. src_code_for_reproducibility/markov_games/statistics_runner.py +405 -0
  35. src_code_for_reproducibility/models/__pycache__/__init__.cpython-312.pyc +0 -0
  36. src_code_for_reproducibility/models/__pycache__/adapter_training_wrapper.cpython-312.pyc +0 -0
  37. src_code_for_reproducibility/models/__pycache__/human_policy.cpython-312.pyc +0 -0
  38. src_code_for_reproducibility/models/__pycache__/inference_backend.cpython-312.pyc +0 -0
  39. src_code_for_reproducibility/models/__pycache__/inference_backend_dummy.cpython-312.pyc +0 -0
  40. src_code_for_reproducibility/models/__pycache__/inference_backend_sglang.cpython-312.pyc +0 -0
  41. src_code_for_reproducibility/models/__pycache__/inference_backend_vllm.cpython-312.pyc +0 -0
  42. src_code_for_reproducibility/models/__pycache__/large_language_model_api.cpython-312.pyc +0 -0
  43. src_code_for_reproducibility/models/__pycache__/large_language_model_local.cpython-312.pyc +0 -0
  44. src_code_for_reproducibility/models/__pycache__/scalar_critic.cpython-312.pyc +0 -0
  45. src_code_for_reproducibility/training/README.md +20 -0
  46. src_code_for_reproducibility/training/credit_methods.py +295 -0
  47. src_code_for_reproducibility/training/tally_tokenwise.py +276 -0
  48. src_code_for_reproducibility/training/trainer_ad_align.py +492 -0
  49. src_code_for_reproducibility/training/trainer_independent.py +155 -0
  50. src_code_for_reproducibility/training/training_data_utils.py +394 -0
.hydra/config.yaml ADDED
@@ -0,0 +1,183 @@
experiment:
  wandb_enabled: true
  nb_epochs: 3000
  nb_matches_per_iteration: 64
  reinit_matches_each_it: true
  checkpoint_every_n_iterations: 10
  start_epoch: 0
  resume_experiment: true
  base_seed: 42
  seed_group_size: 8
  train: true
  stat_methods_for_live_wandb: mllm.markov_games.negotiation.negotiation_statistics
  name: tas_rps_startend_ad_align_nocurrtimestep_seed42_beta2
  agent_buffer: true
  keep_agent_buffer_count: ${lora_count}
  agent_buffer_recent_k: -1
  description: Trust-and-Split Rock Paper Scissors negotiation game
logging:
  wandb:
    enabled: false
    project: llm-negotiation
    entity: null
    mode: online
    name: null
    group: null
    tags: []
    notes: null
temperature: 1.0
markov_games:
  runner_method_name: LinearRunner
  runner_kwargs: {}
  group_by_round: true
  simulation_class_name: TrustAndSplitRPSSimulation
  simulation_init_args:
    nb_of_rounds: 10
    quota_messages_per_agent_per_round: 1
    alternating_hands: false
    agents:
      0:
        agent_id: ${agent_0_id}
        agent_name: Alice
        agent_class_name: TrustAndSplitRPSAgent
        policy_id: base_llm/agent_adapter
        init_kwargs:
          goal: Maximize your total points over the whole game.
          num_message_chars: 500
          message_start_end_format: true
          proposal_start_end_format: true
      1:
        agent_id: ${agent_1_id}
        agent_name: Bob
        agent_class_name: TrustAndSplitRPSAgent
        policy_id: base_llm/agent_adapter
        init_kwargs:
          goal: Maximize your total points over the whole game.
          num_message_chars: 500
          message_start_end_format: true
          proposal_start_end_format: true
models:
  base_llm:
    class: LeanLocalLLM
    init_args:
      llm_id: base_llm
      model_name: Qwen/Qwen2.5-7B-Instruct
      inference_backend: vllm
      hf_kwargs:
        device_map: auto
        torch_dtype: bfloat16
        max_memory:
          0: 20GiB
        attn_implementation: flash_attention_2
      inference_backend_init_kwargs:
        enable_lora: true
        seed: ${experiment.base_seed}
        enable_prefix_caching: true
        max_model_len: 10000.0
        gpu_memory_utilization: 0.5
        dtype: bfloat16
        trust_remote_code: true
        max_lora_rank: 32
        enforce_eager: false
        max_loras: ${lora_count}
        max_cpu_loras: ${lora_count}
        enable_sleep_mode: true
      inference_backend_sampling_params:
        temperature: ${temperature}
        top_p: 1.0
        max_tokens: 400
        top_k: -1
        logprobs: 0
      adapter_configs:
        agent_adapter:
          task_type: CAUSAL_LM
          r: 32
          lora_alpha: 64
          lora_dropout: 0.0
          target_modules: all-linear
        critic_adapter:
          task_type: CAUSAL_LM
          r: 32
          lora_alpha: 64
          lora_dropout: 0.0
          target_modules: all-linear
      enable_thinking: null
      regex_max_attempts: 1
critics:
  agent_critic:
    module_pointer:
    - base_llm
    - critic_adapter
optimizers:
  agent_optimizer:
    module_pointer:
    - base_llm
    - agent_adapter
    optimizer_class_name: torch.optim.Adam
    init_args:
      lr: 3.0e-06
      weight_decay: 0.0
  critic_optimizer:
    module_pointer: agent_critic
    optimizer_class_name: torch.optim.Adam
    init_args:
      lr: 3.0e-06
      weight_decay: 0.0
trainers:
  agent_trainer:
    class: TrainerAdAlign
    module_pointers:
      policy:
      - base_llm
      - agent_adapter
      policy_optimizer: agent_optimizer
      critic: agent_critic
      critic_optimizer: critic_optimizer
    kwargs:
      entropy_coeff: 0.0
      entropy_topk: null
      entropy_mask_regex: null
      kl_coeff: 0.001
      gradient_clipping: 1.0
      restrict_tokens: null
      mini_batch_size: 1
      use_gradient_checkpointing: true
      temperature: ${temperature}
      device: cuda:0
      use_gae: false
      whiten_advantages: false
      whiten_advantages_time_step_wise: false
      skip_discounted_state_visitation: true
      use_gae_lambda_annealing: false
      gae_lambda_annealing_method: None
      gae_lambda_annealing_method_params: None
      gae_lambda_annealing_limit: 0.95
      discount_factor: 0.96
      use_rloo: true
      enable_tokenwise_logging: false
      pg_loss_normalization: nb_tokens
      truncated_importance_sampling_ratio_cap: 2.0
      reward_normalizing_constant: 100.0
      ad_align_force_coop_first_step: false
      ad_align_clipping: null
      ad_align_gamma: 0.96
      ad_align_exclude_k_equals_t: true
      ad_align_use_sign: false
      ad_align_beta: 2.0
      use_old_ad_align: true
      use_time_regularization: false
      rloo_branch: false
      reuse_baseline: false
train_on_which_data:
  agent_trainer: ${agent_ids}
lora_count: 30
common_agent_kwargs:
  goal: Maximize your total points over the whole game.
  num_message_chars: 500
  message_start_end_format: true
  proposal_start_end_format: true
agent_0_id: Alice
agent_1_id: Bob
agent_ids:
- Alice
- Bob
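
This is a plain OmegaConf/Hydra YAML file, so the ${...} interpolations (${lora_count}, ${temperature}, ${agent_0_id}, ...) resolve against keys defined at the bottom of the same file. A minimal sketch of inspecting it, assuming only that omegaconf is installed:

    from omegaconf import OmegaConf

    cfg = OmegaConf.load(".hydra/config.yaml")

    # Interpolations resolve lazily on access:
    print(cfg.experiment.keep_agent_buffer_count)  # 30, via ${lora_count}
    print(cfg.models.base_llm.init_args.inference_backend_sampling_params.temperature)  # 1.0, via ${temperature}

    # Materialize a fully resolved plain dict, e.g. for logging:
    resolved = OmegaConf.to_container(cfg, resolve=True)
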
.hydra/hydra.yaml ADDED
@@ -0,0 +1,154 @@
hydra:
  run:
    dir: ${oc.env:SCRATCH}/llm_negotiation/${now:%Y_%m}/${experiment.name}
  sweep:
    dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
    subdir: ${hydra.job.num}
  launcher:
    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
  sweeper:
    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
    max_batch_size: null
    params: null
  help:
    app_name: ${hydra.job.name}
    header: '${hydra.help.app_name} is powered by Hydra.

      '
    footer: 'Powered by Hydra (https://hydra.cc)

      Use --hydra-help to view Hydra specific help

      '
    template: '${hydra.help.header}

      == Configuration groups ==

      Compose your configuration from those groups (group=option)


      $APP_CONFIG_GROUPS


      == Config ==

      Override anything in the config (foo.bar=value)


      $CONFIG


      ${hydra.help.footer}

      '
  hydra_help:
    template: 'Hydra (${hydra.runtime.version})

      See https://hydra.cc for more info.


      == Flags ==

      $FLAGS_HELP


      == Configuration groups ==

      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
      to command line)


      $HYDRA_CONFIG_GROUPS


      Use ''--cfg hydra'' to Show the Hydra config.

      '
    hydra_help: ???
  hydra_logging:
    version: 1
    formatters:
      simple:
        format: '[%(asctime)s][HYDRA] %(message)s'
    handlers:
      console:
        class: logging.StreamHandler
        formatter: simple
        stream: ext://sys.stdout
    root:
      level: INFO
      handlers:
      - console
    loggers:
      logging_example:
        level: DEBUG
    disable_existing_loggers: false
  job_logging:
    version: 1
    formatters:
      simple:
        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
    handlers:
      console:
        class: logging.StreamHandler
        formatter: simple
        stream: ext://sys.stdout
      file:
        class: logging.FileHandler
        formatter: simple
        filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
    root:
      level: INFO
      handlers:
      - console
      - file
    disable_existing_loggers: false
  env: {}
  mode: RUN
  searchpath: []
  callbacks: {}
  output_subdir: .hydra
  overrides:
    hydra:
    - hydra.mode=RUN
    task: []
  job:
    name: run
    chdir: false
    override_dirname: ''
    id: ???
    num: ???
    config_name: tas_rps_startend_ad_align_nocurrtimestep_seed42_beta2.yaml
    env_set: {}
    env_copy: []
    config:
      override_dirname:
        kv_sep: '='
        item_sep: ','
        exclude_keys: []
  runtime:
    version: 1.3.2
    version_base: '1.1'
    cwd: /scratch/muqeeth/llm_negotiation
    config_sources:
    - path: hydra.conf
      schema: pkg
      provider: hydra
    - path: /scratch/muqeeth/llm_negotiation/configs
      schema: file
      provider: main
    - path: ''
      schema: structured
      provider: schema
    output_dir: /scratch/muqeeth/llm_negotiation/2025_11/tas_rps_startend_ad_align_nocurrtimestep_seed42_beta2
    choices:
      hydra/env: default
      hydra/callbacks: null
      hydra/job_logging: default
      hydra/hydra_logging: default
      hydra/hydra_help: default
      hydra/help: default
      hydra/sweeper: basic
      hydra/launcher: basic
      hydra/output: default
  verbose: false
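
This hydra.yaml records the resolved Hydra state for the run: job name run, config tas_rps_startend_ad_align_nocurrtimestep_seed42_beta2.yaml picked up from /scratch/muqeeth/llm_negotiation/configs, and outputs routed to ${oc.env:SCRATCH}/llm_negotiation/<year_month>/<experiment name>. A hypothetical entry point consistent with these values (the actual run.py is not part of this commit) might look like:

    import hydra
    from omegaconf import DictConfig, OmegaConf

    @hydra.main(
        config_path="configs",
        config_name="tas_rps_startend_ad_align_nocurrtimestep_seed42_beta2",
        version_base="1.1",  # matches runtime.version_base above
    )
    def main(cfg: DictConfig) -> None:
        # hydra.job.chdir is false, so the working directory is unchanged;
        # outputs still land in hydra.run.dir under $SCRATCH.
        print(OmegaConf.to_yaml(cfg))

    if __name__ == "__main__":
        main()
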
.hydra/overrides.yaml ADDED
@@ -0,0 +1 @@
[]
seed_42/Qwen/Qwen2.5-7B-Instruct/adapters/README.md ADDED
@@ -0,0 +1,207 @@
---
base_model: Qwen/Qwen2.5-7B-Instruct
library_name: peft
pipeline_tag: text-generation
tags:
- base_model:adapter:Qwen/Qwen2.5-7B-Instruct
- lora
- transformers
---

# Model Card for Model ID

<!-- Provide a quick summary of what the model is/does. -->



## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->



- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]

### Model Sources [optional]

<!-- Provide the basic links for the model. -->

- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]

## Uses

<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->

### Direct Use

<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->

[More Information Needed]

### Downstream Use [optional]

<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->

[More Information Needed]

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

[More Information Needed]

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.

## How to Get Started with the Model

Use the code below to get started with the model.

[More Information Needed]

## Training Details

### Training Data

<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->

[More Information Needed]

### Training Procedure

<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->

#### Preprocessing [optional]

[More Information Needed]


#### Training Hyperparameters

- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->

#### Speeds, Sizes, Times [optional]

<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->

[More Information Needed]

## Evaluation

<!-- This section describes the evaluation protocols and provides the results. -->

### Testing Data, Factors & Metrics

#### Testing Data

<!-- This should link to a Dataset Card if possible. -->

[More Information Needed]

#### Factors

<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->

[More Information Needed]

#### Metrics

<!-- These are the evaluation metrics being used, ideally with a description of why. -->

[More Information Needed]

### Results

[More Information Needed]

#### Summary



## Model Examination [optional]

<!-- Relevant interpretability work for the model goes here -->

[More Information Needed]

## Environmental Impact

<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->

Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).

- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]

## Technical Specifications [optional]

### Model Architecture and Objective

[More Information Needed]

### Compute Infrastructure

[More Information Needed]

#### Hardware

[More Information Needed]

#### Software

[More Information Needed]

## Citation [optional]

<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Model Card Authors [optional]

[More Information Needed]

## Model Card Contact

[More Information Needed]

### Framework versions

- PEFT 0.17.1
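
The card's quick-start section is left as a template placeholder. A minimal, unofficial sketch for loading one of these adapters with PEFT (paths follow this repository's layout; nothing below comes from the card itself):

    import torch
    from peft import PeftModel
    from transformers import AutoModelForCausalLM, AutoTokenizer

    ADAPTER_DIR = "seed_42/Qwen/Qwen2.5-7B-Instruct/adapters/agent_adapter"

    base = AutoModelForCausalLM.from_pretrained(
        "Qwen/Qwen2.5-7B-Instruct",
        torch_dtype=torch.bfloat16,
        device_map="auto",
    )
    tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-7B-Instruct")

    # Attach the saved LoRA weights on top of the frozen base model.
    model = PeftModel.from_pretrained(base, ADAPTER_DIR)
    model.eval()
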
seed_42/Qwen/Qwen2.5-7B-Instruct/adapters/agent_adapter/adapter_config.json ADDED
@@ -0,0 +1,42 @@
{
  "alpha_pattern": {},
  "auto_mapping": null,
  "base_model_name_or_path": "Qwen/Qwen2.5-7B-Instruct",
  "bias": "none",
  "corda_config": null,
  "eva_config": null,
  "exclude_modules": null,
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layer_replication": null,
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
  "lora_alpha": 64,
  "lora_bias": false,
  "lora_dropout": 0.0,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "modules_to_save": null,
  "peft_type": "LORA",
  "qalora_group_size": 16,
  "r": 32,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
    "down_proj",
    "o_proj",
    "k_proj",
    "v_proj",
    "q_proj",
    "gate_proj",
    "up_proj"
  ],
  "target_parameters": null,
  "task_type": "CAUSAL_LM",
  "trainable_token_indices": null,
  "use_dora": false,
  "use_qalora": false,
  "use_rslora": false
}
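
For reference, this JSON corresponds to the following peft.LoraConfig (a hedged reconstruction; the explicit target_modules list is presumably what PEFT expanded from the all-linear shorthand in .hydra/config.yaml):

    from peft import LoraConfig

    # Reconstruction for illustration; field names mirror the JSON above.
    agent_lora = LoraConfig(
        task_type="CAUSAL_LM",
        r=32,
        lora_alpha=64,
        lora_dropout=0.0,
        bias="none",
        target_modules=[
            "q_proj", "k_proj", "v_proj", "o_proj",
            "gate_proj", "up_proj", "down_proj",
        ],
    )
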
seed_42/Qwen/Qwen2.5-7B-Instruct/adapters/critic_adapter/adapter_config.json ADDED
@@ -0,0 +1,42 @@
{
  "alpha_pattern": {},
  "auto_mapping": null,
  "base_model_name_or_path": "Qwen/Qwen2.5-7B-Instruct",
  "bias": "none",
  "corda_config": null,
  "eva_config": null,
  "exclude_modules": null,
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layer_replication": null,
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
  "lora_alpha": 64,
  "lora_bias": false,
  "lora_dropout": 0.0,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "modules_to_save": null,
  "peft_type": "LORA",
  "qalora_group_size": 16,
  "r": 32,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
    "down_proj",
    "o_proj",
    "k_proj",
    "v_proj",
    "q_proj",
    "gate_proj",
    "up_proj"
  ],
  "target_parameters": null,
  "task_type": "CAUSAL_LM",
  "trainable_token_indices": null,
  "use_dora": false,
  "use_qalora": false,
  "use_rslora": false
}
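
The critic adapter's configuration is identical to the agent adapter's; only the trained weights differ. Since .hydra/config.yaml serves these adapters through vLLM with enable_lora: true, a standalone sketch of sampling from the agent adapter that way might look as follows (assumed vLLM usage, not code from this repository):

    from vllm import LLM, SamplingParams
    from vllm.lora.request import LoRARequest

    llm = LLM(
        model="Qwen/Qwen2.5-7B-Instruct",
        enable_lora=True,
        max_lora_rank=32,  # matches r=32 in the adapter configs
        dtype="bfloat16",
    )
    params = SamplingParams(temperature=1.0, top_p=1.0, max_tokens=400)

    outputs = llm.generate(
        ["You are Alice in a Trust-and-Split RPS negotiation. Send your first message."],
        params,
        lora_request=LoRARequest(
            "agent_adapter", 1,
            "seed_42/Qwen/Qwen2.5-7B-Instruct/adapters/agent_adapter",
        ),
    )
    print(outputs[0].outputs[0].text)
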
src_code_for_reproducibility/__init__.py ADDED
File without changes
src_code_for_reproducibility/chat_utils/__pycache__/apply_template.cpython-312.pyc ADDED
Binary file (3.64 kB)
src_code_for_reproducibility/chat_utils/__pycache__/chat_turn.cpython-312.pyc ADDED
Binary file (1.32 kB)
src_code_for_reproducibility/chat_utils/__pycache__/template_specific.cpython-312.pyc ADDED
Binary file (3.61 kB)
src_code_for_reproducibility/docs/source/contributing.rst ADDED
File without changes
src_code_for_reproducibility/docs/source/environments/diplomacy.rst ADDED
@@ -0,0 +1,459 @@
=================
Diplomacy
=================

The Diplomacy environment provides a multi-agent negotiation interface for the classic board game Diplomacy,
based on DeepMind's implementation. This document describes the API for interacting with the Diplomacy environment
and its associated agent handler.

Overview
--------

Diplomacy is a strategic board game set in Europe before World War I, where players control one of seven European powers
and negotiate with each other to gain control of supply centers. The game is played in turns, with each turn consisting
of movement phases, retreat phases, and build phases.

Our implementation adapts DeepMind's Diplomacy code to the Multi-Agent Negotiation Environment standard, allowing it
to be used with LLM agents through a text-based interface.

Game Rules
----------

Game Board and Powers
~~~~~~~~~~~~~~~~~~~~~

Diplomacy is played on a map of Europe divided into provinces. The game features seven Great Powers that players can control:

- England (blue)
- France (light blue)
- Germany (black)
- Italy (green)
- Austria-Hungary (red)
- Russia (white)
- Turkey (yellow)

Each power begins with three supply centers (except Russia, which starts with four) and an equal number of units.

Units and Movement
~~~~~~~~~~~~~~~~~~

There are two types of units in Diplomacy:

- **Armies (A)**: Can move to adjacent land provinces or be convoyed across water by fleets
- **Fleets (F)**: Can move to adjacent coastal provinces and sea regions

During movement phases, each unit can execute one of these orders:

- **Hold**: The unit remains in its current province (e.g., "A PAR H")

  - Format: [Unit Type] [Province] H
  - Example: "A PAR H" means "Army in Paris holds its position"

- **Move**: The unit attempts to move to an adjacent province (e.g., "A PAR - BUR")

  - Format: [Unit Type] [Current Province] - [Destination Province]
  - Example: "A PAR - BUR" means "Army in Paris moves to Burgundy"
  - Example: "F BRE - ENG" means "Fleet in Brest moves to the English Channel"

- **Support**: The unit supports another unit's move or hold (e.g., "A PAR S A MAR - BUR")

  - Format for supporting a move: [Unit Type] [Province] S [Unit Type] [Province] - [Destination]
  - Format for supporting a hold: [Unit Type] [Province] S [Unit Type] [Province]
  - Example: "A PAR S A MAR - BUR" means "Army in Paris supports the Army in Marseilles' move to Burgundy"
  - Example: "F LON S F NTH" means "Fleet in London supports the Fleet in the North Sea holding its position"

- **Convoy**: A fleet can convoy an army across water (e.g., "F ENG C A LON - BRE")

  - Format: [Fleet] [Sea Province] C [Army] [Coastal Province] - [Coastal Province]
  - Example: "F ENG C A LON - BRE" means "Fleet in the English Channel convoys the Army in London to Brest"

All orders are executed simultaneously, and conflicts are resolved based on strength (the number of supporting units).

Common Province Abbreviations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Diplomacy uses three-letter abbreviations for provinces. Some common ones include:

- **PAR**: Paris
- **LON**: London
- **BER**: Berlin
- **MUN**: Munich
- **BUR**: Burgundy
- **MAR**: Marseilles
- **BRE**: Brest
- **ENG**: English Channel
- **NTH**: North Sea
- **VIE**: Vienna
- **ROM**: Rome
- **VEN**: Venice
- **MOW**: Moscow
- **CON**: Constantinople

Example: Movement and Conflicts
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

For example, if France orders "A PAR - BUR" and Germany orders "A MUN - BUR", neither move succeeds, as the two have equal strength. However, if France also orders "A MAR S A PAR - BUR", then the French army from Paris successfully moves to Burgundy with a strength of 2 against Germany's strength of 1.
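
The strength arithmetic in this example can be sketched in a few lines of Python. This is a toy illustration only (the real adjudicator also handles cut supports, convoys, and many other edge cases); the order strings are the ones from the example above:

.. code-block:: python

    from collections import defaultdict

    orders = {
        "FRANCE": ["A PAR - BUR", "A MAR S A PAR - BUR"],
        "GERMANY": ["A MUN - BUR"],
    }

    # Strength of an attempted move = 1 (the moving unit) + number of valid supports.
    strength = defaultdict(int)
    for power, power_orders in orders.items():
        for order in power_orders:
            parts = order.split()
            if "S" in parts:    # support order: parts[4] is the supported unit's province
                strength[(power, parts[4])] += 1
            elif "-" in parts:  # move order: parts[1] is the moving unit's province
                strength[(power, parts[1])] += 1

    print(dict(strength))
    # {('FRANCE', 'PAR'): 2, ('GERMANY', 'MUN'): 1} -> Paris enters Burgundy.
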
Turn Structure
~~~~~~~~~~~~~~

A game year consists of five phases:

1. **Spring Movement**: All powers submit orders for their units
2. **Spring Retreat**: Units dislodged in the movement phase must retreat or be disbanded
3. **Fall Movement**: Another round of movement orders
4. **Fall Retreat**: Retreat orders for dislodged units
5. **Winter Adjustment**: Powers gain or lose units based on the number of supply centers they control

Supply Centers and Building
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Supply centers (marked on the map) are key to victory. When a power occupies a supply center during a Fall turn, it gains control of that center. During the Winter Adjustment phase:

- If you control more supply centers than you have units, you can build new units in your home supply centers
- If you control fewer supply centers than you have units, you must remove excess units

Example: Building and Removing Units
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

If France controls 5 supply centers but only has 4 units, during the Winter phase it can build one new unit in an unoccupied home supply center (Paris, Marseilles, or Brest). Conversely, if France controls only 3 supply centers but has 4 units, it must remove one unit of its choice.

Negotiation
~~~~~~~~~~~

A critical component of Diplomacy is the negotiation between players. Before submitting orders, players can communicate freely to form alliances, coordinate attacks, or mislead opponents. These negotiations are not binding, and betrayal is a common strategy.

Example: Alliance and Betrayal
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

England and France might agree to an alliance against Germany, with England promising to support France's move into Belgium. However, England could secretly order its fleet to move into Belgium itself, or support a German move instead.

Victory Conditions
~~~~~~~~~~~~~~~~~~

The game ends when one power controls 18 or more supply centers (a majority of the 34 total centers), or when players agree to a draw. In tournament settings, games may also end after a predetermined number of game years.

DiplomacyEnv
------------

The ``DiplomacyEnv`` class provides an interface to the Diplomacy game environment that follows the Multi-Agent
Negotiation Environment standard.

.. code-block:: python

    class DiplomacyEnv:
        """
        Multi-Agent Negotiation Environment for Diplomacy, adapting DeepMind's implementation
        to the MarlEnvironment standard.
        """
        def __init__(self,
                     initial_state: Optional[DiplomacyState] = None,
                     max_turns: int = 100,
                     points_per_supply_centre: bool = True,
                     forced_draw_probability: float = 0.0,
                     min_years_forced_draw: int = 35):
            """Initialize the Diplomacy environment.

            Args:
                initial_state: Initial DiplomacyState (optional)
                max_turns: Maximum number of turns in the game
                points_per_supply_centre: Whether to award points per supply center in case of a draw
                forced_draw_probability: Probability of forcing a draw after min_years_forced_draw
                min_years_forced_draw: Minimum years before considering a forced draw
            """
            # ...

        def reset(self):
            """Reset the environment to an initial state and return the initial observation.

            Returns:
                observation (dict): A dictionary where keys are agent identifiers and values are observations.
                    Each observation contains:
                    - board_state: Current state of the board
                    - current_season: Current season in the game
                    - player_index: Index of the player's power
                    - possible_actions: List of possible actions in DeepMind's format
                    - human_readable_actions: List of human-readable action descriptions
                    - supply_centers: List of supply centers owned by the player
                    - units: List of units owned by the player
                    - year: Current year in the game
            """
            # ...

        def step(self, actions):
            """Take a step in the environment using the provided actions.

            Args:
                actions (dict): A dictionary where keys are agent identifiers and values are actions.
                    Actions can be:
                    - List of integer actions in DeepMind's format
                    - List of string actions in text format (e.g., "A MUN - BER")

            Returns:
                observations (dict): A dictionary where keys are agent identifiers and values are observations.
                    Each observation has the same structure as in reset().
                done (bool): Whether the episode has ended.
                info (dict): Additional information about the environment, including:
                    - turn: Current turn number
                    - returns: Game returns if the game is done, otherwise None
                    - waiting_for: List of agents that still need to provide actions (if not all actions are provided)
            """
            # ...

        def get_log_info(self):
            """Get additional information about the environment for logging.

            Returns:
                log_info (dict): Information about the environment required to log the game, including:
                    - power_names: List of power names
                    - game_history: History of the game
                    - current_turn: Current turn number
                    - current_season: Current season name
                    - supply_centers: Dictionary mapping power names to supply center counts
            """
            # ...

        def render(self):
            """Render the current state of the environment.

            Displays a visualization of the current game state.
            """
            # ...

        def close(self):
            """Perform any necessary cleanup."""
            # ...


Key Implementation Details
~~~~~~~~~~~~~~~~~~~~~~~~~~

The ``DiplomacyEnv`` class implements several key features:

1. **Multi-Agent Support**: The environment tracks multiple agents (powers) and manages their interactions.

2. **Turn-Based Gameplay**: The environment enforces the turn structure of Diplomacy, including the different phases.

3. **Action Processing**: The environment can handle actions in both text format and DeepMind's integer format.

4. **Observation Generation**: The environment generates detailed observations for each agent, including board state, supply centers, and possible actions.

5. **Game Termination**: The environment tracks game termination conditions, including supply-center victory and maximum turn limits.

Observation Structure
~~~~~~~~~~~~~~~~~~~~~

Each agent receives an observation dictionary with the following structure:

.. code-block:: python

    {
        "board_state": np.ndarray,        # Board state representation
        "current_season": int,            # Season index (0-4)
        "player_index": int,              # Index of the player's power (0-6)
        "possible_actions": [int],        # List of possible actions in DeepMind's format
        "human_readable_actions": [str],  # List of human-readable action descriptions
        "supply_centers": [str],          # List of supply centers owned by the player
        "units": [dict],                  # List of units owned by the player
        "year": int                       # Current year in the game
    }

Action Structure
~~~~~~~~~~~~~~~~

Actions can be provided in two formats:

1. **Text Format**: String actions like ``"A MUN - BER"`` or ``"F NTH C A LON - BEL"``.

2. **Integer Format**: Lists of integers corresponding to DeepMind's action representation.

The environment will convert text actions to the internal format as needed.

DiplomacyAgent
--------------

The ``DiplomacyAgent`` class implements the agent handler interface for Diplomacy, processing observations from the environment and generating actions through an LLM.

.. code-block:: python

    class DiplomacyAgent:
        """
        Agent handler for Diplomacy, implementing the AgentState interface
        for the multi-agent negotiation standard.
        """

        def __init__(self,
                     power_name: str,
                     use_text_interface: bool = True,
                     system_prompt: Optional[str] = None):
            """Initialize the Diplomacy agent handler.

            Args:
                power_name: Name of the power this agent controls
                use_text_interface: Whether to use a text-based interface (vs. structured)
                system_prompt: Optional system prompt to use for the LLM
            """
            # ...

        def step(self, observation_from_env, policy_output=None):
            """Update the agent state based on the observation and action.

            Args:
                observation_from_env: The observation from the environment, with structure:
                    - board_state: Current state of the board
                    - current_season: Current season in the game
                    - player_index: Index of the player's power
                    - possible_actions: List of possible actions
                    - human_readable_actions: List of human-readable action descriptions
                    - supply_centers: List of supply centers owned by the player
                    - units: List of units owned by the player
                    - year: Current year in the game
                policy_output: The output of the policy (LLM response), or None for the initial prompt

            Returns:
                policy_id (str): The policy identifier ("llm_policy")
                policy_input (dict): The input to the policy, with structure:
                    - messages: List of conversation messages in the format:
                      [{"role": "system", "content": "..."},
                       {"role": "user", "content": "..."}]
                action: The official action to be sent to the environment, or None if not ready
                done (bool): Whether the LLM action is ready to be sent to the environment
                info (dict): Additional information about the agent:
                    - valid_action: Whether the extracted action is valid
            """
            # ...

        def get_log_info(self):
            """Get information about the agent required to log a trajectory.

            Returns:
                log_info (dict): Information about the agent required to log a trajectory:
                    - power_name: Name of the power this agent controls
                    - conversation_history: List of conversation messages
                    - current_action: The current action, if any
            """
            # ...

        def render(self):
            """Render the current state of the agent.

            Displays the agent's current state, including conversation history.
            """
            # ...

        def close(self):
            """Perform any necessary cleanup."""
            # ...


Key Implementation Details
~~~~~~~~~~~~~~~~~~~~~~~~~~

The ``DiplomacyAgent`` class implements several key features:

1. **LLM Interaction**: The agent generates prompts for an LLM and processes the LLM's responses to extract actions.

2. **Conversation Management**: The agent maintains a conversation history for coherent interactions with the LLM.

3. **Action Validation**: The agent validates extracted actions against the set of possible actions provided by the environment.

4. **Error Handling**: The agent generates clarification prompts when invalid actions are detected.

5. **Text-Based Interface**: The agent formats game state information into human-readable text for the LLM.

Prompt Structure
~~~~~~~~~~~~~~~~

The agent generates prompts that include:

1. **System Prompt**: Instructions and context for the LLM, explaining its role as a Diplomacy player.

2. **Game State Description**: A text description of the current game state, including:

   - Current year and season
   - Supply centers owned
   - Units controlled
   - Possible actions

3. **Action Request**: Instructions on how to format actions.

Example system prompt:

.. code-block:: text

    You are playing the role of FRANCE in a game of Diplomacy.
    Your goal is to control as many supply centers as possible.
    You can negotiate with other players and form alliances, but remember that
    these alliances are not binding. When you need to submit orders for your units,
    write them in the correct format, with each order on a new line.

Example game state description:

.. code-block:: text

    Year: 1901, Season: SPRING_MOVES
    You are playing as FRANCE.
    You currently control 3 supply centers: PAR, MAR, BRE.
    Your units are: A PAR, A MAR, F BRE.

    Please provide orders for your units. Here are your possible actions:
    A PAR - BUR
    A PAR - GAS
    A PAR - PIC
    A PAR H
    ...

    Submit your orders, one per line, in a format like: "A MUN - BER" or "F NTH C A LON - BEL"

Running Diplomacy Games
-----------------------

To run Diplomacy games with LLM agents, you can use the ``run_batched_matches`` function with the ``DiplomacyEnv`` and ``DiplomacyAgent`` classes:

.. code-block:: python

    from mllm.environments.diplomacy.diplomacy_env import DiplomacyEnv
    from mllm.environments.diplomacy.diplomacy_agent import DiplomacyAgent
    from mllm.run_matches import run_batched_matches

    # Create environment and agent handlers
    env = DiplomacyEnv(max_turns=30)

    agent_handlers = {
        "AUSTRIA": DiplomacyAgent(power_name="AUSTRIA"),
        "ENGLAND": DiplomacyAgent(power_name="ENGLAND"),
        "FRANCE": DiplomacyAgent(power_name="FRANCE"),
        "GERMANY": DiplomacyAgent(power_name="GERMANY"),
        "ITALY": DiplomacyAgent(power_name="ITALY"),
        "RUSSIA": DiplomacyAgent(power_name="RUSSIA"),
        "TURKEY": DiplomacyAgent(power_name="TURKEY")
    }

    # Define policy mapping (mapping from policy IDs to actual policy functions)
    policy_mapping = {
        "llm_policy": my_llm_policy_function
    }

    # Run the game
    game_results = run_batched_matches(
        envs=[env],
        agent_handlers_per_env=[agent_handlers],
        policy_mapping=policy_mapping,
        max_parallel_matches=1
    )

    # Process results
    for result in game_results:
        print(f"Game finished. Winner: {result['winner']}")
        print(f"Supply centers: {result['supply_centers']}")

This setup allows you to run Diplomacy games with LLM agents using the Multi-Agent Negotiation Environment standard.

Limitations and Considerations
------------------------------

1. **Performance**: Processing observations and actions for seven powers using LLMs can be computationally intensive.

2. **Action Parsing**: Extracting valid actions from LLM outputs may require sophisticated parsing and error handling.

3. **Game Complexity**: Diplomacy is a complex game with many rules and edge cases, which may be challenging for LLMs to fully grasp.

4. **Turn Duration**: Real Diplomacy games include negotiation phases of variable duration, which are not fully captured in this implementation.

5. **Text Formatting**: The quality of LLM interactions depends heavily on the formatting and clarity of text prompts.

Advanced Usage
--------------

For advanced usage, you can customize:

1. **System Prompts**: Modify agent behavior by providing custom system prompts.

2. **Observation Processing**: Extend the observation processing to include additional information.

3. **Action Parsing**: Implement more sophisticated action parsing for complex orders.

4. **Visualization**: Add custom visualization methods to the environment's render function.

5. **Logging**: Extend the logging capabilities to capture additional information about the game state.
src_code_for_reproducibility/docs/source/environments/dond.rst ADDED
@@ -0,0 +1,410 @@
=================
Deal or No Deal
=================

The Deal or No Deal (DoND) environment provides a multi-agent negotiation interface where players trade
items with different values. This document describes the API for interacting with the DoND environment
and its associated agent handler.

Overview
--------

Deal or No Deal is a negotiation game where two agents must agree on how to divide a set of items,
each of which has different values to each agent. The agents engage in a back-and-forth dialogue to
determine an allocation of the items, with each trying to maximize their own total value.

Our implementation follows the Multi-Agent Negotiation Environment standard, allowing it to be used
with LLM agents through a text-based interface.

Game Rules
----------

Basic Structure
~~~~~~~~~~~~~~~

The core mechanics of Deal or No Deal are:

1. Two agents negotiate over a set of items (e.g., books, balls, hats)
2. Each item has:

   - A specific quantity (how many of each item is available)
   - A value for each agent (which may differ between agents)

3. Agents take turns sending messages to negotiate how to split the items
4. Once an agreement is reached, agents finalize the deal
5. Points are awarded based on the value of the items each agent receives

Detailed Gameplay
~~~~~~~~~~~~~~~~~

Setup Phase
^^^^^^^^^^^

The game begins with:

- A set of items (e.g., "book", "hat", "ball")
- Each item has a quantity (e.g., 6 books, 2 hats, 4 balls)
- Each agent has private values for each item (e.g., books might be worth 5 points to one agent but only 2 points to the other)
- Agents are assigned roles (starting negotiator and responding negotiator)

Negotiation Phase
^^^^^^^^^^^^^^^^^

1. Agents take turns sending free-form text messages to each other
2. Messages can include offers, counter-offers, questions, or strategic communication
3. There is a maximum number of messages permitted (preventing endless negotiations)
4. Either agent can propose to finalize an agreement at any time

For example:

- Agent 1: "I propose I get all the books and you get all the hats and balls."
- Agent 2: "That doesn't work for me. How about you get 3 books and I get 3 books, all the hats, and all the balls?"
- Agent 1: "Let me counter-offer: I get 4 books and 2 balls, you get 2 books, all hats, and 2 balls."

Finalization Phase
^^^^^^^^^^^^^^^^^^

1. When an agent wants to finalize a deal, they must specify the exact allocation:

   - How many of each item they receive
   - How many of each item the other agent receives

2. The other agent must then either agree (by submitting the same allocation) or reject the finalization
3. If both agents submit matching finalizations, the deal is executed
4. If the finalizations don't match, no agreement is reached and both agents receive 0 points

Scoring
^^^^^^^

1. Each agent's score is calculated based on the value of the items they receive
2. The formula is: Sum(quantity_of_item_i × value_of_item_i_to_agent)
3. If no agreement is reached, both agents receive 0 points

Example Game
~~~~~~~~~~~~

Let's walk through a simple example:

**Setup:**

- Items: Books (4), Hats (2), Balls (6)
- Agent 1 values: Books=5, Hats=1, Balls=2
- Agent 2 values: Books=3, Hats=6, Balls=1

**Negotiation (simplified):**

1. Agent 1: "I would like all the books and balls. You can have the hats."
2. Agent 2: "That doesn't work for me. Books are valuable. I propose I get all the hats and 2 books, you get 2 books and all the balls."
3. Agent 1: "How about I get 3 books and all the balls, and you get 1 book and all the hats?"
4. Agent 2: "I accept your proposal."

**Finalization:**

- Agent 1 submits: Agent 1 gets (Books: 3, Hats: 0, Balls: 6), Agent 2 gets (Books: 1, Hats: 2, Balls: 0)
- Agent 2 submits the same allocation, confirming agreement

**Scoring:**

- Agent 1 score: (3 books × 5) + (0 hats × 1) + (6 balls × 2) = 15 + 0 + 12 = 27 points
- Agent 2 score: (1 book × 3) + (2 hats × 6) + (0 balls × 1) = 3 + 12 + 0 = 15 points
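
These scores can be reproduced directly from the allocation and the value tables. The snippet below is an illustrative check of the scoring formula; the environment performs this computation internally:

.. code-block:: python

    quantities = {"book": 4, "hat": 2, "ball": 6}
    values = {
        "agent1": {"book": 5, "hat": 1, "ball": 2},
        "agent2": {"book": 3, "hat": 6, "ball": 1},
    }
    allocation = {
        "agent1": {"book": 3, "hat": 0, "ball": 6},
        "agent2": {"book": 1, "hat": 2, "ball": 0},
    }

    def score(agent):
        # Value of the items this agent actually received.
        return sum(allocation[agent][item] * values[agent][item] for item in quantities)

    print(score("agent1"), score("agent2"))  # 27 15
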
94
+ ### Game Variations
95
+
96
+ The DoND environment supports several variations through configuration parameters:
97
+
98
+ #### Different Value Distributions
99
+
100
+ The environment offers multiple ways to assign values to items:
101
+
102
+ 1. **Standard Random Setup (dond_random_setup)**:
103
+ - Items have even-numbered quantities
104
+ - Each agent receives distinct random values for each item
105
+ - Values are drawn from a uniform distribution
106
+
107
+ 2. **Independent Random Values (independent_random_vals)**:
108
+ - Item quantities can be any number in the specified range
109
+ - Values for each agent are drawn independently
110
+ - Creates more varied negotiation scenarios
111
+
112
+ 3. **Bicameral Value Distribution (bicameral_vals_assignator)**:
113
+ - Creates a "high value" and "low value" distribution for each item
114
+ - Each agent values approximately half the items highly and half lowly
115
+ - Values are drawn from normal distributions with different means
116
+ - Creates scenarios with clear trade opportunities
117
+
118
+ #### Visibility Options
119
+
120
+ 1. **Finalization Visibility**:
121
+ - When enabled, both agents can see each other's finalization proposals
122
+ - When disabled, finalization proposals remain private until both are submitted
123
+
124
+ 2. **Other Values Visibility**:
125
+ - When enabled, agents can see each other's value functions
126
+ - When disabled, agents only know their own values
127
+ - Creates information asymmetry and richer negotiation dynamics
128
+
129
+ #### Game Modes
130
+
131
+ 1. **Cooperative Mode ("coop")**:
132
+ - Agents are encouraged to find mutually beneficial solutions
133
+ - Success is measured by the sum of both agents' scores
134
+
135
+ 2. **Competitive Mode ("comp")**:
136
+ - Agents aim to maximize their individual scores
137
+ - Creates more adversarial negotiations
138
+
139
+ #### Round Structure
140
+
141
+ 1. **Single Round**:
142
+ - One negotiation session between the same agents
143
+ - Simple evaluation of negotiation skills
144
+
145
+ 2. **Multiple Rounds**:
146
+ - Agents negotiate multiple times with different item setups
147
+ - Allows for learning and adaptation over time
148
+ - Roles can be swapped between rounds
149
+
150
+ DondEnv
151
+ ------------
152
+
153
+ The ``DondEnv`` class provides an interface to the Deal or No Deal environment that follows the Multi-Agent
154
+ Negotiation Environment standard.
155
+
156
+ .. code-block:: python
157
+
158
+ class DondEnv:
159
+ """
160
+ Multi-Agent Negotiation Environment for Deal or No Deal.
161
+ """
162
+ def __init__(
163
+ self,
164
+ agents,
165
+ mode="coop",
166
+ max_messages=None,
167
+ min_messages=None,
168
+ max_chars_per_message=None,
169
+ rounds_per_game=1,
170
+ random_setup_func=None,
171
+ random_setup_kwargs=None,
172
+ role_assignator_func=None,
173
+ role_assignator_func_kwargs=None,
174
+ finalization_visibility=False,
175
+ other_values_visibility=False,
176
+ random_seed=None
177
+ ):
178
+ """Initialize the Deal or No Deal environment.
179
+
180
+ Args:
181
+ agents: List of agent IDs participating in the game
182
+ mode: Game mode ("coop" or "comp")
183
+ max_messages: Maximum number of messages per agent per round
184
+ min_messages: Minimum number of messages per agent per round
185
+ max_chars_per_message: Maximum characters per message
186
+ rounds_per_game: Number of negotiation rounds to play
187
+ random_setup_func: Function to generate item quantities and values
188
+ random_setup_kwargs: Arguments for the random setup function
189
+ role_assignator_func: Function to assign roles to agents
190
+ role_assignator_func_kwargs: Arguments for the role assignator
191
+ finalization_visibility: Whether agents can see each other's finalizations
192
+ other_values_visibility: Whether agents can see each other's values
193
+ random_seed: Seed for reproducibility
194
+ """
195
+ # ...
196
+
197
+ def reset(self):
198
+ """Reset the environment to an initial state and return the initial observation.
199
+
200
+ Returns:
201
+ observation (dict): A dictionary where keys are agent identifiers and values are observations.
202
+ """
203
+ # ...
204
+
205
+ def step(self, actions):
206
+ """Take a step in the environment using the provided actions.
207
+
208
+ Args:
209
+ actions (dict): A dictionary where keys are agent identifiers and values are actions.
210
+ Actions can be messages or finalization proposals.
211
+
212
+ Returns:
213
+ observations (dict): A dictionary where keys are agent identifiers and values are observations.
214
+ done (bool): Whether the episode has ended.
215
+ info (dict): Additional information about the environment.
216
+ """
217
+ # ...
218
+
219
+ def get_state(self):
220
+ """Retrieve the current state of the game.
221
+
222
+ Returns:
223
+ state (dict): The current state of the game, including items, quantities, values, etc.
224
+ """
225
+ # ...
226
+
227
+ Key Implementation Details
228
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
229
+
230
+ The ``DondEnv`` class implements several key features:
231
+
232
+ 1. **Multi-Agent Support**: The environment tracks two agents and manages their alternating messages.
233
+
234
+ 2. **Turn-Based Dialogue**: The environment enforces turn structure and limits on message count.
235
+
236
+ 3. **Finalization Processing**: The environment validates and processes finalization proposals.
237
+
238
+ 4. **Random Setup**: The environment supports multiple methods of generating negotiation scenarios.
239
+
240
+ 5. **Round Management**: The environment can handle multiple rounds with different setups.
241
+
242
+ Observation Structure
243
+ ~~~~~~~~~~~~~~~~~~~~
244
+
245
+ Each agent receives an observation (state) dictionary with rich information about the game:
246
+
247
+ .. code-block:: python
248
+
249
+ {
250
+ "mode": str, # Game mode ("coop" or "comp")
251
+ "role_values": dict, # Value mappings for each role
252
+ "role_props": dict, # Properties for each role
253
+ "agent_to_role": dict, # Mapping from agent IDs to roles
254
+ "is_new_round": bool, # Whether this is the start of a new round
255
+ "is_new_game": bool, # Whether this is the start of a new game
256
+ "game_over": bool, # Whether the game is over
257
+ "items": list, # List of item names
258
+ "quantities": dict, # Quantities of each item
259
+ "has_finalized": bool, # Whether finalization has been proposed
260
+ "last_message": dict, # The last message sent
261
+ "messages_remaining": dict, # Number of messages each agent can still send
262
+ # And various history tracking fields
263
+ }
264
+
265
+ Action Structure
266
+ ~~~~~~~~~~~~~~~
267
+
268
+ Actions can be:
269
+
270
+ 1. **Text Messages**: Free-form text for negotiation.
271
+ 2. **Finalization Proposals**: Structured data specifying the exact allocation of items.
272
+
273
+ Example finalization format:
274
+
275
+ .. code-block:: python
276
+
277
+ {
278
+ "type": "finalize",
279
+ "allocation": {
280
+ "agent1": {"book": 3, "hat": 0, "ball": 6},
281
+ "agent2": {"book": 1, "hat": 2, "ball": 0}
282
+ }
283
+ }
284
+
285
+ Value Setup Functions
286
+ --------------------
287
+
288
+ The DoND environment provides several functions for setting up item values:
289
+
290
+ .. code-block:: python
291
+
292
+ def dond_random_setup(items, min_quant, max_quant, min_val, max_val, random_seed=None):
293
+ """
294
+ Generates items, even-numbered quantities and distinct random values for each category for both agents.
295
+
296
+ Args:
297
+ items (list): List of items.
298
+ min_quant (int): Minimum quantity per item.
299
+ max_quant (int): Maximum quantity per item.
300
+ min_val (int): Minimum value per item.
301
+ max_val (int): Maximum value per item.
302
+ random_seed (int, optional): Seed for random generation.
303
+
304
+ Returns:
305
+ tuple: (items, quantities, (val_starting_negotiator, val_responding_negotiator))
306
+ """
307
+ # ...
308
+
309
+ def independent_random_vals(items, min_quant, max_quant, min_val, max_val, random_seed=None):
310
+ """
311
+ Generates random quantities and independent random values for both agents.
312
+
313
+ Args:
314
+ Similar to dond_random_setup
315
+
316
+ Returns:
317
+ tuple: (items, quantities, (val_starting_negotiator, val_responding_negotiator))
318
+ """
319
+ # ...
320
+
321
+ def bicameral_vals_assignator(items, min_quant, max_quant, low_val_mean, low_val_std, high_val_mean, high_val_std, random_seed=None):
322
+ """
323
+ Generates values with a bicameral distribution - each agent values half the items highly.
324
+
325
+ Args:
326
+ items (list): List of items.
327
+ min_quant, max_quant: Range for quantities
328
+ low_val_mean, low_val_std: Mean and standard deviation for the "low value" distribution
329
+ high_val_mean, high_val_std: Mean and standard deviation for the "high value" distribution
330
+ random_seed: Seed for reproducibility
331
+
332
+ Returns:
333
+ tuple: (items, quantities, (val_starting_negotiator, val_responding_negotiator))
334
+ """
335
+ # ...
336
+
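+ A minimal usage sketch of ``dond_random_setup`` (the argument values below are
+ illustrative, not defaults):
+
+ .. code-block:: python
+
+     items, quantities, (val_starting, val_responding) = dond_random_setup(
+         items=["book", "hat", "ball"],
+         min_quant=2,
+         max_quant=8,
+         min_val=1,
+         max_val=10,
+         random_seed=42,
+     )
+     # val_starting and val_responding map each item to its per-unit value
+     # for the starting and responding negotiator, respectively.
+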
337
+ Running DoND Games
338
+ ----------------------
339
+
340
+ To run Deal or No Deal games with LLM agents, you can use the following structure:
341
+
342
+ .. code-block:: python
343
+
344
+ from mllm.environments.dond.dond_game import DondEnv
345
+ from mllm.environments.dond.dond_agent import DondAgent
346
+ from mllm.run_matches import run_batched_matches
347
+
348
+ # Create environment
349
+ env = DondEnv(
350
+ agents=["agent1", "agent2"],
351
+ mode="coop",
352
+ max_messages=10,
353
+ rounds_per_game=1,
354
+ random_setup_func="dond_random_setup",
355
+ random_setup_kwargs={
356
+ "items": ["book", "hat", "ball"],
357
+ "min_quant": 2,
358
+ "max_quant": 8,
359
+ "min_val": 1,
360
+ "max_val": 10
361
+ },
362
+ finalization_visibility=False
363
+ )
364
+
365
+ # Create agent handlers (implementation details would vary)
366
+ agent_handlers = {
367
+ "agent1": DondAgent(agent_id="agent1"),
368
+ "agent2": DondAgent(agent_id="agent2")
369
+ }
370
+
371
+ # Define policy mapping
372
+ policy_mapping = {
373
+ "llm_policy": my_llm_policy_function
374
+ }
375
+
376
+ # Run the game
377
+ game_results = run_batched_matches(
378
+ envs=[env],
379
+ agent_handlers_per_env=[agent_handlers],
380
+ policy_mapping=policy_mapping,
381
+ max_parallel_matches=1
382
+ )
383
+
384
+ Limitations and Considerations
385
+ ------------------------------
386
+
387
+ 1. **Negotiation Complexity**: The open-ended nature of negotiations can be challenging for some LLM agents.
388
+
389
+ 2. **Parsing Challenges**: Extracting structured finalization proposals from free-form text requires robust parsing.
390
+
391
+ 3. **Optimization Opportunities**: Different agents may employ different negotiation strategies to optimize outcomes.
392
+
393
+ 4. **Fairness Evaluation**: The environment allows research into questions of fair division and Pareto optimality.
394
+
395
+ 5. **Strategic Deception**: Agents might strategically misrepresent their true values, adding complexity to negotiations.
396
+
397
+ Advanced Usage
398
+ --------------
399
+
400
+ For advanced usage, you can:
401
+
402
+ 1. **Custom Value Functions**: Create more complex distributions of item values for specific research questions.
403
+
404
+ 2. **Novel Negotiation Scenarios**: Design item sets and values to test specific negotiation skills.
405
+
406
+ 3. **Curriculum Learning**: Create progressively more difficult negotiation scenarios.
407
+
408
+ 4. **Communication Analysis**: Analyze the language and strategies used in successful negotiations.
409
+
410
+ 5. **Multi-Round Dynamics**: Study how agents adapt their strategies over multiple rounds.
src_code_for_reproducibility/docs/source/environments/ipd.rst ADDED
@@ -0,0 +1,411 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ===========================
2
+ Iterated Prisoner's Dilemma
3
+ ===========================
4
+
5
+ The Iterated Prisoner's Dilemma environment provides a classic game theory setting for studying cooperation
6
+ and competition between agents. This document describes the API for interacting with the IPD environment
7
+ and its associated agent handler.
8
+
9
+ Overview
10
+ --------
11
+
12
+ The Prisoner's Dilemma is a fundamental problem in game theory that demonstrates why two rational individuals might not
13
+ cooperate, even when it appears in their best interest to do so. In the iterated version, the same two players
14
+ repeatedly face the same dilemma, allowing for the development of trust or retaliation based on previous interactions.
15
+
16
+ Our implementation follows the Multi-Agent Negotiation Environment standard, allowing it to be used with
17
+ LLM agents through a text-based interface.
18
+
19
+ Game Rules
20
+ ----------
21
+
22
+ Basic Premise
+ ~~~~~~~~~~~~~
23
+
24
+ The scenario behind the Prisoner's Dilemma is as follows:
25
+
26
+ Two criminals are arrested and imprisoned. Each prisoner is in solitary confinement with no means of communicating with
27
+ the other. The prosecutors lack sufficient evidence to convict the pair on the principal charge, but they have enough
28
+ to convict both on a lesser charge. Simultaneously, the prosecutors offer each prisoner a bargain:
29
+
30
+ - If both prisoners betray each other, each serves 2 years in prison (the "punishment" payoff)
31
+ - If one betrays the other while the other remains silent, the betrayer goes free (the "temptation" payoff) while the
32
+ silent accomplice serves 3 years (the "sucker" payoff)
33
+ - If both remain silent, each serves only 1 year in prison (the "reward" payoff)
34
+
35
+ Game Mechanics
+ ~~~~~~~~~~~~~~
36
+
37
+ In our implementation, the choices are simplified to:
38
+ - **C**: Cooperate (remain silent)
39
+ - **D**: Defect (betray the other prisoner)
40
+
41
+ Each round, both players simultaneously choose either C or D, and receive points based on the combination of their choices:
42
+
43
+ - Both choose C: Both receive the "reward" payoff (3 points by default)
44
+ - Both choose D: Both receive the "punishment" payoff (1 point by default)
45
+ - One chooses C, one chooses D: The defector receives the "temptation" payoff (5 points by default), while the cooperator
46
+ receives the "sucker" payoff (0 points by default)
47
+
48
+ Example: Single Round
+ ~~~~~~~~~~~~~~~~~~~~~
49
+
50
+ Let's see how a single round plays out (a payoff-table sketch follows the list):
51
+
52
+ 1. Alice and Bob simultaneously make their choices
53
+ 2. If Alice chooses C and Bob chooses C:
54
+ - Alice receives 3 points
55
+ - Bob receives 3 points
56
+ 3. If Alice chooses C and Bob chooses D:
57
+ - Alice receives 0 points
58
+ - Bob receives 5 points
59
+ 4. If Alice chooses D and Bob chooses C:
60
+ - Alice receives 5 points
61
+ - Bob receives 0 points
62
+ 5. If Alice chooses D and Bob chooses D:
63
+ - Alice receives 1 point
64
+ - Bob receives 1 point
65
+
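+ These outcomes can be captured as a simple payoff lookup (a sketch using the default
+ payoff values):
+
+ .. code-block:: python
+
+     PAYOFFS = {
+         ("C", "C"): (3, 3),
+         ("C", "D"): (0, 5),
+         ("D", "C"): (5, 0),
+         ("D", "D"): (1, 1),
+     }
+
+     alice_points, bob_points = PAYOFFS[("C", "D")]  # -> (0, 5)
+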
66
+ Iterated Game Structure
+ ~~~~~~~~~~~~~~~~~~~~~~~
67
+
68
+ The iterated version repeats this basic game for a fixed number of rounds. The key features are:
69
+
70
+ 1. Players know the total number of rounds in advance
71
+ 2. After each round, players learn what choice the other player made
72
+ 3. Players maintain a cumulative score across all rounds
73
+ 4. Players can adjust their strategy based on the history of previous interactions
74
+
75
+ Game Variations
+ ~~~~~~~~~~~~~~~
76
+
77
+ The IPD environment supports several variations through configuration parameters:
78
+
79
+ Different Payoff Matrices
+ ^^^^^^^^^^^^^^^^^^^^^^^^^
80
+
81
+ The standard payoff values can be modified to create different incentive structures (a configuration sketch follows the list):
82
+ - **Traditional PD**: reward=3, punishment=1, temptation=5, sucker=0
83
+ - **Weak Temptation**: reward=3, punishment=1, temptation=4, sucker=0 (reduces the incentive to defect)
84
+ - **Harsh Punishment**: reward=3, punishment=0, temptation=5, sucker=0 (increases the cost of mutual defection)
85
+ - **Generous**: reward=4, punishment=2, temptation=5, sucker=1 (cushions the blow of being betrayed)
86
+
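+ A "Weak Temptation" game, for example, can be configured directly through the
+ constructor parameters documented below (a sketch):
+
+ .. code-block:: python
+
+     env = IPDEnv(
+         rounds_per_game=20,
+         reward=3.0,
+         punishment=1.0,
+         temptation=4.0,  # reduced from the traditional 5.0
+         sucker=0.0,
+     )
+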
87
+ Game Length Variations
+ ^^^^^^^^^^^^^^^^^^^^^^
88
+
89
+ The number of rounds can significantly impact strategy:
90
+ - **Short Games** (5-10 rounds): Incentivizes more defection, especially near the end
91
+ - **Medium Games** (20-50 rounds): Allows for the development of tit-for-tat and forgiveness strategies
92
+ - **Long Games** (100+ rounds): Favors steady cooperation with occasional "probing" defections
93
+
94
+ Common Strategies
+ ~~~~~~~~~~~~~~~~~
95
+
96
+ While not enforced by the environment, several well-known strategies can emerge (a Tit for Tat sketch follows the list):
97
+ - **Always Cooperate**: Always choose C
98
+ - **Always Defect**: Always choose D
99
+ - **Tit for Tat**: Start with C, then copy what the opponent did in the previous round
100
+ - **Forgiving Tit for Tat**: Like Tit for Tat, but occasionally cooperate even after being defected against
101
+ - **Grudger**: Cooperate until the opponent defects once, then always defect
102
+ - **Random**: Choose randomly between C and D
103
+
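+ As an illustration, Tit for Tat can be written as a small function over the
+ ``last_round_actions`` field of the observation documented below (a sketch; the
+ environment itself does not ship these strategies):
+
+ .. code-block:: python
+
+     def tit_for_tat(opponent_id: str, observation: dict) -> str:
+         last = observation.get("last_round_actions") or {}
+         # Cooperate on the first round, then mirror the opponent's last move.
+         return last.get(opponent_id, "C")
+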
104
+ IPDEnv
105
+ ------
106
+
107
+ The ``IPDEnv`` class provides an interface to the Iterated Prisoner's Dilemma environment that follows the
108
+ Multi-Agent Negotiation Environment standard.
109
+
110
+ .. code-block:: python
111
+
112
+ class IPDEnv:
113
+ """
114
+ Iterated Prisoner's Dilemma environment following the MarlEnvironment standard.
115
+
116
+ In each round of the game, two agents simultaneously choose to either cooperate (C) or defect (D).
117
+ The payoffs are as follows:
118
+ - If both cooperate: Both receive the "reward" (usually 3 points)
119
+ - If both defect: Both receive the "punishment" (usually 1 point)
120
+ - If one cooperates and one defects: The defector receives the "temptation" (usually 5 points)
121
+ and the cooperator receives the "sucker" payoff (usually 0 points)
122
+
123
+ The game is played for a specified number of rounds.
124
+ """
125
+
126
+ def __init__(
127
+ self,
128
+ rounds_per_game: int = 10,
129
+ reward: float = 3.0, # Both cooperate
130
+ punishment: float = 1.0, # Both defect
131
+ temptation: float = 5.0, # Defector's reward when other cooperates
132
+ sucker: float = 0.0, # Cooperator's reward when other defects
133
+ random_seed: Optional[int] = None,
134
+ ):
135
+ """
136
+ Initialize the Iterated Prisoner's Dilemma environment.
137
+
138
+ Args:
139
+ rounds_per_game: Number of rounds to play
140
+ reward: Payoff when both agents cooperate
141
+ punishment: Payoff when both agents defect
142
+ temptation: Payoff for defecting when other agent cooperates
143
+ sucker: Payoff for cooperating when other agent defects
144
+ random_seed: Random seed for reproducibility
145
+ """
146
+ # ...
147
+
148
+ def reset(self) -> Dict[str, Dict[str, Any]]:
149
+ """
150
+ Reset the environment to an initial state and return the initial observation.
151
+
152
+ Returns:
153
+ observation (dict): A dictionary where keys are agent identifiers and values are observations.
154
+ """
155
+ # ...
156
+
157
+ def step(self, actions: Dict[str, str]) -> Tuple[Dict[str, Dict[str, Any]], bool, Dict[str, Any]]:
158
+ """
159
+ Take a step in the environment using the provided actions.
160
+
161
+ Args:
162
+ actions (dict): A dictionary where keys are agent identifiers and values are actions ('C' or 'D').
163
+
164
+ Returns:
165
+ observations (dict): A dictionary where keys are agent identifiers and values are observations.
166
+ done (bool): Whether the episode has ended.
167
+ info (dict): Additional information about the environment.
168
+ """
169
+ # ...
170
+
171
+ Key Implementation Details
172
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
173
+
174
+ The ``IPDEnv`` class implements several key features:
175
+
176
+ 1. **Two-Agent Support**: The environment tracks two agents ("alice" and "bob") and manages their interactions.
177
+
178
+ 2. **Round-Based Play**: The environment enforces turn structure and tracks game history.
179
+
180
+ 3. **Payoff Matrix**: The environment calculates rewards based on the standard prisoner's dilemma payoff matrix.
181
+
182
+ 4. **Observation Generation**: The environment generates detailed observations for each agent, including action history and rewards.
183
+
184
+ 5. **Game Termination**: The environment tracks game termination after the specified number of rounds.
185
+
186
+ Observation Structure
187
+ ~~~~~~~~~~~~~~~~~~~~~
188
+
189
+ Each agent receives an observation dictionary with the following structure:
190
+
191
+ .. code-block:: python
192
+
193
+ {
194
+ "current_round": int, # Current round number (0-indexed)
195
+ "rounds_per_game": int, # Total number of rounds in the game
196
+ "history": List[Dict], # Complete game history so far
197
+ "last_round_actions": Dict[str, str], # Actions from the previous round (if any)
198
+ "last_round_reward": float, # Reward received in the previous round (if any)
199
+ "total_reward": float, # Cumulative reward so far
200
+ "payoff_matrix": Dict[str, float], # The game's payoff matrix values
201
+ }
202
+
203
+ Action Structure
204
+ ~~~~~~~~~~~~~~~~
205
+
206
+ Actions are simple strings:
207
+
208
+ 1. ``"C"`` for Cooperate
209
+ 2. ``"D"`` for Defect
210
+
211
+ IPDAgent
212
+ --------------
213
+
214
+ The ``IPDAgent`` class implements the agent handler interface for the Iterated Prisoner's Dilemma, processing observations from the environment and generating actions through an LLM.
215
+
216
+ .. code-block:: python
217
+
218
+ class IPDAgent:
219
+ """
220
+ Agent handler for Iterated Prisoner's Dilemma, implementing the AgentState interface
221
+ for the multi-agent negotiation standard.
222
+ """
223
+
224
+ def __init__(
225
+ self,
226
+ agent_id: str,
227
+ policy_id: str = "llm_policy",
228
+ system_prompt: Optional[str] = None,
229
+ max_errors: int = 3,
230
+ opponent_id: Optional[str] = None,
231
+ ):
232
+ """
233
+ Initialize the IPD agent handler.
234
+
235
+ Args:
236
+ agent_id: Identifier for this agent ("alice" or "bob")
237
+ policy_id: Identifier for the policy this agent uses
238
+ system_prompt: Optional custom system prompt for the LLM
239
+ max_errors: Maximum number of parsing errors before defaulting to cooperate
240
+ opponent_id: Optional identifier of the opponent (inferred if not provided)
241
+ """
242
+ # ...
243
+
244
+ def step(self, observation_from_env: Dict[str, Any], policy_output: str = None) -> Tuple[str, Dict[str, Any], str, bool, Dict[str, Any]]:
245
+ """
246
+ Update the agent state based on the observation and process the policy output.
247
+
248
+ Args:
249
+ observation_from_env: The observation from the environment
250
+ policy_output: The output from the policy (LLM response)
251
+
252
+ Returns:
253
+ policy_id: The policy identifier
254
+ policy_input: The input to the policy
255
+ action: The action to be sent to the environment
256
+ done: Whether the action is ready to be sent to the environment
257
+ info: Additional information about the agent
258
+ """
259
+ # ...
260
+
261
+ Key Implementation Details
262
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
263
+
264
+ The ``IPDAgent`` class implements several key features:
265
+
266
+ 1. **LLM Interaction**: The agent generates prompts for an LLM and processes the LLM's responses.
267
+
268
+ 2. **Action Extraction**: The agent parses the LLM's output to extract valid actions (C or D).
269
+
270
+ 3. **Error Handling**: The agent provides helpful error messages when parsing fails and defaults to cooperation after multiple failures.
271
+
272
+ 4. **History Tracking**: The agent maintains and provides the complete game history in its prompts.
273
+
274
+ 5. **Strategy Explanation**: The agent can extract and log the reasoning behind an LLM's decisions.
275
+
276
+ Prompt Structure
277
+ ~~~~~~~~~~~~~~~~
278
+
279
+ The agent generates prompts that include:
280
+
281
+ 1. **System Prompt**: Instructions and context for the LLM, explaining its role and the rules of the Prisoner's Dilemma.
282
+
283
+ 2. **Game State Description**: A text description of the current game state, including:
284
+ - Current round number
285
+ - History of previous rounds (if any)
286
+ - Cumulative score
287
+
288
+ 3. **Action Request**: Instructions on how to format the response, requiring an explicit action tag.
289
+
290
+ Example system prompt:
291
+
292
+ .. code-block:: text
293
+
294
+ You are playing as Alice in an Iterated Prisoner's Dilemma game against Bob.
295
+ In each round, you must choose to either Cooperate (C) or Defect (D).
296
+
297
+ The payoffs are:
298
+ - If both players Cooperate: You each get 3 points
299
+ - If both players Defect: You each get 1 point
300
+ - If you Cooperate and Bob Defects: You get 0 points, Bob gets 5 points
301
+ - If you Defect and Bob Cooperates: You get 5 points, Bob gets 0 points
302
+
303
+ Your goal is to maximize your total points across all rounds.
304
+ The game will last for exactly 10 rounds, and both players know this.
305
+
306
+ Example game state prompt:
307
+
308
+ .. code-block:: text
309
+
310
+ Current round: 3/10
311
+
312
+ History:
313
+ Round 1: You chose C, Bob chose C. You earned 3 points.
314
+ Round 2: You chose C, Bob chose D. You earned 0 points.
315
+
316
+ Your total score so far: 3 points
317
+
318
+ What is your choice for round 3?
319
+ Please respond with <action>C</action> to cooperate or <action>D</action> to defect,
320
+ and explain your reasoning.
321
+
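+ A minimal sketch of extracting the action tag from a response (the exact parsing
+ logic inside ``IPDAgent`` may differ):
+
+ .. code-block:: python
+
+     import re
+
+     def parse_action(response: str) -> str | None:
+         # Look for <action>C</action> or <action>D</action>.
+         match = re.search(r"<action>\s*([CD])\s*</action>", response)
+         return match.group(1) if match else None
+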
322
+ Running IPD Games
323
+ ----------------------
324
+
325
+ To run Iterated Prisoner's Dilemma games with LLM agents, you can use the following code structure:
326
+
327
+ .. code-block:: python
328
+
329
+ from mllm.environments.ipd.ipd_game import IPDEnv
330
+ from mllm.environments.ipd.ipd_agent import IPDAgent
331
+ from mllm.run_matches import run_batched_matches
332
+
333
+ # Create environment
334
+ env = IPDEnv(
335
+ rounds_per_game=10,
336
+ reward=3.0,
337
+ punishment=1.0,
338
+ temptation=5.0,
339
+ sucker=0.0
340
+ )
341
+
342
+ # Create agent handlers
343
+ agent_handlers = {
344
+ "alice": IPDAgent(agent_id="alice"),
345
+ "bob": IPDAgent(agent_id="bob")
346
+ }
347
+
348
+ # Define policy mapping
349
+ policy_mapping = {
350
+ "llm_policy": my_llm_policy_function
351
+ }
352
+
353
+ # Run the game
354
+ game_results = run_batched_matches(
355
+ envs=[env],
356
+ agent_handlers_per_env=[agent_handlers],
357
+ policy_mapping=policy_mapping,
358
+ max_parallel_matches=1
359
+ )
360
+
361
+ # Process results
362
+ for result in game_results:
363
+ print(f"Game finished. Scores: {result['total_rewards']}")
364
+
365
+ Statistics and Analysis
366
+ -----------------------
367
+
368
+ The IPD environment includes utility functions for analyzing game outcomes:
369
+
370
+ 1. **Cooperation Rates**: Percentage of rounds where each agent cooperated.
371
+ 2. **Mutual Cooperation/Defection**: Percentage of rounds where both agents made the same choice.
372
+ 3. **Score Distribution**: Analysis of how points were accumulated over the game.
373
+
374
+ These statistics can be calculated using the ``gather_ipd_statistics`` function:
375
+
376
+ .. code-block:: python
377
+
378
+ from mllm.environments.ipd.ipd_statistics_funcs import gather_ipd_statistics
379
+
380
+ stats = gather_ipd_statistics(match_info, env_info)
381
+ print(f"Cooperation rates: {stats['cooperation_rate']}")
382
+ print(f"Mutual cooperation rate: {stats['mutual_cooperation_rate']}")
383
+ print(f"Mutual defection rate: {stats['mutual_defection_rate']}")
384
+
385
+ Limitations and Considerations
386
+ ------------------------------
387
+
388
+ 1. **Determinism**: The environment is deterministic, with randomness only in initialization if a seed is provided.
389
+
390
+ 2. **Limited Player Count**: The IPD environment supports exactly two players.
391
+
392
+ 3. **Perfect Information**: Both players have perfect information about the game history.
393
+
394
+ 4. **Simultaneous Actions**: Both players act simultaneously, which requires adaptations for some LLM interfaces.
395
+
396
+ 5. **Fixed Game Length**: The total number of rounds is fixed and known to both players from the start.
397
+
398
+ Advanced Usage
399
+ --------------
400
+
401
+ For advanced usage, you can customize:
402
+
403
+ 1. **Payoff Matrix**: Modify reward values to create different incentive structures.
404
+
405
+ 2. **System Prompts**: Customize the LLM's understanding of the game and potential strategies.
406
+
407
+ 3. **Error Handling**: Adjust how the agent responds to invalid LLM outputs.
408
+
409
+ 4. **Analysis**: Create custom statistics gathering for specific research questions.
410
+
411
+ 5. **Integration**: Connect the IPD environment to other negotiation frameworks or tournament systems.
src_code_for_reproducibility/docs/source/launch.rst ADDED
File without changes
src_code_for_reproducibility/docs/source/media/runbatch.png ADDED
src_code_for_reproducibility/docs/source/src.models.dummy_local_llm.rst ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ src.models.dummy\_local\_llm module
2
+ ===================================
3
+
4
+ .. automodule:: src.models.dummy_local_llm
5
+ :members:
6
+ :undoc-members:
7
+ :show-inheritance:
src_code_for_reproducibility/docs/source/src.models.hf_agent.rst ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ src.models.hf\_agent module
2
+ ===========================
3
+
4
+ .. automodule:: src.models.hf_agent
5
+ :members:
6
+ :undoc-members:
7
+ :show-inheritance:
src_code_for_reproducibility/docs/source/src.models.local_llm.rst ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ src.models.local\_llm module
2
+ ============================
3
+
4
+ .. automodule:: src.models.local_llm
5
+ :members:
6
+ :undoc-members:
7
+ :show-inheritance:
src_code_for_reproducibility/docs/source/src.run.rst ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ src.run module
2
+ ==============
3
+
4
+ .. automodule:: src.run
5
+ :members:
6
+ :undoc-members:
7
+ :show-inheritance:
src_code_for_reproducibility/docs/source/src.utils.common_imports.rst ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ src.utils.common\_imports module
2
+ ================================
3
+
4
+ .. automodule:: src.utils.common_imports
5
+ :members:
6
+ :undoc-members:
7
+ :show-inheritance:
src_code_for_reproducibility/docs/source/src.utils.extra_stats.rst ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ src.utils.extra\_stats module
2
+ =============================
3
+
4
+ .. automodule:: src.utils.extra_stats
5
+ :members:
6
+ :undoc-members:
7
+ :show-inheritance:
src_code_for_reproducibility/docs/source/src.utils.inherit_args.rst ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ src.utils.inherit\_args module
2
+ ==============================
3
+
4
+ .. automodule:: src.utils.inherit_args
5
+ :members:
6
+ :undoc-members:
7
+ :show-inheritance:
src_code_for_reproducibility/docs/source/src.utils.log_gpu_usage.rst ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ src.utils.log\_gpu\_usage module
2
+ ================================
3
+
4
+ .. automodule:: src.utils.log_gpu_usage
5
+ :members:
6
+ :undoc-members:
7
+ :show-inheritance:
src_code_for_reproducibility/docs/source/usage.rst ADDED
File without changes
src_code_for_reproducibility/markov_games/__init__.py ADDED
File without changes
src_code_for_reproducibility/markov_games/alternative_actions_runner.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ import copy
3
+ import json
4
+ import os.path
5
+ from typing import Any, Tuple
6
+
7
+ from mllm.markov_games.markov_game import AgentAndActionSafeCopy, MarkovGame
8
+ from mllm.markov_games.rollout_tree import (
9
+ AgentActLog,
10
+ RolloutTreeBranchNode,
11
+ RolloutTreeNode,
12
+ RolloutTreeRootNode,
13
+ StepLog,
14
+ )
15
+
16
+ AgentId = str
17
+
18
+
19
+
20
+ async def run_with_unilateral_alt_action(
21
+ markov_game: MarkovGame,
22
+ agent_id: AgentId,
23
+ time_step: int,
24
+ branch_node: RolloutTreeBranchNode,
25
+ max_depth: int,
26
+ ):
27
+ """
28
+ This function is used to generate a new branch for a given agent.
29
+ """
30
+
31
+ # Generate alternative action and take a step
32
+ await markov_game.set_action_of_agent(agent_id)
33
+ terminated: bool = markov_game.take_simulation_step()
34
+ step_log = markov_game.get_step_log()
35
+ first_alternative_node = RolloutTreeNode(
36
+ step_log=step_log,
37
+ time_step=time_step,
38
+ )
39
+
40
+ # Generate rest of trajectory up to max depth
41
+ time_step += 1
42
+ counter = 1
43
+ previous_node = first_alternative_node
44
+ while not terminated and counter <= max_depth:
45
+ terminated, step_log = await markov_game.step()
46
+ current_node = RolloutTreeNode(step_log=step_log, time_step=time_step)
47
+ previous_node.child = current_node
48
+ previous_node = current_node
49
+ counter += 1
50
+ time_step += 1
51
+
52
+ if branch_node.branches is None:
53
+ branch_node.branches = {agent_id: [first_alternative_node]}
54
+ else:
55
+ agent_branches = branch_node.branches.get(agent_id, [])
56
+ agent_branches.append(first_alternative_node)
57
+ branch_node.branches[agent_id] = agent_branches
58
+
59
+
60
+ async def AlternativeActionsRunner(
61
+ markov_game: MarkovGame,
62
+ output_folder: str,
63
+ nb_alternative_actions: int,
64
+ max_depth: int,
65
+ branch_only_on_new_round: bool = False,
66
+ ):
67
+ """
68
+ This method generates a trajectory with partially completed branches,
69
+ where the branching comes from taking unilaterally different actions.
70
+ The resulting data is used to estimate the updated advantage alignment policy gradient terms.
71
+ Let k := nb_alternative_actions. Then the number of steps generated is O(Tk), where T is
72
+ the maximum trajectory length.
73
+ """
74
+
75
+ tasks = []
76
+ time_step = 0
77
+ terminated = False
78
+ root = RolloutTreeRootNode(
79
+ id=markov_game.get_id(),
80
+ crn_id=markov_game.get_crn_id()
81
+ )
82
+ previous_node = root
83
+
84
+ while not terminated:
85
+ mg_before_action = markov_game.get_safe_copy()
86
+
87
+ # Get safe copies for main branch
88
+ agent_action_safe_copies: dict[
89
+ AgentId, AgentAndActionSafeCopy
90
+ ] = await markov_game.get_actions_of_agents_without_side_effects()
91
+
92
+ markov_game.set_actions_of_agents_manually(agent_action_safe_copies)
93
+ terminated = markov_game.take_simulation_step()
94
+ main_node = RolloutTreeNode(
95
+ step_log=markov_game.get_step_log(), time_step=time_step
96
+ )
97
+ branch_node = RolloutTreeBranchNode(main_child=main_node)
98
+ previous_node.child = branch_node
99
+ previous_node = main_node
100
+
101
+ # Get alternative branches by generating new unilateral actions
102
+ for agent_id in markov_game.agent_ids:
103
+ for _ in range(nb_alternative_actions):
104
+ # Get safe copies for branches
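+ # Only the other agent's copied action is replayed on the branch; the branching
+ # agent regenerates its own action inside run_with_unilateral_alt_action.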
105
+ branch_agent_action_safe_copies: dict[
106
+ AgentId, AgentAndActionSafeCopy
107
+ ] = {
108
+ agent_id: AgentAndActionSafeCopy(
109
+ action=copy.deepcopy(agent_action_safe_copy.action),
110
+ action_info=copy.deepcopy(agent_action_safe_copy.action_info),
111
+ agent_after_action=agent_action_safe_copy.agent_after_action.get_safe_copy(),
112
+ )
113
+ for agent_id, agent_action_safe_copy in agent_action_safe_copies.items()
114
+ }
115
+ mg_branch: MarkovGame = mg_before_action.get_safe_copy()
116
+ other_agent_id = [id for id in mg_branch.agent_ids if id != agent_id][0]
117
+ mg_branch.set_action_and_agent_after_action_manually(
118
+ agent_id=other_agent_id,
119
+ agent_action_safe_copy=branch_agent_action_safe_copies[
120
+ other_agent_id
121
+ ],
122
+ )
123
+ task = asyncio.create_task(
124
+ run_with_unilateral_alt_action(
125
+ markov_game=mg_branch,
126
+ time_step=time_step,
127
+ agent_id=agent_id,
128
+ branch_node=branch_node,
129
+ max_depth=max_depth,
130
+ )
131
+ )
132
+ tasks.append(task)
133
+ time_step += 1
134
+
135
+ # wait for all branches to complete
136
+ await asyncio.gather(*tasks)
137
+
138
+ return root
src_code_for_reproducibility/markov_games/group_timesteps.py ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This module contains the logic for grouping time steps.
3
+ """
4
+ import copy
5
+ from typing import Callable
6
+
7
+ from mllm.markov_games.markov_game import MarkovGame
8
+ from mllm.markov_games.rollout_tree import (
9
+ AgentActLog,
10
+ RolloutTreeBranchNode,
11
+ RolloutTreeNode,
12
+ RolloutTreeRootNode,
13
+ StepLog,
14
+ )
15
+ from mllm.markov_games.simulation import SimulationStepLog
16
+
17
+ AgentId = str
18
+
19
+
20
+ def group_time_steps(
21
+ rollout_tree: RolloutTreeRootNode,
22
+ accumulation_stop_condition: Callable[[StepLog], bool],
23
+ ) -> RolloutTreeRootNode:
24
+ """
25
+ During generation, we create rollout trees according to the real time steps.
26
+ However, during training, we might want to treat groups of time steps as a single time step.
27
+ As a concrete example, take Trust-and-Split. At each round, say we have X time steps of communication and then one time step for the split.
28
+ Then the communication actions will not get any reward, and the split action will get the reward. During REINFORCE training, with discounting, this
29
+ can cause training instability. We could instead treat every action in the round as being part of a single action, and give it the reward of the split action.
30
+ This method helps to do this sort of grouping.
31
+ It accumulates actions until the accumulation_stop_condition is met, and then creates a new node with the accumulated actions.
32
+ It then recursively calls itself on the child node.
33
+ Details:
34
+ - The reward for the group is the reward of the last time step in the group.
35
+ - The simulation log for the group is the simulation log of the last time step in the group.
36
+ - The state end for the group becomes the first state end in the group.
37
+ - The agent info for the group is the agent info of the last time step in the group.
38
+ """
39
+
40
+ def group_step_logs(step_logs: list[StepLog]) -> StepLog:
41
+ """
42
+ Concatenate per-agent chat turns across steps; keep only the first is_state_end.
43
+ """
44
+ last_sim_log = step_logs[-1].simulation_step_log
45
+ agent_ids = {aid for s in step_logs for aid in s.action_logs.keys()}
46
+ grouped_logs: dict[AgentId, AgentActLog] = {}
47
+ for aid in agent_ids:
48
+ turns = []
49
+ for s in step_logs:
50
+ act = s.action_logs.get(aid)
51
+ if act and act.chat_turns:
52
+ turns.extend(copy.deepcopy(act.chat_turns))
53
+ disable_is_state_end = False
54
+ # Only the first state_end should be True, the rest should be False
55
+ for t in turns:
56
+ if t.is_state_end:
57
+ if disable_is_state_end:
58
+ t.is_state_end = False
59
+ else:
60
+ disable_is_state_end = True
61
+ continue
62
+ grouped_logs[aid] = AgentActLog(
63
+ chat_turns=turns, info=step_logs[-1].action_logs[aid].info
64
+ )
65
+ return StepLog(action_logs=grouped_logs, simulation_step_log=last_sim_log)
66
+
67
+ def group_time_steps_rec(
68
+ current_node: RolloutTreeNode | RolloutTreeBranchNode,
69
+ group_time_step: int,
70
+ accumulation_step_logs: list[StepLog],
71
+ ) -> RolloutTreeNode | RolloutTreeBranchNode:
72
+ """
73
+ Groups time steps. Recursion is used to handle branches.
74
+ """
75
+ assert isinstance(current_node, RolloutTreeNode) or isinstance(
76
+ current_node, RolloutTreeBranchNode
77
+ ), "Current node must be a tree node or a branch node. Is of type: " + str(
78
+ type(current_node)
79
+ )
80
+ first_group_node = None
81
+ current_group_node = None
82
+ while current_node is not None:
83
+ if isinstance(current_node, RolloutTreeBranchNode):
84
+ raise Exception(
85
+ "Grouping timesteps by round is not supported for branching trajectories yet."
86
+ )
87
+ # Special recursive case for branches
88
+ # if isinstance(current_node, RolloutTreeBranchNode):
89
+ # branches = {}
90
+ # for agent_id, branch_nodes in current_node.branches.items():
91
+ # branch_group_nodes = []
92
+ # for branch_node in branch_nodes:
93
+ # branch_group_node = group_time_steps_rec(
94
+ # current_node=branch_node,
95
+ # group_time_step=group_time_step,
96
+ # accumulation_step_logs=copy.deepcopy(accumulation_step_logs))
97
+ # branch_group_nodes.append(branch_group_node)
98
+ # branches[agent_id] = branch_group_nodes
99
+
100
+ # main_child_group_node = group_time_steps_rec(
101
+ # current_node=current_node.main_child,
102
+ # group_time_step=group_time_step,
103
+ # accumulation_step_logs=copy.deepcopy(accumulation_step_logs))
104
+
105
+ # return RolloutTreeBranchNode(main_child=main_child_group_node, branches=branches)
106
+
107
+ # Accumulate
108
+ accumulation_step_logs.append(current_node.step_log)
109
+ if accumulation_stop_condition(current_node.step_log):
110
+ grouped_step_logs = group_step_logs(accumulation_step_logs)
111
+ accumulation_step_logs = []
112
+ new_group_node = RolloutTreeNode(
113
+ step_log=grouped_step_logs, time_step=group_time_step, child=None
114
+ )
115
+ if first_group_node is None:
116
+ first_group_node = new_group_node
117
+ group_time_step += 1
118
+ if current_group_node is not None:
119
+ current_group_node.child = new_group_node
120
+ current_group_node = new_group_node
121
+ current_node = current_node.child
122
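+ # Note: step logs accumulated after the last satisfied stop condition are dropped.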
+ return first_group_node
123
+
124
+ node = group_time_steps_rec(
125
+ current_node=rollout_tree.child, group_time_step=0, accumulation_step_logs=[]
126
+ )
127
+ return RolloutTreeRootNode(
128
+ id=rollout_tree.id,
129
+ crn_id=rollout_tree.crn_id,
130
+ child=node,
131
+ agent_ids=rollout_tree.agent_ids,
132
+ )
133
+
134
+
135
+ def stop_when_round_ends(step_log: StepLog) -> bool:
136
+ """
137
+ Simplest stop condition. Returns True if the step log is the last time step of a round.
138
+ This will throw an error if this information is not available in the simulation info.
139
+ """
140
+ assert (
141
+ "is_last_timestep_in_round" in step_log.simulation_step_log.info.keys()
142
+ ), "To group by round, is_last_timestep_in_round must be set in the info of your simulation step log at each time step."
143
+ return step_log.simulation_step_log.info["is_last_timestep_in_round"]
144
+
145
+
146
+ def group_by_round(rollout_tree: RolloutTreeRootNode) -> RolloutTreeRootNode:
147
+ """
148
+ Groups time steps by round.
149
+ """
150
+ return group_time_steps(rollout_tree, stop_when_round_ends)
src_code_for_reproducibility/markov_games/markov_game.py ADDED
@@ -0,0 +1,208 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This class unifies a simulation, and the agents acting in it (see `simulation.py` & `agent.py`).
3
+ In a MarkovGame step,
4
+ 1) each agent takes an action,
5
+ 2) the state transitions with respect to these actions,
6
+ 3) all relevant data of the step is appended to the historical data list
7
+
8
+ In order to perform 3), the agents and the simulation are expected, at each time step,
9
+ to return a log of the state transition (from their perspective).
10
+ For instance, the Simulation might send rewards and the agents might send prompting contexts to be used later to generate the training data.
11
+ A different approach would be to simply have the agents keep their data private and log it upon completion of a trajectory.
12
+ The approach we use here centralizes the data gathering aspect,
13
+ making it easy to create sub-trajectory descriptions (in the `runners` defined in `runners.py`) that
14
+ only log information for step transitions occurring after the branching out.
15
+ """
16
+ import asyncio
17
+ import copy
18
+ import json
19
+ import os
20
+ from dataclasses import dataclass
21
+ from typing import Any, List, Literal, Optional, Tuple
22
+
25
+ from mllm.markov_games.agent import Agent
26
+ from mllm.markov_games.rollout_tree import AgentActLog, StepLog
27
+ from mllm.markov_games.simulation import Simulation
28
+
29
+ AgentId = str
30
+
31
+
32
+ @dataclass
33
+ class AgentAndActionSafeCopy:
34
+ action: Any
35
+ action_info: AgentActLog
36
+ agent_after_action: Agent
37
+
38
+
39
+ class MarkovGame(object):
40
+ def __init__(
41
+ self,
42
+ id: int,
43
+ agents: dict[AgentId, Agent],
44
+ simulation: Simulation,
45
+ crn_id: int,
46
+ ):
47
+ """
48
+ Args:
49
+ id:
+ Identifier of this Markov game instance.
+ agents:
+ Mapping from agent id to Agent instance.
+ simulation:
+ Simulation object. Example: IPDSimulation
+ crn_id:
+ ID of the rng stream used for this game (common random numbers).
54
+ """
55
+ self.agents = agents
56
+ self.agent_ids = self.agents.keys()
57
+ self.simulation = simulation
58
+ self.simulation_step_log = None
59
+ self.agent_step_logs = {agent_id: None for agent_id in self.agent_ids}
60
+ self.actions = {}
61
+ self.id = id
62
+ self.crn_id = crn_id
63
+
64
+ def get_id(self) -> int:
65
+ return self.id
66
+
67
+ def get_crn_id(self) -> int:
68
+ return self.crn_id
69
+
70
+ def get_agent_ids(self) -> List[AgentId]:
71
+ return list(self.agent_ids)
72
+
73
+ async def get_action_of_agent_without_side_effects(
74
+ self, agent_id: AgentId
75
+ ) -> AgentAndActionSafeCopy:
76
+ """
77
+ Safe function to get an action of an agent without modifying the agent or the simulation.
78
+ """
79
+ agent = self.agents[agent_id]
80
+ agent_before_action = agent.get_safe_copy()
81
+ obs = self.simulation.get_obs_agent(agent_id)
82
+ action, action_info = await agent.act(observation=obs)
83
+ self.agents[agent_id] = agent_before_action
84
+ agent_after_action = agent.get_safe_copy()
85
+ return AgentAndActionSafeCopy(action, action_info, agent_after_action)
86
+
87
+ async def get_actions_of_agents_without_side_effects(
88
+ self,
89
+ ) -> dict[AgentId, AgentAndActionSafeCopy]:
90
+ """
91
+ Safe function to get the actions of all agents without modifying the agents or the simulation.
92
+ """
93
+ tasks = []
94
+ for agent_id in self.agent_ids:
95
+ task = asyncio.create_task(
96
+ self.get_action_of_agent_without_side_effects(agent_id)
97
+ )
98
+ tasks.append(task)
99
+ agent_and_action_safe_copies: list[
100
+ AgentAndActionSafeCopy
101
+ ] = await asyncio.gather(*tasks)
102
+ return {
103
+ agent_id: agent_and_action_safe_copy
104
+ for agent_id, agent_and_action_safe_copy in zip(
105
+ self.agent_ids, agent_and_action_safe_copies
106
+ )
107
+ }
108
+
109
+ def set_action_and_agent_after_action_manually(
110
+ self,
111
+ agent_id: AgentId,
112
+ agent_action_safe_copy: AgentAndActionSafeCopy,
113
+ ):
114
+ """
115
+ Set the action and the agent after action manually.
116
+ """
117
+ self.actions[agent_id] = agent_action_safe_copy.action
118
+ self.agent_step_logs[agent_id] = agent_action_safe_copy.action_info
119
+ self.agents[agent_id] = agent_action_safe_copy.agent_after_action
120
+
121
+ def set_actions_of_agents_manually(
122
+ self, actions: dict[AgentId, AgentAndActionSafeCopy]
123
+ ):
124
+ """
125
+ Set the actions of agents manually.
126
+ """
127
+ for agent_id, agent_action_safe_copy in actions.items():
128
+ self.set_action_and_agent_after_action_manually(
129
+ agent_id, agent_action_safe_copy
130
+ )
131
+
132
+ async def set_action_of_agent(self, agent_id: AgentId):
133
+ """
134
+ Query the agent's policy on its current observation and store the resulting action and action log.
135
+ """
136
+ agent = self.agents[agent_id]
137
+ obs = self.simulation.get_obs_agent(agent_id)
138
+ action, action_info = await agent.act(observation=obs)
139
+ self.actions[agent_id] = action
140
+ self.agent_step_logs[agent_id] = action_info
141
+
142
+ async def set_actions(self):
143
+ """
144
+ Concurrently query all agents and store their actions and action logs.
145
+ """
146
+ # background_tasks = set()
147
+ tasks = []
148
+ for agent_id in self.agent_ids:
149
+ task = asyncio.create_task(self.set_action_of_agent(agent_id))
150
+ tasks.append(task)
151
+ await asyncio.gather(*tasks)
152
+
153
+ def take_simulation_step(self):
154
+ """
155
+ Advance the simulation with the stored actions. Returns whether the game terminated.
156
+ """
157
+ terminated, self.simulation_step_log = self.simulation.step(self.actions)
158
+ return terminated
159
+
160
+ def get_step_log(self) -> StepLog:
161
+ """
162
+ Assemble the latest simulation and agent logs into a StepLog.
163
+ TODO: assert actions and simulation have taken step
164
+ """
165
+ step_log = StepLog(
166
+ simulation_step_log=self.simulation_step_log,
167
+ action_logs=self.agent_step_logs,
168
+ )
169
+ return step_log
170
+
171
+ async def step(self) -> Tuple[bool, StepLog]:
172
+ """
173
+ Run one full step: collect all agents' actions, advance the simulation, and return (terminated, step_log).
174
+ """
175
+ await self.set_actions()
176
+ terminated = self.take_simulation_step()
177
+ step_log = self.get_step_log()
178
+ return terminated, step_log
179
+
180
+ def get_safe_copy(self):
181
+ """
182
+ Return a copy of this MarkovGame that is decorrelated from the original (simulation, agents, and step data are copied).
183
+ """
184
+
185
+ new_markov_game = copy.copy(self)
186
+ new_simulation = self.simulation.get_safe_copy()
187
+ new_agents = {
188
+ agent_id: agent.get_safe_copy() for agent_id, agent in self.agents.items()
189
+ }
190
+
191
+ # Reassign copied components
192
+ new_markov_game.simulation = new_simulation
193
+ new_markov_game.agents = new_agents
194
+
195
+ # IMPORTANT: ensure agent_ids references the new agents dict, not the original
196
+ new_markov_game.agent_ids = new_markov_game.agents.keys()
197
+
198
+ # Deep-copy step data to avoid correlation
199
+ new_markov_game.simulation_step_log = copy.deepcopy(self.simulation_step_log)
200
+ new_markov_game.actions = copy.deepcopy(self.actions)
201
+ # Rebuild logs to align exactly with new agent ids
202
+ old_agent_step_logs = copy.deepcopy(self.agent_step_logs)
203
+ new_markov_game.agent_step_logs = {
204
+ agent_id: old_agent_step_logs.get(agent_id)
205
+ for agent_id in new_markov_game.agent_ids
206
+ }
207
+
208
+ return new_markov_game
src_code_for_reproducibility/markov_games/mg_utils.py ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ import copy
3
+ from collections.abc import Callable
4
+ from dataclasses import dataclass
5
+
6
+ from mllm.markov_games.ipd.ipd_agent import IPDAgent
7
+ from mllm.markov_games.ipd.ipd_simulation import IPD
8
+ from mllm.markov_games.markov_game import MarkovGame
9
+ from mllm.markov_games.negotiation.dond_agent import DealNoDealAgent
10
+ from mllm.markov_games.negotiation.dond_simulation import DealNoDealSimulation
11
+ from mllm.markov_games.negotiation.nego_hard_coded_policies import (
12
+ HardCodedNegoGreedyPolicy,
13
+ HardCodedNegoWelfareMaximizingPolicy,
14
+ )
15
+ from mllm.markov_games.ipd.Ipd_hard_coded_agents import AlwaysCooperateIPDAgent, AlwaysDefectIPDAgent
16
+ from mllm.markov_games.negotiation.no_press_nego_agent import NoPressAgent
17
+ from mllm.markov_games.negotiation.no_press_nego_simulation import NoPressSimulation
18
+ from mllm.markov_games.negotiation.tas_agent import TrustAndSplitAgent
19
+ from mllm.markov_games.negotiation.tas_rps_agent import TrustAndSplitRPSAgent
20
+ from mllm.markov_games.negotiation.tas_rps_simulation import TrustAndSplitRPSSimulation
21
+ from mllm.markov_games.negotiation.tas_simple_agent import TrustAndSplitSimpleAgent
22
+ from mllm.markov_games.negotiation.tas_simple_simulation import (
23
+ TrustAndSplitSimpleSimulation,
24
+ )
25
+ from mllm.markov_games.negotiation.tas_simulation import TrustAndSplitSimulation
26
+ from mllm.markov_games.rollout_tree import (
27
+ AgentActLog,
28
+ RolloutTreeBranchNode,
29
+ RolloutTreeNode,
30
+ RolloutTreeRootNode,
31
+ StepLog,
32
+ )
33
+ from mllm.markov_games.simulation import SimulationStepLog
34
+
35
+ AgentId = str
36
+
37
+
38
+ @dataclass
39
+ class AgentConfig:
40
+ agent_id: str
41
+ agent_name: str
42
+ agent_class_name: str
43
+ policy_id: str
44
+ init_kwargs: dict
45
+
46
+
47
+ @dataclass
48
+ class MarkovGameConfig:
49
+ id: int
50
+ seed: int
51
+ simulation_class_name: str
52
+ simulation_init_args: dict
53
+ agent_configs: list[AgentConfig]
54
+
55
+
56
+ def init_markov_game_components(
57
+ config: MarkovGameConfig, policies: dict[str, Callable[[list[dict]], str]]
58
+ ):
59
+ """
60
+ Instantiate the agents and simulation described by the config and assemble them into a MarkovGame.
61
+ """
62
+ agents = {}
63
+ agent_names = []
64
+ for agent_config in config.agent_configs:
65
+ agent_id = agent_config.agent_id
66
+ agent_name = agent_config.agent_name
67
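+ # eval resolves the class name against the agent classes imported at the top of this module.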
+ agent_class = eval(agent_config.agent_class_name)
68
+ agent = agent_class(
69
+ seed=config.seed,
70
+ agent_id=agent_id,
71
+ agent_name=agent_name,
72
+ policy=policies[agent_config.policy_id],
73
+ **agent_config.init_kwargs,
74
+ )
75
+ agents[agent_id] = agent
76
+ agent_names.append(agent_name)
77
+ simulation = eval(config.simulation_class_name)(
78
+ seed=config.seed,
79
+ agent_ids=list(agents.keys()),
80
+ agent_names=agent_names,
81
+ **config.simulation_init_args,
82
+ )
83
+ markov_game = MarkovGame(
84
+ id=config.id,
85
+ crn_id=config.seed,
86
+ agents=agents,
87
+ simulation=simulation,
88
+ )
89
+ return markov_game
src_code_for_reproducibility/markov_games/rollout_tree.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ TODO: add parent to nodes so that some verification can be done. For instance, to ensure that node reward keys match the parent node.
3
+ """
4
+
5
+ from __future__ import annotations
6
+
7
+ import json
8
+ from dataclasses import dataclass
9
+ from pathlib import Path
10
+ from typing import Any, List, Literal, Optional, Tuple
11
+
12
+ import jsonschema
13
+ from pydantic import BaseModel, Field, model_validator
14
+
15
+ from mllm.chat_utils.chat_turn import ChatTurn
16
+
17
+ AgentId = str
18
+
19
+
20
+ class SimulationStepLog(BaseModel):
21
+ rewards: dict[AgentId, float]
22
+ info: Any = None
23
+
24
+
25
+ class AgentActLog(BaseModel):
26
+ chat_turns: list[ChatTurn] | None
27
+ info: Any = None
28
+
29
+ @model_validator(mode="after")
30
+ def _exactly_one_state_end(self):
31
+ """
32
+ This method is used to enforce that for each AgentActLog, there is exactly one ChatTurn which is a state end.
33
+ """
34
+ if self.chat_turns:  # skip validation when chat_turns is None or empty
35
+ n = sum(1 for t in self.chat_turns if t.is_state_end)
36
+ if n != 1:
37
+ raise ValueError(
38
+ f"AgentActLog must have exactly one ChatTurn with is_state_end=True; got {self.chat_turns}."
39
+ )
40
+ return self
41
+ else:
42
+ return self
43
+
44
+
45
+ class StepLog(BaseModel):
46
+ action_logs: dict[AgentId, AgentActLog]
47
+ simulation_step_log: SimulationStepLog
48
+
49
+
50
+ # BranchType = Literal["unilateral_deviation", "common_deviation"] # might not be necessary
51
+ # class BranchNodeInfo(BaseModel):
52
+ # branch_id: str
53
+ # branch_for: AgentId
54
+ # branch_type: BranchType
55
+
56
+
57
+ class RolloutTreeNode(BaseModel):
58
+ step_log: StepLog
59
+ time_step: int
60
+ child: RolloutTreeNode | RolloutTreeBranchNode | None = None
61
+
62
+
63
+ class RolloutTreeBranchNode(BaseModel):
64
+ """
65
+ The keys of ``branches`` indicate which agent "called" for an alternative branch.
66
+ """
67
+
68
+ main_child: RolloutTreeNode
69
+ branches: dict[AgentId, list[RolloutTreeNode]] | None = None
70
+
71
+
72
+ class RolloutTreeRootNode(BaseModel):
73
+ id: int
74
+ crn_id: int # ID of the rng used to generate this rollout tree
75
+ child: RolloutTreeNode | RolloutTreeBranchNode | None = None
76
+ agent_ids: List[AgentId] = Field(min_length=1)
77
+
78
+
79
+ # class RolloutTreeLeafNode(BaseModel):
80
+ # step_log: StepLog
81
+ # time_step: int
82
+
83
+
84
+ # Necessary for self-referential stuff in pydantic
85
+ RolloutTreeBranchNode.model_rebuild()
86
+ RolloutTreeNode.model_rebuild()
src_code_for_reproducibility/markov_games/run_markov_games.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ from collections.abc import Callable
3
+ from dataclasses import dataclass
4
+
7
+ from mllm.markov_games.markov_game import MarkovGame
8
+ from mllm.markov_games.rollout_tree import RolloutTreeRootNode
9
+
10
+
11
+ async def run_markov_games(
12
+ runner: Callable[[MarkovGame], RolloutTreeRootNode],
13
+ runner_kwargs: dict,
14
+ output_folder: str,
15
+ markov_games: list[MarkovGame],
16
+ ) -> list[RolloutTreeRootNode]:
17
+ tasks = []
18
+ for mg in markov_games:
19
+ tasks.append(
20
+ asyncio.create_task(
21
+ runner(markov_game=mg, output_folder=output_folder, **runner_kwargs)
22
+ )
23
+ )
24
+ return await asyncio.gather(*tasks)
src_code_for_reproducibility/markov_games/simulation.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ A Simulation is the environment of a Markov Game.
3
+ The Simulation is not responsible for properly checking / formatting the responses of LLM's.
4
+ This is the job of the `Agent` class.
5
+ Simulations expect clean actions, and are defined similarly to `gymnasium` environments, except that they are adapted for the Multi-agent setting.
6
+ """
7
+
8
+ from abc import ABC, abstractmethod
9
+ from typing import Any, Tuple
10
+
11
+ from numpy.random import default_rng
12
+
13
+ from mllm.markov_games.rollout_tree import SimulationStepLog
14
+
15
+
16
+ class Simulation(ABC):
17
+ @abstractmethod
18
+ def __init__(self, seed: int, *args, **kwargs):
19
+ self.seed = seed
20
+ self.rng = default_rng(self.seed)
21
+
22
+ @abstractmethod
23
+ def step(self, actions: Any) -> Tuple[bool, SimulationStepLog]:
24
+ """
25
+ Returns (terminated, simulation_step_log)
26
+ """
27
+ raise NotImplementedError
28
+
29
+ def get_obs(self):
30
+ """Returns all agent observations in dict
31
+
32
+ Returns:
33
+ observations
34
+ """
35
+ raise NotImplementedError
36
+
37
+ def get_obs_agent(self, agent_id):
38
+ """Returns observation for agent_id"""
39
+ raise NotImplementedError
40
+
41
+ def get_obs_size(self):
42
+ """Returns the shape of the observation"""
43
+ raise NotImplementedError
44
+
45
+ def get_state(self):
46
+ raise NotImplementedError
47
+
48
+ def get_state_size(self):
49
+ """Returns the shape of the state"""
50
+ raise NotImplementedError
51
+
52
+ def get_avail_actions(self):
53
+ raise NotImplementedError
54
+
55
+ def get_avail_agent_actions(self, agent_id):
56
+ """Returns the available actions for agent_id"""
57
+ raise NotImplementedError
58
+
59
+ def get_total_actions(self):
60
+ """Returns the total number of actions an agent could ever take"""
61
+ # TODO: This is only suitable for a discrete 1 dimensional action space for each agent
62
+ raise NotImplementedError
63
+
64
+ def get_safe_copy(self):
65
+ """
66
+ Return a copy of the simulation object that is decorrelated from the original object.
67
+ """
68
+ raise NotImplementedError
69
+
70
+ def reset(self):
71
+ """Returns initial observations and states"""
72
+ raise NotImplementedError
73
+
74
+ def render(self):
75
+ raise NotImplementedError
76
+
77
+ def close(self):
78
+ raise NotImplementedError
79
+
80
+ # def seed(self):
81
+ # raise NotImplementedError
82
+
83
+ def save_replay(self):
84
+ raise NotImplementedError
85
+
86
+ def get_simulation_info(self):
87
+ raise NotImplementedError
src_code_for_reproducibility/markov_games/statistics_runner.py ADDED
@@ -0,0 +1,405 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import gc
4
+ import json
5
+ import pickle
6
+ from dataclasses import dataclass
7
+ from pathlib import Path
8
+ from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional
9
+
10
+ from basic_render import find_iteration_folders
11
+
12
+ from mllm.markov_games.rollout_tree import (
13
+ RolloutTreeBranchNode,
14
+ RolloutTreeNode,
15
+ RolloutTreeRootNode,
16
+ SimulationStepLog,
17
+ )
18
+
19
+
20
+ def _iterate_main_nodes(root: RolloutTreeRootNode) -> Iterator[RolloutTreeNode]:
21
+ """
22
+ Iterate the main path nodes without materializing full path lists.
23
+ """
24
+ current = root.child
25
+ while current is not None:
26
+ if isinstance(current, RolloutTreeNode):
27
+ yield current
28
+ current = current.child
29
+ elif isinstance(current, RolloutTreeBranchNode):
30
+ # Follow only the main child on the main trajectory
31
+ current = current.main_child
32
+ else:
33
+ break
34
+
35
+
36
+ def iterate_main_simulation_logs(
37
+ root: RolloutTreeRootNode,
38
+ ) -> Iterator[SimulationStepLog]:
39
+ for node in _iterate_main_nodes(root):
40
+ yield node.step_log.simulation_step_log
41
+
42
+
43
+ def stream_rollout_files(iteration_folder: Path) -> Iterator[Path]:
44
+ for p in iteration_folder.rglob("*.rt.pkl"):
45
+ if p.is_file():
46
+ yield p
47
+
48
+
49
+ def load_root(path: Path) -> RolloutTreeRootNode:
50
+ with open(path, "rb") as f:
51
+ data = pickle.load(f)
52
+ return RolloutTreeRootNode.model_validate(data)
53
+
54
+
55
+ @dataclass
56
+ class StatRecord:
57
+ mgid: int
58
+ crn_id: Optional[int]
59
+ iteration: str
60
+ values: Dict[str, Any]
61
+
62
+
63
+ class StatComputer:
64
+ """
65
+ Stateful stat computer that consumes SimulationStepLog instances
66
+ and produces final aggregated values for one rollout (mgid).
67
+ """
68
+
69
+ def update(self, sl: SimulationStepLog) -> None: # pragma: no cover - interface
70
+ raise NotImplementedError
71
+
72
+ def finalize(self) -> Dict[str, Any]: # pragma: no cover - interface
73
+ raise NotImplementedError
74
+
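+ # A hypothetical sketch (not part of the codebase) of a StatComputer that
+ # averages per-agent rewards from SimulationStepLog.rewards:
+ #
+ # class MeanRewardComputer(StatComputer):
+ #     def __init__(self) -> None:
+ #         self.totals: Dict[str, float] = {}
+ #         self.count = 0
+ #
+ #     def update(self, sl: SimulationStepLog) -> None:
+ #         for aid, r in sl.rewards.items():
+ #             self.totals[aid] = self.totals.get(aid, 0.0) + r
+ #         self.count += 1
+ #
+ #     def finalize(self) -> Dict[str, Any]:
+ #         n = max(self.count, 1)
+ #         return {"mean_reward": {aid: t / n for aid, t in self.totals.items()}}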
75
+
76
+ def run_stats(
77
+ data_root: Path,
78
+ game_name: str,
79
+ make_computers: Callable[[], List[StatComputer]],
80
+ output_filename: Optional[str] = None,
81
+ output_format: str = "json", # "json" (dict of lists) or "jsonl"
82
+ ) -> Path:
83
+ """
84
+ Compute stats across all iteration_* folders under data_root.
85
+ Writes JSON (a dict of lists) or JSONL, per output_format, to data_root/statistics/<output_filename or f"{game_name}.stats.<ext>">.
86
+ """
87
+ data_root = Path(data_root)
88
+ outdir = data_root / "statistics"
89
+ outdir.mkdir(parents=True, exist_ok=True)
90
+ # Choose extension by format
91
+ default_name = (
92
+ f"{game_name}.stats.json"
93
+ if output_format == "json"
94
+ else f"{game_name}.stats.jsonl"
95
+ )
96
+ outfile = outdir / (
97
+ output_filename if output_filename is not None else default_name
98
+ )
99
+
100
+ # Rewrite file each run to keep it clean and small
101
+ if outfile.exists():
102
+ outfile.unlink()
103
+
104
+ iteration_folders = find_iteration_folders(str(data_root))
105
+
106
+ # If writing JSONL, stream directly; otherwise accumulate minimal records
107
+ if output_format == "jsonl":
108
+ with open(outfile, "w", encoding="utf-8") as w:
109
+ for iteration_folder in iteration_folders:
110
+ iteration_name = Path(iteration_folder).name
111
+ for pkl_path in stream_rollout_files(Path(iteration_folder)):
112
+ root = load_root(pkl_path)
113
+
114
+ computers = make_computers()
115
+ for sl in iterate_main_simulation_logs(root):
116
+ for comp in computers:
117
+ try:
118
+ comp.update(sl)
119
+ except Exception:
120
+ continue
121
+
122
+ values: Dict[str, Any] = {}
123
+ for comp in computers:
124
+ try:
125
+ values.update(comp.finalize())
126
+ except Exception:
127
+ continue
128
+
129
+ rec = {
130
+ "mgid": getattr(root, "id", None),
131
+ "crn_id": getattr(root, "crn_id", None),
132
+ "iteration": iteration_name,
133
+ "stats": values,
134
+ }
135
+ w.write(json.dumps(rec, ensure_ascii=False) + "\n")
136
+
137
+ del root
138
+ del computers
139
+ gc.collect()
140
+ else:
141
+ # Aggregate to dict-of-lists for easier plotting
142
+ records: List[Dict[str, Any]] = []
143
+ # Process in deterministic order
144
+ for iteration_folder in iteration_folders:
145
+ iteration_name = Path(iteration_folder).name
146
+ for pkl_path in stream_rollout_files(Path(iteration_folder)):
147
+ root = load_root(pkl_path)
148
+
149
+ computers = make_computers()
150
+ for sl in iterate_main_simulation_logs(root):
151
+ for comp in computers:
152
+ try:
153
+ comp.update(sl)
154
+ except Exception:
155
+ continue
156
+
157
+ values: Dict[str, Any] = {}
158
+ for comp in computers:
159
+ try:
160
+ values.update(comp.finalize())
161
+ except Exception:
162
+ continue
163
+
164
+ records.append(
165
+ {
166
+ "mgid": getattr(root, "id", None),
167
+ "crn_id": getattr(root, "crn_id", None),
168
+ "iteration": iteration_name,
169
+ "stats": values,
170
+ }
171
+ )
172
+
173
+ del root
174
+ del computers
175
+ gc.collect()
176
+
177
+ # Build dict-of-lists with nested stats preserved
178
+ # Collect all stat keys and nested agent keys where needed
179
+ mgids: List[Any] = []
180
+ crn_ids: List[Any] = []
181
+ iterations_out: List[str] = []
182
+ # stats_out is a nested structure mirroring keys but with lists
183
+ stats_out: Dict[str, Any] = {}
184
+
185
+ # First pass to collect union of keys
186
+ stat_keys: set[str] = set()
187
+ nested_agent_keys: Dict[str, set[str]] = {}
188
+ for r in records:
189
+ stats = r.get("stats", {}) or {}
190
+ for k, v in stats.items():
191
+ stat_keys.add(k)
192
+ if isinstance(v, dict):
193
+ nested = nested_agent_keys.setdefault(k, set())
194
+ for ak in v.keys():
195
+ nested.add(str(ak))
196
+
197
+ # Initialize structure
198
+ for k in stat_keys:
199
+ if k in nested_agent_keys:
200
+ stats_out[k] = {ak: [] for ak in sorted(nested_agent_keys[k])}
201
+ else:
202
+ stats_out[k] = []
203
+
204
+ # Fill lists
205
+ for r in records:
206
+ mgids.append(r.get("mgid"))
207
+ crn_ids.append(r.get("crn_id"))
208
+ iterations_out.append(r.get("iteration"))
209
+ stats = r.get("stats", {}) or {}
210
+ for k in stat_keys:
211
+ val = stats.get(k)
212
+ if isinstance(stats_out[k], dict):
213
+ # per-agent dict
214
+ agent_dict = val if isinstance(val, dict) else {}
215
+ for ak in stats_out[k].keys():
216
+ stats_out[k][ak].append(agent_dict.get(ak))
217
+ else:
218
+ stats_out[k].append(val)
219
+
220
+ with open(outfile, "w", encoding="utf-8") as w:
221
+ json.dump(
222
+ {
223
+ "mgid": mgids,
224
+ "crn_id": crn_ids,
225
+ "iteration": iterations_out,
226
+ "stats": stats_out,
227
+ },
228
+ w,
229
+ ensure_ascii=False,
230
+ )
231
+
232
+ return outfile
233
+
234
+
235
+ def run_stats_functional(
236
+ data_root: Path,
237
+ game_name: str,
238
+ metrics: Dict[str, Callable[[SimulationStepLog], Optional[Dict[str, float]]]],
239
+ output_filename: Optional[str] = None,
240
+ output_format: str = "json",
241
+ ) -> Path:
242
+ """
243
+ Functional variant where metrics is a dict of name -> f(SimulationStepLog) -> {agent_id: value}.
244
+ Aggregates per rollout by averaging over steps where a metric produced a value.
245
+ Writes a single consolidated file in data_root/statistics/.
246
+ """
247
+ data_root = Path(data_root)
248
+ outdir = data_root / "statistics"
249
+ outdir.mkdir(parents=True, exist_ok=True)
250
+ default_name = (
251
+ f"{game_name}.stats.json"
252
+ if output_format == "json"
253
+ else f"{game_name}.stats.jsonl"
254
+ )
255
+ outfile = outdir / (
256
+ output_filename if output_filename is not None else default_name
257
+ )
258
+
259
+ if outfile.exists():
260
+ outfile.unlink()
261
+
262
+ iteration_folders = find_iteration_folders(str(data_root))
263
+
264
+ def finalize_rollout(
265
+ agg: Dict[str, Dict[str, List[float]]]
266
+ ) -> Dict[str, Dict[str, float]]:
267
+ # avg per metric per agent
268
+ result: Dict[str, Dict[str, float]] = {}
269
+ for mname, agent_values in agg.items():
270
+ result[mname] = {}
271
+ for aid, vals in agent_values.items():
272
+ if not vals:
273
+ result[mname][aid] = None  # no values for this agent in this rollout; keep key alignment
274
+ else:
275
+ result[mname][aid] = sum(vals) / len(vals)
276
+ return result
277
+
278
+ if output_format == "jsonl":
279
+ with open(outfile, "w", encoding="utf-8") as w:
280
+ for iteration_folder in iteration_folders:
281
+ iteration_name = Path(iteration_folder).name
282
+ for pkl_path in stream_rollout_files(Path(iteration_folder)):
283
+ root = load_root(pkl_path)
284
+
285
+ # aggregator structure: metric -> agent_id -> list of values
286
+ agg: Dict[str, Dict[str, List[float]]] = {
287
+ m: {} for m in metrics.keys()
288
+ }
289
+
290
+ for sl in iterate_main_simulation_logs(root):
291
+ for mname, fn in metrics.items():
292
+ try:
293
+ vals = fn(sl)
294
+ except Exception:
295
+ vals = None
296
+ if not vals:
297
+ continue
298
+ for aid, v in vals.items():
299
+ if v is None:
300
+ continue
301
+ lst = agg[mname].setdefault(str(aid), [])
302
+ try:
303
+ lst.append(float(v))
304
+ except Exception:
305
+ continue
306
+
307
+ values = finalize_rollout(agg)
308
+ rec = {
309
+ "mgid": getattr(root, "id", None),
310
+ "crn_id": getattr(root, "crn_id", None),
311
+ "iteration": iteration_name,
312
+ "stats": values,
313
+ }
314
+ w.write(json.dumps(rec, ensure_ascii=False) + "\n")
315
+
316
+ del root
317
+ gc.collect()
318
+ else:
319
+ records: List[Dict[str, Any]] = []
320
+ for iteration_folder in iteration_folders:
321
+ iteration_name = Path(iteration_folder).name
322
+ for pkl_path in stream_rollout_files(Path(iteration_folder)):
323
+ root = load_root(pkl_path)
324
+
325
+ agg: Dict[str, Dict[str, List[float]]] = {m: {} for m in metrics.keys()}
326
+ for sl in iterate_main_simulation_logs(root):
327
+ for mname, fn in metrics.items():
328
+ try:
329
+ vals = fn(sl)
330
+ except Exception:
331
+ vals = None
332
+ if not vals:
333
+ continue
334
+ for aid, v in vals.items():
335
+ if v is None:
336
+ continue
337
+ lst = agg[mname].setdefault(str(aid), [])
338
+ try:
339
+ lst.append(float(v))
340
+ except Exception:
341
+ continue
342
+
343
+ values = finalize_rollout(agg)
344
+ records.append(
345
+ {
346
+ "mgid": getattr(root, "id", None),
347
+ "crn_id": getattr(root, "crn_id", None),
348
+ "iteration": iteration_name,
349
+ "stats": values,
350
+ }
351
+ )
352
+
353
+ del root
354
+ gc.collect()
355
+
356
+ # Build dict-of-lists output
357
+ mgids: List[Any] = []
358
+ crn_ids: List[Any] = []
359
+ iterations_out: List[str] = []
360
+ stats_out: Dict[str, Any] = {}
361
+
362
+ stat_keys: set[str] = set()
363
+ nested_agent_keys: Dict[str, set[str]] = {}
364
+ for r in records:
365
+ stats = r.get("stats", {}) or {}
366
+ for k, v in stats.items():
367
+ stat_keys.add(k)
368
+ if isinstance(v, dict):
369
+ nested = nested_agent_keys.setdefault(k, set())
370
+ for ak in v.keys():
371
+ nested.add(str(ak))
372
+
373
+ for k in stat_keys:
374
+ if k in nested_agent_keys:
375
+ stats_out[k] = {ak: [] for ak in sorted(nested_agent_keys[k])}
376
+ else:
377
+ stats_out[k] = []
378
+
379
+ for r in records:
380
+ mgids.append(r.get("mgid"))
381
+ crn_ids.append(r.get("crn_id"))
382
+ iterations_out.append(r.get("iteration"))
383
+ stats = r.get("stats", {}) or {}
384
+ for k in stat_keys:
385
+ val = stats.get(k)
386
+ if isinstance(stats_out[k], dict):
387
+ agent_dict = val if isinstance(val, dict) else {}
388
+ for ak in stats_out[k].keys():
389
+ stats_out[k][ak].append(agent_dict.get(ak))
390
+ else:
391
+ stats_out[k].append(val)
392
+
393
+ with open(outfile, "w", encoding="utf-8") as w:
394
+ json.dump(
395
+ {
396
+ "mgid": mgids,
397
+ "crn_id": crn_ids,
398
+ "iteration": iterations_out,
399
+ "stats": stats_out,
400
+ },
401
+ w,
402
+ ensure_ascii=False,
403
+ )
404
+
405
+ return outfile
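
A minimal usage sketch of the functional variant (the data root, game name, and metric are illustrative assumptions; `rewards` is the per-agent dict carried by `SimulationStepLog`):

```python
from pathlib import Path

def step_rewards(sl: SimulationStepLog) -> dict:
    # Per-agent reward at this step; run_stats_functional averages these per rollout.
    return sl.rewards

outfile = run_stats_functional(
    data_root=Path("results/my_run"),   # assumed layout: results/my_run/iteration_*/
    game_name="ipd",
    metrics={"reward": step_rewards},
    output_format="jsonl",
)
print(f"wrote {outfile}")
```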
src_code_for_reproducibility/models/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (153 Bytes). View file
 
src_code_for_reproducibility/models/__pycache__/adapter_training_wrapper.cpython-312.pyc ADDED
Binary file (4.92 kB). View file
 
src_code_for_reproducibility/models/__pycache__/human_policy.cpython-312.pyc ADDED
Binary file (11.9 kB). View file
 
src_code_for_reproducibility/models/__pycache__/inference_backend.cpython-312.pyc ADDED
Binary file (2.24 kB). View file
 
src_code_for_reproducibility/models/__pycache__/inference_backend_dummy.cpython-312.pyc ADDED
Binary file (2.34 kB). View file
 
src_code_for_reproducibility/models/__pycache__/inference_backend_sglang.cpython-312.pyc ADDED
Binary file (3.67 kB). View file
 
src_code_for_reproducibility/models/__pycache__/inference_backend_vllm.cpython-312.pyc ADDED
Binary file (4.95 kB). View file
 
src_code_for_reproducibility/models/__pycache__/large_language_model_api.cpython-312.pyc ADDED
Binary file (6.94 kB). View file
 
src_code_for_reproducibility/models/__pycache__/large_language_model_local.cpython-312.pyc ADDED
Binary file (16.7 kB). View file
 
src_code_for_reproducibility/models/__pycache__/scalar_critic.cpython-312.pyc ADDED
Binary file (3.21 kB). View file
 
src_code_for_reproducibility/training/README.md ADDED
@@ -0,0 +1,20 @@
1
+ Suppose we have a trajectory with 3 timesteps.
2
+ token: "0 1 2 3 4 5 6 7 8 9 . . . . ."
3
+ string: "A B C a b c A a A a b c A B C" (Capitalized = User, Lowercased = Assistant)
4
+ action_mask: "x x x ✓ ✓ ✓ x ✓ x ✓ ✓ ✓ x x x" (x = False, ✓ = True)
5
+ rewards: "r r r r r r R R R R R R r r r"
6
+ timestep: "0 0 0 0 0 0 1 1 1 1 1 1 2 2 2"
7
+ state_ends: "x x ✓ x x x ✓ x x x x x x x ✓"
8
+
9
+ There must be exactly one state-end ("baseline") flag per timestep!
10
+
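A minimal sketch of how these per-token tensors line up (tensor names are ours; the values mirror the table above):

```python
import torch

action_mask = torch.tensor([0,0,0,1,1,1,0,1,0,1,1,1,0,0,0], dtype=torch.bool)
timestep    = torch.tensor([0,0,0,0,0,0,1,1,1,1,1,1,2,2,2])
state_ends  = torch.tensor([0,0,1,0,0,0,1,0,0,0,0,0,0,0,1], dtype=torch.bool)

# Exactly one state-end ("baseline") flag per timestep:
assert state_ends.sum().item() == timestep.max().item() + 1

# Action tokens credited to each timestep:
for t in range(int(timestep.max()) + 1):
    n = (action_mask & (timestep == t)).sum().item()
    print(f"timestep {t}: {n} action tokens")
```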
11
+ Then, we might have
12
+
13
+ A naive way to interpret this is to treat the number of assistant messages as the number of
14
+ steps in the environment. However, this does not hold in practice: in a
15
+ single simulation step, an agent may emit several assistant messages, so message count and environment timestep can differ.
16
+
17
+
18
+
19
+
20
+ A subtlety arises with credit assignment. In the multi-agent case, we might
src_code_for_reproducibility/training/credit_methods.py ADDED
@@ -0,0 +1,295 @@
1
+ import torch
2
+
3
+
4
+ def whiten_advantages(advantages: torch.Tensor) -> torch.Tensor:
5
+ """
6
+ Whitens the advantages.
7
+ """
8
+ whitened_advantages = (advantages - torch.mean(advantages)) / (
9
+ torch.std(advantages) + 1e-9
10
+ )
11
+ return whitened_advantages
12
+
13
+
14
+ def whiten_advantages_time_step_wise(
15
+ advantages: torch.Tensor, # (B, T)
16
+ ) -> torch.Tensor:
17
+ """
18
+ Whitens the advantages.
19
+ """
20
+ assert advantages.dim() == 2, "Wrong dimensions."
21
+ whitened_advantages_time_step_wise = (
22
+ advantages - advantages.mean(dim=0, keepdim=True)
23
+ ) / (advantages.std(dim=0, keepdim=True) + 1e-9)
24
+ return whitened_advantages_time_step_wise
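
A quick sanity check of the per-timestep variant (toy values): whitening is applied column-wise across the batch, so each timestep ends up with roughly zero mean and unit standard deviation:

```python
import torch

adv = torch.tensor([[1.0, 10.0], [3.0, 30.0]])   # (B=2, T=2)
per_step = whiten_advantages_time_step_wise(adv)
print(per_step.mean(dim=0))   # ~0 for each timestep column
print(per_step.std(dim=0))    # ~1 for each timestep column
```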
25
+
26
+
27
+ def get_discounted_state_visitation_credits(
28
+ credits: torch.Tensor, discount_factor: float # (B, T)
29
+ ) -> torch.Tensor:
30
+ """
31
+ Computes discounted state visitation credits for a sequence of credits.
32
+ """
33
+ return credits * (
34
+ discount_factor ** torch.arange(credits.shape[1], device=credits.device)
35
+ )
36
+
37
+
38
+ def get_discounted_returns(
39
+ rewards: torch.Tensor, # (B, T)
40
+ discount_factor: float,
41
+ ) -> torch.Tensor:
42
+ """
43
+ Computes Monte Carlo discounted returns for a sequence of rewards.
44
+
45
+ Args:
46
+ rewards (torch.Tensor): Array of rewards for each timestep.
47
+
48
+ Returns:
49
+ torch.Tensor: Array of discounted returns.
50
+ """
51
+ assert rewards.dim() == 2, "Wrong dimensions."
52
+ B, T = rewards.shape
53
+ discounted_returns = torch.zeros_like(rewards)
54
+ accumulator = torch.zeros(B, device=rewards.device, dtype=rewards.dtype)
55
+ for t in reversed(range(T)):
56
+ accumulator = rewards[:, t] + discount_factor * accumulator
57
+ discounted_returns[:, t] = accumulator
58
+ return discounted_returns
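
A small worked example of the backward recursion (values chosen by hand):

```python
import torch

# rewards [1, 0, 2] with gamma = 0.5:
#   G_2 = 2;  G_1 = 0 + 0.5 * 2 = 1;  G_0 = 1 + 0.5 * 1 = 1.5
rewards = torch.tensor([[1.0, 0.0, 2.0]])
returns = get_discounted_returns(rewards, discount_factor=0.5)
assert torch.allclose(returns, torch.tensor([[1.5, 1.0, 2.0]]))
```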
59
+
60
+
61
+ def get_rloo_credits(credits: torch.Tensor): # (B, S)
62
+ assert credits.dim() == 2, "Wrong dimensions."
63
+ rloo_baselines = torch.zeros_like(credits)
64
+ n = credits.shape[0]
65
+ if n == 1:
66
+ return credits, rloo_baselines
67
+ rloo_baselines = (torch.sum(credits, dim=0, keepdim=True) - credits) / (n - 1)
68
+ rloo_credits = credits - rloo_baselines
69
+ return rloo_credits, rloo_baselines
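
For intuition, with three rollouts each leave-one-out baseline is the mean of the other two (toy values):

```python
import torch

credits = torch.tensor([[1.0], [2.0], [3.0]])        # (B=3, S=1)
rloo, baselines = get_rloo_credits(credits)
assert torch.allclose(baselines, torch.tensor([[2.5], [2.0], [1.5]]))
assert torch.allclose(rloo, credits - baselines)
```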
70
+
71
+
72
+ def get_generalized_advantage_estimates(
73
+ rewards: torch.Tensor, # (B, T)
74
+ value_estimates: torch.Tensor, # (B, T+1)
75
+ discount_factor: float,
76
+ lambda_coef: float,
77
+ ) -> torch.Tensor:
78
+ """
79
+ Computes Generalized Advantage Estimates (GAE) for a sequence of rewards and value estimates.
80
+ See https://arxiv.org/pdf/1506.02438 for details.
81
+
82
+
83
+ Returns:
84
+ torch.Tensor: Array of GAE values.
85
+ """
86
+ assert rewards.dim() == value_estimates.dim() == 2, "Wrong dimensions."
87
+
88
+ assert (
89
+ rewards.shape[0] == value_estimates.shape[0]
90
+ ), f"Got shapes {rewards.shape} and {value_estimates.shape} of rewards and value estimates."
91
+ assert (
92
+ rewards.shape[1] == value_estimates.shape[1] - 1
93
+ ), f"Got shapes {rewards.shape} and {value_estimates.shape} of rewards and value estimates."
94
+
95
+ T = rewards.shape[1]
96
+ tds = rewards + discount_factor * value_estimates[:, 1:] - value_estimates[:, :-1]
97
+ gaes = torch.zeros_like(tds)
98
+ acc = 0.0
99
+ for t in reversed(range(T)):
100
+ acc = tds[:, t] + lambda_coef * discount_factor * acc
101
+ gaes[:, t] = acc
102
+ return gaes
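
One useful special case to check: with `lambda_coef = 0`, GAE collapses to the one-step TD errors (toy values):

```python
import torch

rewards = torch.tensor([[1.0, 1.0]])                  # (B=1, T=2)
values = torch.tensor([[0.5, 0.25, 0.0]])             # (B=1, T+1=3)
gaes = get_generalized_advantage_estimates(
    rewards, values, discount_factor=0.9, lambda_coef=0.0
)
tds = rewards + 0.9 * values[:, 1:] - values[:, :-1]  # delta_t
assert torch.allclose(gaes, tds)
```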
103
+
104
+
105
+ def get_advantage_alignment_weights(
106
+ advantages: torch.Tensor, # (B, T)
107
+ exclude_k_equals_t: bool,
108
+ gamma: float,
109
+ ) -> torch.Tensor:
110
+ """
111
+ The advantage alignment credit is calculated as
112
+
113
+ \[
114
+ A^*(s_t, a_t, b_t) = A^1(s_t, a_t, b_t) + \beta \cdot
115
+ \left( \sum_{k < t} \gamma^{t-k} A^1(s_k, a_k, b_k) \right)
116
+ A^2(s_t, a_t, b_t)
117
+ \]
118
+
119
+ Here, the weights are defined as \( \beta \cdot
120
+ \left( \sum_{k < t} \gamma^{t-k} A^1(s_k, a_k, b_k) \)
121
+ """
122
+ T = advantages.shape[1]
123
+ discounted_advantages = advantages * (
124
+ gamma * torch.ones((1, T), device=advantages.device)
125
+ ) ** (-torch.arange(0, T, 1, device=advantages.device))
126
+ if exclude_k_equals_t:
127
+ sub = torch.eye(T, device=advantages.device)
128
+ else:
129
+ sub = torch.zeros((T, T), device=advantages.device)
130
+
131
+ # Identity is for \( k < t \), remove for \( k \leq t \)
132
+ ad_align_weights = discounted_advantages @ (
133
+ torch.triu(torch.ones((T, T), device=advantages.device)) - sub
134
+ )
135
+ t_discounts = (gamma * torch.ones((1, T), device=advantages.device)) ** (
136
+ torch.arange(0, T, 1, device=advantages.device)
137
+ )
138
+ ad_align_weights = t_discounts * ad_align_weights
139
+ return ad_align_weights
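
Sanity check: with `gamma = 1` and `exclude_k_equals_t = False`, the weight at step t is the running sum of advantages up to and including t:

```python
import torch

adv = torch.tensor([[1.0, 2.0, 3.0]])
w = get_advantage_alignment_weights(adv, exclude_k_equals_t=False, gamma=1.0)
assert torch.allclose(w, torch.cumsum(adv, dim=1))    # [[1., 3., 6.]]
```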
140
+
141
+
142
+ def get_advantage_alignment_credits(
143
+ a1: torch.Tensor, # (B, S)
144
+ a1_alternative: torch.Tensor, # (B, S, A)
145
+ a2: torch.Tensor, # (B, S)
146
+ exclude_k_equals_t: bool,
147
+ beta: float,
148
+ gamma: float = 1.0,
149
+ use_old_ad_align: bool = False,
150
+ use_sign: bool = False,
151
+ clipping: float | None = None,
152
+ use_time_regularization: bool = False,
153
+ force_coop_first_step: bool = False,
154
+ use_variance_regularization: bool = False,
155
+ rloo_branch: bool = False,
156
+ reuse_baseline: bool = False,
157
+ mean_normalize_ad_align: bool = False,
158
+ whiten_adalign_advantages: bool = False,
159
+ whiten_adalign_advantages_time_step_wise: bool = False,
160
+ ) -> torch.Tensor:
161
+ """
162
+ Calculate the advantage alignment credits with vectorization, as described in https://arxiv.org/abs/2406.14662.
163
+
164
+ Recall that the advantage opponent shaping term of the AdAlign policy gradient is:
165
+ \[
166
+ \beta \mathbb{E}_{\substack{
167
+ \tau \sim \text{Pr}_{\mu}^{\pi^1, \pi^2} \\
168
+ a_t' \sim \pi^1(\cdot \mid s_t)
169
+ }}
170
+ \left[\sum_{t=0}^\infty \gamma^{t}\left( \sum_{k\leq t} A^1(s_k,a^{\prime}_k,b_k) \right) A^{2}(s_t,a_t, b_t)\nabla_{\theta^1}\text{log } \pi^1(a_t|s_t) \right]
171
+ \]
172
+
173
+ This method computes the following:
174
+ \[
175
+ Credit(s_t, a_t, b_t) = \gamma^t \left[ A^1(s_t, a_t, b_t) + \beta \left( \sum_{k\leq t} A^1(s_k,a^{\prime}_k,b_k) \right) A^{2}(s_t,a_t, b_t) \right]
176
+ \]
177
+
178
+ Args:
179
+ a1: Advantages of the main trajectories for the current agent.
180
+ a1_alternative: Advantages of the alternative trajectories for the current agent.
181
+ a2: Advantages of the main trajectories for the other agent.
182
+ discount_factor: Discount factor for the advantage alignment.
183
+ beta: Beta parameter for the advantage alignment.
184
+ gamma: Gamma parameter for the advantage alignment.
185
+ use_sign_in_ad_align: Whether to use sign in the advantage alignment.
186
+
187
+ Returns:
188
+ torch.Tensor: The advantage alignment credits.
189
+ """
190
+
191
+ assert a1.dim() == a2.dim() == 2, "Advantages must be of shape (B, S)"
192
+ if a1_alternative is not None:
193
+ assert (
194
+ a1_alternative.dim() == 3
195
+ ), "Alternative advantages must be of shape (B, S, A)"
196
+ B, T, A = a1_alternative.shape
197
+ else:
198
+ B, T = a1.shape
199
+ assert a1.shape == a2.shape, "Not the same shape"
200
+
201
+ sub_tensors = {}
202
+
203
+ if use_old_ad_align:
204
+ ad_align_weights = get_advantage_alignment_weights(
205
+ advantages=a1, exclude_k_equals_t=exclude_k_equals_t, gamma=gamma
206
+ )
207
+ sub_tensors["ad_align_weights_prev"] = ad_align_weights
208
+ if exclude_k_equals_t:
209
+ ad_align_weights = gamma * ad_align_weights
210
+ else:
211
+ assert a1_alternative is not None, "Alternative advantages must be provided"
212
+ if rloo_branch:
213
+ a1_alternative = torch.cat([a1.unsqueeze(2), a1_alternative], dim=2)
214
+ a1_alternative = a1_alternative.mean(dim=2)
215
+ # print(f"a1_alternative: {a1_alternative}, a1: {a1}\n")
216
+ a1, baseline = get_rloo_credits(a1)
217
+ if reuse_baseline:
218
+ a1_alternative = a1_alternative - baseline
219
+ else:
220
+ a1_alternative, _ = get_rloo_credits(a1_alternative)
221
+ assert a1.shape == a1_alternative.shape, "Not the same shape"
222
+ ad_align_weights = get_advantage_alignment_weights(
223
+ advantages=a1_alternative,
224
+ exclude_k_equals_t=exclude_k_equals_t,
225
+ gamma=gamma,
226
+ )
227
+ sub_tensors["ad_align_weights"] = ad_align_weights
228
+
229
+ # Use sign
230
+ if use_sign:
231
+ assert beta == 1.0, "beta should be 1.0 when using sign"
232
+ positive_signs = ad_align_weights > 0
233
+ negative_signs = ad_align_weights < 0
234
+ ad_align_weights[positive_signs] = 1
235
+ ad_align_weights[negative_signs] = -1
236
+ sub_tensors["ad_align_weights_sign"] = ad_align_weights
237
+ # (rest are 0)
238
+
239
+ ###################
240
+ # Process weights
241
+ ###################
242
+
243
+ # Use clipping
244
+ if clipping not in [0.0, None]:
245
+ upper_mask = ad_align_weights > clipping
246
+ lower_mask = ad_align_weights < -clipping
247
+
248
+ ad_align_weights = torch.clip(
249
+ ad_align_weights,
250
+ -clipping,
251
+ clipping,
252
+ )
253
+ clipping_ratio = (  # fraction of weights clipped; diagnostic only
254
+ torch.sum(upper_mask) + torch.sum(lower_mask)
255
+ ) / upper_mask.numel()
256
+ sub_tensors["clipped_ad_align_weights"] = ad_align_weights
257
+
258
+ # 1/1+t Regularization
259
+ if use_time_regularization:
260
+ t_values = torch.arange(1, T + 1).to(ad_align_weights.device)
261
+ ad_align_weights = ad_align_weights / t_values
262
+ sub_tensors["time_regularized_ad_align_weights"] = ad_align_weights
263
+
264
+ # Use coop on t=0
265
+ if force_coop_first_step:
266
+ ad_align_weights[:, 0] = 1
267
+ sub_tensors["coop_first_step_ad_align_weights"] = ad_align_weights
268
+ # # Normalize alignment terms (across same time step)
269
+ # if use_variance_regularization_in_ad_align:
270
+ # # TODO: verify
271
+ # reg_coef = torch.std(a1[:, -1]) / (torch.std(opp_shaping_terms[:, -1]) + 1e-9)
272
+ # opp_shaping_terms *= reg_coef
273
+
274
+ ####################################
275
+ # Compose elements together
276
+ ####################################
277
+
278
+ opp_shaping_terms = beta * ad_align_weights * a2
279
+ sub_tensors["ad_align_opp_shaping_terms"] = opp_shaping_terms
280
+
281
+ credits = a1 + opp_shaping_terms
282
+ if mean_normalize_ad_align:
283
+ credits = credits - credits.mean(dim=0)
284
+ sub_tensors["mean_normalized_ad_align_credits"] = credits
285
+ if whiten_adalign_advantages:
286
+ credits = (credits - credits.mean()) / (credits.std() + 1e-9)
287
+ sub_tensors["whitened_ad_align_credits"] = credits
288
+ if whiten_adalign_advantages_time_step_wise:
289
+ credits = (credits - credits.mean(dim=0, keepdim=True)) / (
290
+ credits.std(dim=0, keepdim=True) + 1e-9
291
+ )
292
+ sub_tensors["whitened_ad_align_credits_time_step_wise"] = credits
293
+ sub_tensors["final_ad_align_credits"] = credits
294
+
295
+ return credits, sub_tensors
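
A minimal end-to-end sketch using the `use_old_ad_align` path, which requires no alternative-action branches (all values are toy):

```python
import torch

a1 = torch.tensor([[0.5, -0.2, 0.1]])   # own advantages (B=1, S=3)
a2 = torch.tensor([[0.3, 0.3, -0.4]])   # co-player advantages (B=1, S=3)
credits, subs = get_advantage_alignment_credits(
    a1=a1,
    a1_alternative=None,
    a2=a2,
    exclude_k_equals_t=False,
    beta=0.5,
    gamma=1.0,
    use_old_ad_align=True,               # path that needs no branch rollouts
)
print(credits.shape)                     # torch.Size([1, 3])
print(sorted(subs.keys()))               # intermediate tensors for logging
```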
src_code_for_reproducibility/training/tally_tokenwise.py ADDED
@@ -0,0 +1,276 @@
1
+ import json
2
+ import os
3
+ from typing import Any, Dict, List, Tuple, Union
4
+
5
+ import numpy as np
6
+ import pandas as pd
7
+ import torch
8
+ from transformers import AutoTokenizer
9
+
10
+
11
+ class ContextualizedTokenwiseTally:
12
+ """
13
+ Collect, store, and save token-level metrics per rollout.
14
+
15
+ - One DataFrame per rollout_id in `paths`
16
+ - Index = timestep (int)
17
+ - Columns are added incrementally via `add_contexts()` and `add_data()`
18
+ - Cells may contain scalars, strings, or lists (dtype=object)
19
+ """
20
+
21
+ def __init__(
22
+ self,
23
+ tokenizer: AutoTokenizer,
24
+ paths: List[str],
25
+ max_context_length: int = 30,
26
+ ):
27
+ """
28
+ Args:
29
+ tokenizer: HuggingFace tokenizer used to convert tids -> tokens
30
+ paths: rollout identifiers (parallel to batch dimension)
31
+ max_context_length: truncate context token lists to this length
32
+ """
33
+ self.tokenizer = tokenizer
34
+ self.paths = paths
35
+ self.max_context_length = max_context_length
36
+ self.tally: Dict[str, pd.DataFrame] = {path: pd.DataFrame() for path in paths}
37
+
38
+ # set later by setters
39
+ self.contexts: torch.Tensor | None = None
40
+ self.action_mask: torch.Tensor | None = None
41
+ self.range: Tuple[int, int] | None = None
42
+
43
+ # --------- Utilities ---------
44
+
45
+ def tids_to_str(self, tids: List[int]) -> List[str]:
46
+ """Convert a list of token IDs to a list of token strings."""
47
+ return self.tokenizer.convert_ids_to_tokens(tids)
48
+
49
+ def _ensure_ready(self):
50
+ assert self.action_mask is not None, "call set_action_mask(mask) first"
51
+ assert self.range is not None, "call set_range((start, end)) first"
52
+
53
+ @staticmethod
54
+ def _sanitize_filename(name: Any) -> str:
55
+ """Make a safe filename from any rollout_id."""
56
+ s = str(name)
57
+ bad = {os.sep, " ", ":", "|", "<", ">", '"', "'"}
58
+ if os.altsep is not None:
59
+ bad.add(os.altsep)
60
+ for ch in bad:
61
+ s = s.replace(ch, "_")
62
+ return s
63
+
64
+ @staticmethod
65
+ def _pad_left(seq: List[Any], length: int, pad_val: Any = "") -> List[Any]:
66
+ """Left-pad a sequence to `length` with `pad_val`."""
67
+ if len(seq) >= length:
68
+ return seq[-length:]
69
+ return [pad_val] * (length - len(seq)) + list(seq)
70
+
71
+ # --------- Setters ---------
72
+
73
+ def set_action_mask(self, action_mask: torch.Tensor):
74
+ """
75
+ action_mask: (B, S) bool or 0/1 indicating valid steps
76
+ """
77
+ self.action_mask = action_mask
78
+
79
+ def set_range(self, range: Tuple[int, int]):
80
+ """
81
+ range: slice (start, end) into self.paths for current batch
82
+ """
83
+ self.range = range
84
+
85
+ # --------- Column builders ---------
86
+
87
+ def add_contexts(self, contexts: torch.Tensor):
88
+ """
89
+ Add a single 'context' column (list[str]) for valid steps.
90
+
91
+ Expects `contexts` with shape (B, S): token id at each timestep.
92
+ For each valid timestep t, we use the last N tokens up to and including t:
93
+ window = contexts[i, max(0, t - N + 1) : t + 1]
94
+ The list is left-padded with "" to always be length N.
95
+ """
96
+ self._ensure_ready()
97
+
98
+ current_paths = self.paths[self.range[0] : self.range[1]]
99
+ B, S = contexts.shape
100
+ N = self.max_context_length
101
+
102
+ # to CPU ints once
103
+ contexts_cpu = contexts.detach().to("cpu")
104
+
105
+ for i in range(B):
106
+ rollout_id = current_paths[i]
107
+ df = self.tally.get(rollout_id, pd.DataFrame())
108
+
109
+ valid_idx = torch.nonzero(
110
+ self.action_mask[i].bool(), as_tuple=False
111
+ ).squeeze(-1)
112
+ if valid_idx.numel() == 0:
113
+ self.tally[rollout_id] = df
114
+ continue
115
+
116
+ idx_list = valid_idx.tolist()
117
+
118
+ # ensure index contains valid steps
119
+ if df.empty:
120
+ df = pd.DataFrame(index=idx_list)
121
+ else:
122
+ new_index = sorted(set(df.index.tolist()) | set(idx_list))
123
+ if list(df.index) != new_index:
124
+ df = df.reindex(new_index)
125
+
126
+ # build context windows
127
+ ctx_token_lists = []
128
+ for t in idx_list:
129
+ start = max(0, t - N + 1)
130
+ window_ids = contexts_cpu[i, start : t + 1].tolist()
131
+ window_toks = self.tids_to_str([int(x) for x in window_ids])
132
+ if len(window_toks) < N:
133
+ window_toks = [""] * (N - len(window_toks)) + window_toks
134
+ else:
135
+ window_toks = window_toks[-N:]
136
+ ctx_token_lists.append(window_toks)
137
+
138
+ # single 'context' column
139
+ if "context" not in df.columns:
140
+ df["context"] = pd.Series(index=df.index, dtype=object)
141
+ df.loc[idx_list, "context"] = pd.Series(
142
+ ctx_token_lists, index=idx_list, dtype=object
143
+ )
144
+
145
+ self.tally[rollout_id] = df
146
+
147
+ def add_data(
148
+ self,
149
+ metric_id: str,
150
+ metrics: torch.Tensor,
151
+ to_tids: bool = False,
152
+ ):
153
+ """
154
+ Add a metric column for valid steps.
155
+
156
+ Args:
157
+ metric_id: column name
158
+ metrics: shape (B, S) for scalars/ids or (B, S, K) for top-k vectors
159
+ to_tids: if True, treat ints/lists of ints as tids and convert to tokens
160
+ """
161
+ self._ensure_ready()
162
+ current_paths = self.paths[self.range[0] : self.range[1]]
163
+
164
+ if metrics.dim() == 2:
165
+ B, S = metrics.shape
166
+ elif metrics.dim() == 3:
167
+ B, S, _ = metrics.shape
168
+ else:
169
+ raise ValueError("metrics must be (B, S) or (B, S, K)")
170
+
171
+ for i in range(B):
172
+ rollout_id = current_paths[i]
173
+ df = self.tally.get(rollout_id, pd.DataFrame())
174
+
175
+ valid_idx = torch.nonzero(
176
+ self.action_mask[i].bool(), as_tuple=False
177
+ ).squeeze(-1)
178
+ if valid_idx.numel() == 0:
179
+ self.tally[rollout_id] = df
180
+ continue
181
+
182
+ idx_list = valid_idx.detach().cpu().tolist()
183
+
184
+ # Ensure index contains valid steps
185
+ if df.empty:
186
+ df = pd.DataFrame(index=idx_list)
187
+ else:
188
+ new_index = sorted(set(df.index.tolist()) | set(idx_list))
189
+ if list(df.index) != new_index:
190
+ df = df.reindex(new_index)
191
+
192
+ # Slice metrics at valid steps
193
+ m_valid = metrics[i][valid_idx]
194
+
195
+ # -> pure python lists (1D list or list-of-lists)
196
+ values = m_valid.detach().cpu().tolist()
197
+
198
+ # optional tids -> tokens
199
+ if to_tids:
200
+
201
+ def _to_tokish(x):
202
+ if isinstance(x, list):
203
+ return self.tids_to_str([int(v) for v in x])
204
+ else:
205
+ return self.tids_to_str([int(x)])[0]
206
+
207
+ values = [_to_tokish(v) for v in values]
208
+
209
+ # Ensure column exists with object dtype, then assign via aligned Series
210
+ if metric_id not in df.columns:
211
+ df[metric_id] = pd.Series(index=df.index, dtype=object)
212
+
213
+ if isinstance(values, np.ndarray):
214
+ values = values.tolist()
215
+
216
+ if len(values) != len(idx_list):
217
+ raise ValueError(
218
+ f"Length mismatch for '{metric_id}': values={len(values)} vs idx_list={len(idx_list)}"
219
+ )
220
+
221
+ df.loc[idx_list, metric_id] = pd.Series(
222
+ values, index=idx_list, dtype=object
223
+ )
224
+ self.tally[rollout_id] = df
225
+
226
+ # --------- Saving ---------
227
+
228
+ def save(self, path: str):
229
+ """
230
+ Write a manifest JSON and one CSV per rollout.
231
+
232
+ - Manifest includes metadata only (safe to JSON).
233
+ - Each rollout CSV is written with index label 'timestep'.
234
+ - Only a single 'context' column (list[str]).
235
+ """
236
+ if not self.tally or all(df.empty for df in self.tally.values()):
237
+ return
238
+
239
+ os.makedirs(path, exist_ok=True)
240
+ from datetime import datetime
241
+
242
+ now = datetime.now()
243
+
244
+ manifest = {
245
+ "created_at": f"{now:%Y-%m-%d %H:%M:%S}",
246
+ "max_context_length": self.max_context_length,
247
+ "num_rollouts": len(self.tally),
248
+ "rollouts": [],
249
+ }
250
+
251
+ for rid, df in self.tally.items():
252
+ rid_str = str(rid)
253
+ safe_name = self._sanitize_filename(rid_str)
254
+ csv_path = os.path.join(path, f"{safe_name}_tokenwise.csv")
255
+
256
+ # Put 'context' first (when present), then the rest
257
+ cols = [c for c in df.columns if c == "context"] + [c for c in df.columns if c != "context"]
258
+ try:
259
+ df[cols].to_csv(csv_path, index=True, index_label="timestep")
260
+ except Exception as e:
261
+ continue
262
+
263
+ manifest["rollouts"].append(
264
+ {
265
+ "rollout_id": rid_str,
266
+ "csv": csv_path,
267
+ "num_rows": int(df.shape[0]),
268
+ "columns": cols,
269
+ }
270
+ )
271
+
272
+ manifest_path = os.path.join(
273
+ path, f"tokenwise_manifest_{now:%Y-%m-%d___%H-%M-%S}.json"
274
+ )
275
+ with open(manifest_path, "w") as fp:
276
+ json.dump(manifest, fp, indent=2)
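
A sketch of the intended call order (the model name and shapes are illustrative assumptions):

```python
import torch
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-7B-Instruct")
tally = ContextualizedTokenwiseTally(tokenizer=tok, paths=["rollout_0"], max_context_length=8)
tally.set_range((0, 1))                               # this batch covers paths[0:1]
tally.set_action_mask(torch.tensor([[0, 1, 1, 0]], dtype=torch.bool))

ids = torch.tensor([[101, 102, 103, 104]])            # (B=1, S=4) token ids
tally.add_contexts(ids)                               # one 'context' column
tally.add_data("token", ids, to_tids=True)            # per-step token strings
tally.save("tokenwise_out")                           # manifest + one CSV per rollout
```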
src_code_for_reproducibility/training/trainer_ad_align.py ADDED
@@ -0,0 +1,492 @@
1
+ import copy
2
+ import logging
3
+ import sys
4
+ from dataclasses import dataclass
5
+ from typing import Tuple
6
+
7
+ import torch
8
+ from torch.nn.utils.rnn import pad_sequence
9
+
10
+ from mllm.markov_games.rollout_tree import (
11
+ ChatTurn,
12
+ RolloutTreeBranchNode,
13
+ RolloutTreeRootNode,
14
+ )
15
+ from mllm.training.credit_methods import (
16
+ get_advantage_alignment_credits,
17
+ get_discounted_state_visitation_credits,
18
+ )
19
+ from mllm.training.tally_metrics import Tally
20
+ from mllm.training.tally_rollout import RolloutTally, RolloutTallyItem
21
+ from mllm.training.tally_tokenwise import ContextualizedTokenwiseTally
22
+ from mllm.training.tokenize_chats import process_training_chat
23
+ from mllm.training.trainer_common import BaseTrainer
24
+ from mllm.training.training_data_utils import (
25
+ AdvantagePacket,
26
+ TrainingBatch,
27
+ TrainingChatTurn,
28
+ TrajectoryBatch,
29
+ get_main_chat_list_and_rewards,
30
+ get_tokenwise_credits,
31
+ )
32
+ from mllm.utils.resource_context import resource_logger_context
33
+
34
+ logger = logging.getLogger(__name__)
35
+ logger.addHandler(logging.StreamHandler(sys.stdout))
36
+
37
+ RolloutId = int
38
+ AgentId = str
39
+
40
+
41
+ @dataclass
42
+ class AdAlignTrainingData:
43
+ agent_id: str
44
+ main_data: TrajectoryBatch
45
+ # list-of-tensors: per rollout advantages with length jT
46
+ main_advantages: list[torch.FloatTensor] | None = None
47
+ # list-of-tensors: per rollout matrix (jT, A)
48
+ alternative_advantages: list[torch.FloatTensor] | None = None
49
+ advantage_alignment_credits: list[torch.FloatTensor] | None = None
50
+
51
+
52
+ def get_alternative_chat_histories(
53
+ agent_id: str, root: RolloutTreeRootNode
54
+ ) -> tuple[list[list[TrainingChatTurn]], list[torch.FloatTensor]]:
55
+ """
56
+ args:
57
+ agent_id: The agent we want to get the chat history for.
58
+ root: The root of the rollout tree.
59
+ returns:
60
+ alternative_chats: list[list[TrainingChatTurn]] (jT*A, jS')
61
+ alternative_rewards: list[torch.FloatTensor] (jT*A, jT')
62
+ """
63
+ current_node = root.child
65
+ pre_branch_chat = []
66
+ pre_branch_rewards = []
67
+ alternative_rewards = []
68
+ alternative_chats = []
69
+ while current_node is not None:
70
+ assert isinstance(
71
+ current_node, RolloutTreeBranchNode
72
+ ), "Current node should be a branch node."
73
+ main_node = current_node.main_child
74
+ branches = current_node.branches
75
+ current_node = main_node.child
76
+
77
+ # Get the `A` alternative trajectories
78
+ alternative_nodes = branches[agent_id]
79
+ for alt_node in alternative_nodes:
80
+ post_branch_chat, post_branch_rewards = get_main_chat_list_and_rewards(
81
+ agent_id=agent_id, root=alt_node
82
+ )
83
+ branch_chat = pre_branch_chat + post_branch_chat
84
+ alternative_chats.append(branch_chat)
85
+ alternative_rewards.append(
86
+ torch.cat([torch.tensor(pre_branch_rewards), post_branch_rewards])
87
+ )
88
+
89
+ chat_turns: list[ChatTurn] = main_node.step_log.action_logs[agent_id].chat_turns
90
+ chat_turns: list[TrainingChatTurn] = [
91
+ TrainingChatTurn(time_step=main_node.time_step, **turn.model_dump())
92
+ for turn in chat_turns
93
+ ]
94
+
95
+ pre_branch_chat.extend(chat_turns)
96
+ pre_branch_rewards.append(
97
+ main_node.step_log.simulation_step_log.rewards[agent_id]
98
+ )
99
+
100
+ return alternative_chats, alternative_rewards
101
+
102
+
103
+ class TrainerAdAlign(BaseTrainer):
104
+ """
105
+ Extends the reinforce trainer to support Advantage Alignment.
106
+ """
107
+
108
+ def __init__(
109
+ self,
110
+ ad_align_beta: float,
111
+ ad_align_gamma: float,
112
+ ad_align_exclude_k_equals_t: bool,
113
+ ad_align_use_sign: bool,
114
+ ad_align_clipping: float,
115
+ ad_align_force_coop_first_step: bool,
116
+ use_old_ad_align: bool,
117
+ use_time_regularization: bool,
118
+ rloo_branch: bool,
119
+ reuse_baseline: bool,
120
+ ad_align_beta_anneal_step: int = -1,
121
+ ad_align_beta_anneal_rate: float = 0.5,
122
+ min_ad_align_beta: float = 0.1,
123
+ mean_normalize_ad_align: bool = False,
124
+ whiten_adalign_advantages: bool = False,
125
+ whiten_adalign_advantages_time_step_wise: bool = False,
126
+ *args,
127
+ **kwargs,
128
+ ):
129
+ """
130
+ Initialize the advantage alignment trainer.
131
+ Args:
132
+ ad_align_beta: Beta parameter for the advantage alignment.
133
+ ad_align_gamma: Gamma parameter for the advantage alignment.
134
+ ad_align_exclude_k_equals_t: Whether to exclude the k = t term from the alignment sum.
135
+ ad_align_use_sign: Whether to use sign in the advantage alignment.
136
+ ad_align_clipping: Clipping value for the advantage alignment.
137
+ ad_align_force_coop_first_step: Whether to force coop on the first step of the advantage alignment.
138
+ """
139
+ super().__init__(*args, **kwargs)
140
+ self.ad_align_beta = ad_align_beta
141
+ self.ad_align_gamma = ad_align_gamma
142
+ self.ad_align_exclude_k_equals_t = ad_align_exclude_k_equals_t
143
+ self.ad_align_use_sign = ad_align_use_sign
144
+ self.ad_align_clipping = ad_align_clipping
145
+ self.ad_align_force_coop_first_step = ad_align_force_coop_first_step
146
+ self.use_old_ad_align = use_old_ad_align
147
+ self.use_time_regularization = use_time_regularization
148
+ self.rloo_branch = rloo_branch
149
+ self.reuse_baseline = reuse_baseline
150
+ self.ad_align_beta_anneal_step = ad_align_beta_anneal_step
151
+ self.ad_align_beta_anneal_rate = ad_align_beta_anneal_rate
152
+ self.min_ad_align_beta = min_ad_align_beta
153
+ self.past_ad_align_step = -1
154
+ self.mean_normalize_ad_align = mean_normalize_ad_align
155
+ self.whiten_adalign_advantages = whiten_adalign_advantages
156
+ self.whiten_adalign_advantages_time_step_wise = (
157
+ whiten_adalign_advantages_time_step_wise
158
+ )
159
+ self.training_data: dict[AgentId, AdAlignTrainingData] = {}
160
+ self.debug_path_list: list[str] = []
161
+
162
+ def set_agent_trajectory_data(
163
+ self, agent_id: str, roots: list[RolloutTreeRootNode]
164
+ ):
165
+ """
166
+ Tokenize the main and alternative trajectories of `agent_id`, estimate their
167
+ advantages, and store them as advantage-alignment training data.
168
+ """
169
+
170
+ B = len(roots) # Number of rollouts
171
+
172
+ # For main rollouts
173
+ batch_rollout_ids = []
174
+ batch_crn_ids = []
175
+ batch_input_ids = []
176
+ batch_action_mask = []
177
+ batch_entropy_mask = []
178
+ batch_timesteps = []
179
+ batch_state_ends_mask = []
180
+ batch_engine_log_probs = []
181
+ batch_rewards = []
182
+
183
+ # For alternative actions rollouts
184
+ batch_branching_time_steps = []
185
+ alternative_batch_input_ids = []
186
+ alternative_batch_action_mask = []
187
+ alternative_batch_entropy_mask = []
188
+ alternative_batch_timesteps = []
189
+ alternative_batch_state_ends_mask = []
190
+ alternative_batch_engine_log_probs = []
191
+ alternative_batch_rewards = []
192
+ jT_list = []
193
+
194
+ try:
195
+ A = len(roots[0].child.branches[agent_id]) # Number of alternative actions
196
+ except (AttributeError, KeyError, IndexError):  # no branches for this agent
197
+ A = 0
198
+
199
+ for root in roots:
200
+ rollout_id = root.id
201
+ self.debug_path_list.append(
202
+ "mgid:" + str(rollout_id) + "_agent_id:" + agent_id
203
+ )
204
+ # Get main trajectory
205
+ batch_rollout_ids.append(rollout_id)
206
+ batch_crn_ids.append(root.crn_id)
207
+ main_chat, main_rewards = get_main_chat_list_and_rewards(
208
+ agent_id=agent_id, root=root
209
+ )
210
+ (
211
+ input_ids,
212
+ action_mask,
213
+ entropy_mask,
214
+ timesteps,
215
+ state_ends_mask,
216
+ engine_log_probs,
217
+ ) = process_training_chat(
218
+ tokenizer=self.tokenizer,
219
+ chat_history=main_chat,
220
+ entropy_mask_regex=self.entropy_mask_regex,
221
+ exploration_prompts_to_remove=self.exploration_prompts_to_remove,
222
+ )
223
+ batch_input_ids.append(input_ids)
224
+ batch_action_mask.append(action_mask)
225
+ batch_entropy_mask.append(entropy_mask)
226
+ batch_timesteps.append(timesteps)
227
+ batch_state_ends_mask.append(state_ends_mask)
228
+ batch_engine_log_probs.append(engine_log_probs)
229
+ batch_rewards.append(main_rewards)
230
+ jT = main_rewards.numel() # TODO: better than this
231
+ jT_list.append(jT)
232
+ if A > 0:
233
+ # We get the branching time steps for each of the `jT` time steps in the main trajectory.
234
+ branching_time_steps = [bt for item in range(jT) for bt in A * [item]]
235
+ batch_branching_time_steps.extend(branching_time_steps)
236
+
237
+ # Get all of the (jT*A) alternative trajectories in the tree
238
+ # (jT is the number of time steps in the main trajectory, A is the number of alternative actions)
239
+ alternative_chats, alternative_rewards = get_alternative_chat_histories(
240
+ agent_id=agent_id, root=root
241
+ )
242
+ assert (
243
+ len(alternative_chats) == A * jT
244
+ ), "Incorrect number of alternative trajectories."
245
+
246
+ for chat, rewards in zip(alternative_chats, alternative_rewards):
247
+ (
248
+ input_ids,
249
+ action_mask,
250
+ entropy_mask,
251
+ timesteps,
252
+ state_ends_mask,
253
+ engine_log_probs,
254
+ ) = process_training_chat(
255
+ tokenizer=self.tokenizer,
256
+ chat_history=chat,
257
+ entropy_mask_regex=self.entropy_mask_regex,
258
+ exploration_prompts_to_remove=self.exploration_prompts_to_remove,
259
+ )
260
+ alternative_batch_input_ids.append(input_ids)
261
+ alternative_batch_action_mask.append(action_mask)
262
+ alternative_batch_entropy_mask.append(entropy_mask)
263
+ alternative_batch_timesteps.append(timesteps)
264
+ alternative_batch_state_ends_mask.append(state_ends_mask)
265
+ alternative_batch_engine_log_probs.append(engine_log_probs)
266
+ alternative_batch_rewards.append(rewards)
267
+
268
+ jT_list = torch.Tensor(jT_list)
269
+
270
+ # Assert that number of alternative actions is constant
271
+ # assert len(set(nb_alternative_actions)) == 1, "Number of alternative actions must be constant"
272
+ # A = nb_alternative_actions[0]
273
+
274
+ trajectory_batch = TrajectoryBatch(
275
+ rollout_ids=torch.tensor(batch_rollout_ids, dtype=torch.int32), # (B,)
276
+ crn_ids=torch.tensor(batch_crn_ids, dtype=torch.int32),
277
+ agent_ids=[agent_id] * len(batch_rollout_ids),
278
+ batch_input_ids=batch_input_ids,
279
+ batch_action_mask=batch_action_mask,
280
+ batch_entropy_mask=batch_entropy_mask,
281
+ batch_timesteps=batch_timesteps,
282
+ batch_state_ends_mask=batch_state_ends_mask,
283
+ batch_engine_log_probs=batch_engine_log_probs,
284
+ batch_rewards=batch_rewards,
285
+ )
286
+ # Get Advantages & Train Critic
287
+ with resource_logger_context(
288
+ logger, "Get advantages with critic gradient accumulation"
289
+ ):
290
+ self.batch_advantages: list[torch.FloatTensor] = (
291
+ self.get_advantages_with_critic_gradient_accumulation(trajectory_batch)
292
+ ) # list of length B, each (jT_i,)
293
+
294
+ if A > 0:
295
+ # Here, `A` is the number of alternative actions / trajectories taken at each time step.
296
+ # For each of the `B` rollout perspectives, at each of its jT (`j` is for jagged, since each main rollout may be of a different length) steps, we take A alternate trajectories (from different actions).
297
+ # Therefore, we have ∑jT * A trajectories to process. If each of the main trajectories have T steps, we will have `B*T*A` to process.
298
+ with resource_logger_context(logger, "Create alternative trajectory batch"):
299
+ sum_jT = int(torch.sum(jT_list).item())
300
+ jT_list = (
301
+ jT_list.int().tolist()
302
+ ) # (jT,) # (we only want the advantages where we branched out)
303
+ alternative_trajectory_batch = TrajectoryBatch(
304
+ rollout_ids=torch.zeros(A * sum_jT, dtype=torch.int32),
305
+ crn_ids=torch.zeros(A * sum_jT, dtype=torch.int32),
306
+ agent_ids=[agent_id] * (A * sum_jT),
307
+ batch_input_ids=alternative_batch_input_ids,
308
+ batch_action_mask=alternative_batch_action_mask,
309
+ batch_entropy_mask=alternative_batch_entropy_mask,
310
+ batch_timesteps=alternative_batch_timesteps,
311
+ batch_state_ends_mask=alternative_batch_state_ends_mask,
312
+ batch_engine_log_probs=alternative_batch_engine_log_probs,
313
+ batch_rewards=alternative_batch_rewards,
314
+ )
315
+
316
+ # Get alternative advantages
317
+ # BAAs stands for batch alternative advantages
318
+ # (torch nested tensors have very little api support, so we have to do some odd manual work here)
319
+ with resource_logger_context(
320
+ logger, "Compute alternative advantage estimates"
321
+ ):
322
+ BAAs_list = self.get_advantages_with_critic_gradient_accumulation(
323
+ alternative_trajectory_batch
324
+ ) # list length (∑jT * A), each (jT',)
325
+ # Pad alternative advantages to (∑jT*A, P)
326
+
327
+ BAAs_padded = pad_sequence(
328
+ BAAs_list, batch_first=True, padding_value=0.0
329
+ )
330
+ branch_idx = torch.tensor(
331
+ batch_branching_time_steps,
332
+ device=BAAs_padded.device,
333
+ dtype=torch.long,
334
+ )
335
+ gathered = BAAs_padded.gather(
336
+ dim=1, index=branch_idx.unsqueeze(1)
337
+ ).squeeze(1)
338
+ # Reshape and split per rollout. The flat order is rollout-major, then
339
+ # timestep-major, then alternative: rows of the reshape are (rollout, timestep) pairs.
+ gathered = gathered.view(sum_jT, A) # (∑jT, A)
340
+ blocks = list(
341
+ torch.split(gathered, jT_list, dim=0)
342
+ ) # len B, shapes (jT_i, A)
343
+ BAAs = [
344
+ blk.contiguous() for blk in blocks
345
+ ] # list of (jT_i, A)
346
+ if self.ad_align_beta_anneal_step > 0:
347
+ max_rollout_id = torch.max(trajectory_batch.rollout_ids) + 1
348
+ if (
349
+ max_rollout_id % self.ad_align_beta_anneal_step == 0
350
+ and self.past_ad_align_step != max_rollout_id
351
+ ):
352
+ self.ad_align_beta = max(
353
+ self.ad_align_beta * self.ad_align_beta_anneal_rate,
354
+ self.min_ad_align_beta,
355
+ )
356
+ logger.info(f"Annealing ad_align_beta to {self.ad_align_beta}")
357
+ self.past_ad_align_step = max_rollout_id
358
+ self.training_data[agent_id] = AdAlignTrainingData(
359
+ agent_id=agent_id,
360
+ main_data=trajectory_batch,
361
+ main_advantages=self.batch_advantages,
362
+ alternative_advantages=BAAs if A > 0 else None,
363
+ )
364
+
365
+ def share_advantage_data(self) -> list[AdvantagePacket]:
366
+ """
367
+ Share the advantage alignment data with other agents.
368
+ Returns:
369
+ AdvantagePacket: The advantage packet containing the agent's advantages.
370
+ """
371
+ logger.info(f"Sharing advantage alignment data.")
372
+ advantage_packets = []
373
+ for _, agent_data in self.training_data.items():
374
+ advantage_packets.append(
375
+ AdvantagePacket(
376
+ agent_id=agent_data.agent_id,
377
+ rollout_ids=agent_data.main_data.rollout_ids,
378
+ main_advantages=agent_data.main_advantages,
379
+ )
380
+ )
381
+ return advantage_packets
382
+
383
+ def receive_advantage_data(self, advantage_packets: list[AdvantagePacket]):
384
+ """
385
+ Receive advantage packets from other players.
386
+ These contain the advantages of the other players' rollouts estimated by them.
387
+ """
388
+ logger.info(f"Receiving advantage packets.")
389
+
390
+ assert (
391
+ len(advantage_packets) > 0
392
+ ), "At least one advantage packet must be provided."
393
+
394
+ for agent_id, agent_data in self.training_data.items():
395
+ coagent_advantage_packets = [
396
+ packet for packet in advantage_packets if packet.agent_id != agent_id
397
+ ]
398
+ agent_rollout_ids = agent_data.main_data.rollout_ids
399
+ agent_advantages = agent_data.main_advantages
400
+ co_agent_advantages = []
401
+ for rollout_id in agent_rollout_ids:
402
+ for co_agent_packet in coagent_advantage_packets:
403
+ if rollout_id in co_agent_packet.rollout_ids:
404
+ index = torch.where(rollout_id == co_agent_packet.rollout_ids)[
405
+ 0
406
+ ].item()
407
+ co_agent_advantages.append(
408
+ co_agent_packet.main_advantages[index]
409
+ )
410
+ # assumes that its two player game, with one co-agent
411
+ break
412
+ assert len(co_agent_advantages) == len(agent_advantages)
413
+ B = len(agent_advantages)
414
+ assert all(
415
+ a.shape[0] == b.shape[0]
416
+ for a, b in zip(co_agent_advantages, agent_advantages)
417
+ ), "Number of advantages must match for advantage alignment."
418
+
419
+ # Get padded tensors (advantage alignment is invariant to padding)
420
+ lengths = torch.tensor(
421
+ [len(t) for t in agent_advantages],
422
+ device=self.device,
423
+ dtype=torch.long,
424
+ )
425
+ padded_main_advantages = pad_sequence(
426
+ agent_advantages, batch_first=True, padding_value=0.0
427
+ )
428
+ if agent_data.alternative_advantages:
429
+ padded_alternative_advantages = pad_sequence(
430
+ agent_data.alternative_advantages,
431
+ batch_first=True,
432
+ padding_value=0.0,
433
+ ) # (B, P, A)
434
+ else:
435
+ padded_alternative_advantages = None
436
+ padded_co_agent_advantages = pad_sequence(
437
+ co_agent_advantages, batch_first=True, padding_value=0.0
438
+ )
439
+
440
+ # Create training batch data
441
+ credits, sub_tensors = get_advantage_alignment_credits(
442
+ a1=padded_main_advantages,
443
+ a1_alternative=padded_alternative_advantages,
444
+ a2=padded_co_agent_advantages,
445
+ beta=self.ad_align_beta,
446
+ gamma=self.ad_align_gamma,
447
+ exclude_k_equals_t=self.ad_align_exclude_k_equals_t,
448
+ use_sign=self.ad_align_use_sign,
449
+ clipping=self.ad_align_clipping,
450
+ force_coop_first_step=self.ad_align_force_coop_first_step,
451
+ use_old_ad_align=self.use_old_ad_align,
452
+ use_time_regularization=self.use_time_regularization,
453
+ rloo_branch=self.rloo_branch,
454
+ reuse_baseline=self.reuse_baseline,
455
+ mean_normalize_ad_align=self.mean_normalize_ad_align,
456
+ whiten_adalign_advantages=self.whiten_adalign_advantages,
457
+ whiten_adalign_advantages_time_step_wise=self.whiten_adalign_advantages_time_step_wise,
458
+ )
459
+ for key, value in sub_tensors.items():
460
+ self.rollout_tally.add_metric(
461
+ path=[key],
462
+ rollout_tally_item=RolloutTallyItem(
463
+ crn_ids=agent_data.main_data.crn_ids,
464
+ rollout_ids=agent_data.main_data.rollout_ids,
465
+ agent_ids=agent_data.main_data.agent_ids,
466
+ metric_matrix=value,
467
+ ),
468
+ )
469
+
470
+ if not self.skip_discounted_state_visitation:
471
+ credits = get_discounted_state_visitation_credits(
472
+ credits,
473
+ self.discount_factor,
474
+ )
+ sub_tensors["discounted_state_visitation_credits"] = credits
475
+ self.rollout_tally.add_metric(
476
+ path=["discounted_state_visitation_credits"],
477
+ rollout_tally_item=RolloutTallyItem(
478
+ crn_ids=agent_data.main_data.crn_ids,
479
+ rollout_ids=agent_data.main_data.rollout_ids,
480
+ agent_ids=agent_data.main_data.agent_ids,
481
+ metric_matrix=sub_tensors[
482
+ "discounted_state_visitation_credits"
483
+ ],
484
+ ),
485
+ )
486
+
487
+ # Slice back to jagged
488
+ advantage_alignment_credits = [credits[i, : lengths[i]] for i in range(B)]
489
+ # Replace stored training data for this agent by the concrete trajectory batch
490
+ # and attach the computed credits for policy gradient.
491
+ self.training_data[agent_id] = agent_data.main_data
492
+ self.training_data[agent_id].batch_credits = advantage_alignment_credits
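
For reference, a standalone toy version of the pad/gather/split step above, making the flat ordering (rollout-major, then timestep, then alternative) explicit:

```python
import torch

A, jT_list = 2, [2, 1]                    # two rollouts with 2 and 1 branch points
sum_jT = sum(jT_list)                     # 3 branch points, A * sum_jT = 6 branches
# advantage of each alternative trajectory at its branching timestep,
# in flat order r0(t0a0, t0a1, t1a0, t1a1), r1(t0a0, t0a1):
gathered = torch.arange(6, dtype=torch.float32)
blocks = torch.split(gathered.view(sum_jT, A), jT_list, dim=0)
print(blocks[0])   # rollout 0 -> tensor([[0., 1.], [2., 3.]])  shape (jT_0=2, A=2)
print(blocks[1])   # rollout 1 -> tensor([[4., 5.]])            shape (jT_1=1, A=2)
```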
src_code_for_reproducibility/training/trainer_independent.py ADDED
@@ -0,0 +1,155 @@
1
+ """
2
+ Independent (naive) trainer: each agent is trained on its own advantages and ignores co-agent advantage packets.
3
+ """
4
+ import logging
5
+ import os
6
+ import sys
7
+ from typing import Union
8
+
9
+ import torch
10
+ import torch.nn.functional as F
11
+ from accelerate import Accelerator
13
+ from peft import LoraConfig
14
+ from torch.nn.utils.rnn import pad_sequence
15
+ from transformers import AutoModelForCausalLM, AutoTokenizer
16
+
17
+ from mllm.markov_games.rollout_tree import *
18
+ from mllm.markov_games.rollout_tree import RolloutTreeRootNode
19
+ from mllm.training.credit_methods import (
20
+ get_discounted_returns,
21
+ get_discounted_state_visitation_credits,
22
+ get_generalized_advantage_estimates,
23
+ get_rloo_credits,
24
+ )
25
+ from mllm.training.tally_metrics import Tally
26
+ from mllm.training.tally_tokenwise import ContextualizedTokenwiseTally
27
+ from mllm.training.tokenize_chats import *
28
+ from mllm.training.tokenize_chats import process_training_chat
29
+ from mllm.training.trainer_common import BaseTrainer
30
+ from mllm.training.training_data_utils import *
31
+ from mllm.training.training_data_utils import (
32
+ TrainingBatch,
33
+ TrajectoryBatch,
34
+ get_tokenwise_credits,
35
+ )
36
+ from mllm.utils.resource_context import resource_logger_context
37
+
38
+ logger = logging.getLogger(__name__)
39
+ logger.addHandler(logging.StreamHandler(sys.stdout))
40
+
41
+
42
+ @dataclass
43
+ class TrainingData:
44
+ agent_id: str
45
+ main_data: TrajectoryBatch
46
+ # list-of-tensors: per rollout advantages with length jT
47
+ main_advantages: list[torch.FloatTensor] | None = None
48
+
49
+
50
+ class TrainerNaive(BaseTrainer):
51
+ def set_agent_trajectory_data(
52
+ self, agent_id: str, roots: list[RolloutTreeRootNode]
53
+ ) -> None:
54
+ """
55
+ Tokenize the main trajectories of `agent_id`, estimate advantages, and store them as training data.
56
+ """
57
+ # TODO: append to current batch data instead, else we will only train for one agent!
58
+ self.policy_gradient_data = None
59
+
60
+ # Tensorize Chats
61
+ rollout_ids = []
62
+ crn_ids = [] # common random number id
63
+ batch_input_ids = []
64
+ batch_action_mask = []
65
+ batch_entropy_mask = []
66
+ batch_timesteps = []
67
+ batch_state_ends_mask = []
68
+ batch_engine_log_probs = []
69
+ batch_rewards = []
70
+ for root in roots:
71
+ rollout_id = root.id
72
+ self.debug_path_list.append(
73
+ "mgid:" + str(rollout_id) + "_agent_id:" + agent_id
74
+ )
75
+ rollout_ids.append(rollout_id)
76
+ crn_ids.append(root.crn_id)
77
+ chat, rewards = get_main_chat_list_and_rewards(agent_id=agent_id, root=root)
78
+ (
79
+ input_ids,
80
+ action_mask,
81
+ entropy_mask,
82
+ timesteps,
83
+ state_ends_mask,
84
+ engine_log_probs,
85
+ ) = process_training_chat(
86
+ tokenizer=self.tokenizer,
87
+ chat_history=chat,
88
+ entropy_mask_regex=self.entropy_mask_regex,
89
+ exploration_prompts_to_remove=self.exploration_prompts_to_remove,
90
+ )
91
+ batch_input_ids.append(input_ids)
92
+ batch_action_mask.append(action_mask)
93
+ batch_entropy_mask.append(entropy_mask)
94
+ batch_timesteps.append(timesteps)
95
+ batch_state_ends_mask.append(state_ends_mask)
96
+ batch_engine_log_probs.append(engine_log_probs)
97
+ batch_rewards.append(rewards)
98
+
99
+ trajectory_batch = TrajectoryBatch(
100
+ rollout_ids=torch.tensor(rollout_ids, dtype=torch.int32),
101
+ crn_ids=torch.tensor(crn_ids, dtype=torch.int32),
102
+ agent_ids=[agent_id] * len(rollout_ids),
103
+ batch_input_ids=batch_input_ids,
104
+ batch_action_mask=batch_action_mask,
105
+ batch_entropy_mask=batch_entropy_mask,
106
+ batch_timesteps=batch_timesteps,
107
+ batch_state_ends_mask=batch_state_ends_mask,
108
+ batch_rewards=batch_rewards,
109
+ batch_engine_log_probs=batch_engine_log_probs,
110
+ )
111
+
112
+ # Get Advantages
113
+ batch_advantages: list[torch.FloatTensor] = (
114
+ self.get_advantages_with_critic_gradient_accumulation(trajectory_batch)
115
+ )
116
+
117
+ # Discount state visitation (the mathematically correct way)
118
+ if not self.skip_discounted_state_visitation:
119
+ for i in range(len(batch_advantages)):
120
+ batch_advantages[i] = get_discounted_state_visitation_credits(
121
+ batch_advantages[i].unsqueeze(0),
122
+ self.discount_factor,
123
+ ).squeeze(0)
124
+
125
+ self.training_data[agent_id] = TrainingData(
126
+ agent_id=agent_id,
127
+ main_data=trajectory_batch,
128
+ main_advantages=batch_advantages,
129
+ )
130
+
131
+ def receive_advantage_data(self, advantage_packets: list[AdvantagePacket]):
132
+ """
133
+ This trainer ignores the advantages of the other trainers.
134
+ """
135
+ for agent_id, agent_data in self.training_data.items():
136
+ self.training_data[agent_id] = agent_data.main_data
137
+ self.training_data[agent_id].batch_credits = agent_data.main_advantages
138
+
139
+ def share_advantage_data(self) -> list[AdvantagePacket]:
140
+ """
141
+ Share the advantage data with other agents.
142
+ Returns:
143
+ AdvantagePacket: The advantage packet containing the agent's advantages.
144
+ """
145
+ logger.info(f"Sharing advantage data.")
146
+ advantage_packets = []
147
+ for agent_id, agent_data in self.training_data.items():
148
+ advantage_packets.append(
149
+ AdvantagePacket(
150
+ agent_id=agent_id,
151
+ rollout_ids=agent_data.main_data.rollout_ids,
152
+ main_advantages=agent_data.main_advantages,
153
+ )
154
+ )
155
+ return advantage_packets
src_code_for_reproducibility/training/training_data_utils.py ADDED
@@ -0,0 +1,394 @@
+ from dataclasses import dataclass
+ from typing import Literal, Optional, Tuple
+ 
+ import torch
+ from torch.nn.utils.rnn import pad_sequence
+ 
+ from mllm.markov_games.rollout_tree import (
+     ChatTurn,
+     RolloutTreeBranchNode,
+     RolloutTreeNode,
+     RolloutTreeRootNode,
+ )
+ 
+ 
+ @dataclass
+ class AdvantagePacket:
+     agent_id: str
+     rollout_ids: torch.IntTensor  # (B,)
+     # list-of-tensors
+     main_advantages: list[torch.FloatTensor]
+ 
+ 
+ class TrainingChatTurn:
+     """
+     This class contains the chat turns for a single agent.
+     It is like ChatTurn, but with the time step added.
+     """
+ 
+     # TODO: simplify by making this a child of ChatTurn
+ 
+     def __init__(
+         self,
+         time_step: int,
+         role: str,
+         agent_id: str,
+         content: str,
+         chat_template_token_ids: list[int],
+         reasoning_content: str,
+         is_state_end: bool,
+         out_token_ids: Optional[list[int]] = None,
+         log_probs: Optional[list[float]] = None,
+     ) -> None:
+         self.time_step = time_step
+         self.role = role
+         self.agent_id = agent_id
+         self.content = content
+         self.chat_template_token_ids = chat_template_token_ids
+         self.reasoning_content = reasoning_content
+         self.is_state_end = is_state_end
+         self.out_token_ids = out_token_ids
+         self.log_probs = log_probs
+ 
+     def dict(self):
+         return {
+             "time_step": self.time_step,
+             "role": self.role,
+             "agent_id": self.agent_id,
+             "content": self.content,
+             "chat_template_token_ids": self.chat_template_token_ids,
+             "reasoning_content": self.reasoning_content,
+             "is_state_end": self.is_state_end,
+             "out_token_ids": self.out_token_ids,
+             "log_probs": self.log_probs,
+         }
+ 
+ 
+ def get_main_chat_list_and_rewards(
+     agent_id: str, root: RolloutTreeRootNode | RolloutTreeNode
+ ) -> Tuple[list[TrainingChatTurn], torch.FloatTensor]:
+     """
+     Traverse a rollout tree and return the list of chat turns and per-step
+     rewards for an agent. When a branch node is encountered, follow the main path.
+     """
+     # TODO: extend to all trees, not just linear ones
+     if isinstance(root, RolloutTreeRootNode):
+         current_node = root.child
+     else:
+         current_node = root
+ 
+     chat = []
+     rewards = []
+     while current_node is not None:
+         if isinstance(current_node, RolloutTreeBranchNode):
+             current_node = current_node.main_child
+         reward: float = current_node.step_log.simulation_step_log.rewards[agent_id]
+         rewards.append(reward)
+         chat_turns: list[TrainingChatTurn] = current_node.step_log.action_logs[
+             agent_id
+         ].chat_turns
+         chat_turns = [
+             TrainingChatTurn(time_step=current_node.time_step, **turn.model_dump())
+             for turn in chat_turns
+         ]
+         chat.extend(chat_turns)
+         current_node = current_node.child
+     return chat, torch.FloatTensor(rewards)
+ 
+ 
+ def get_tokenwise_credits(
+     # B := batch size, S := number of tokens / seq. length, T := number of states.
+     # `j` stands for jagged (see pytorch nested tensors).
+     batch_timesteps: torch.IntTensor | torch.Tensor,  # (B, jS)
+     batch_credits: torch.FloatTensor | torch.Tensor,  # (B, jT)
+ ) -> list[torch.FloatTensor]:  # List[(jS,)]
+     """
+     Broadcast per-state credits to per-token credits: every token belonging to
+     timestep t receives credits[t].
+     """
+     # TODO: vectorize this code
+     batch_token_credits = []
+     for credits, timesteps in zip(batch_credits, batch_timesteps):
+         token_credits = torch.zeros_like(
+             timesteps,
+             dtype=credits.dtype,
+             device=timesteps.device,
+         )
+         for idx, credit in enumerate(credits):
+             token_credits[timesteps == idx] = credit
+         batch_token_credits.append(token_credits)
+     return batch_token_credits
+ 
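For intuition, a small worked example of the broadcast performed by `get_tokenwise_credits` (values are illustrative):

import torch

# Tokens 0-2 belong to state 0, tokens 3-4 to state 1, token 5 to state 2.
timesteps = torch.tensor([0, 0, 0, 1, 1, 2])
credits = torch.tensor([0.5, -1.0, 2.0])
token_credits = get_tokenwise_credits([timesteps], [credits])
# token_credits[0] -> tensor([0.5, 0.5, 0.5, -1.0, -1.0, 2.0])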
+ 
+ @dataclass
+ class TrajectoryBatch:
+     """
+     Tensorized batch of trajectories using list-of-tensors for jagged dimensions.
+     """
+ 
+     # B := batch size, S := number of tokens / seq. length, T := number of states.
+     rollout_ids: torch.IntTensor  # (B,)
+     crn_ids: torch.IntTensor  # (B,)
+     agent_ids: list[str]  # (B,)
+     batch_input_ids: list[torch.LongTensor]  # List[(jS,)]
+     batch_action_mask: list[torch.BoolTensor]  # List[(jS,)]
+     batch_entropy_mask: list[torch.BoolTensor]  # List[(jS,)]
+     batch_timesteps: list[torch.IntTensor]  # List[(jS,)]
+     batch_state_ends_mask: list[torch.BoolTensor]  # List[(jS,)]
+     batch_engine_log_probs: Optional[list[torch.FloatTensor]]  # List[(jS,)]
+     batch_rewards: list[torch.FloatTensor]  # List[(jT,)]
+     batch_credits: Optional[list[torch.FloatTensor]] = None  # List[(jS,)]
+ 
+     def __post_init__(self):
+         """
+         Validate per-sample consistency.
+         """
+         B = self.rollout_ids.shape[0]
+         assert (
+             self.crn_ids.shape[0] == B
+         ), "CRN ids must have length equal to batch size."
+         assert (
+             len(self.agent_ids) == B
+         ), "agent_ids must have length equal to batch size."
+         assert (
+             len(self.batch_input_ids)
+             == len(self.batch_action_mask)
+             == len(self.batch_entropy_mask)
+             == len(self.batch_timesteps)
+             == len(self.batch_state_ends_mask)
+             == len(self.batch_engine_log_probs)
+             == len(self.batch_rewards)
+             == B
+         ), "Jagged lists must all have length equal to batch size."
+ 
+         for b in range(B):
+             nb_rewards = int(self.batch_rewards[b].shape[0])
+             nb_timesteps = int(torch.max(self.batch_timesteps[b]).item()) + 1
+             assert (
+                 nb_rewards == nb_timesteps
+             ), "Number of rewards and timesteps mismatch."
+             assert (
+                 self.batch_input_ids[b].shape[0]
+                 == self.batch_action_mask[b].shape[0]
+                 == self.batch_entropy_mask[b].shape[0]
+                 == self.batch_engine_log_probs[b].shape[0]
+                 == self.batch_timesteps[b].shape[0]
+             ), "Tensors must have the same shape along the jagged dimension."
+             assert (
+                 int(self.batch_state_ends_mask[b].sum())
+                 == self.batch_rewards[b].shape[0]
+             ), "Number of rewards must match number of state ends."
+ 
+     """
+     Entries (ignoring the batch dimension):
+     input_ids:
+         All of the tokens of both the user and the assistant, flattened.
+     action_mask:
+         True on the tokens of the assistant (tokens generated by the model).
+     timesteps:
+         Timestep index of each token; therefore max(timesteps) = T - 1.
+     state_ends_mask:
+         True at the tokens where state descriptions end.
+     rewards:
+         rewards[t] := R_t(s_t, a_t)
+     Example:
+         position:    "0 1 2 3 4 5 6 7 8 9 10 11 12 13 14"
+         input_ids:   "U U U a a a U a U a a  a  U  U  U"  (U := User, a := Assistant)
+         action_mask: "x x x ✓ ✓ ✓ x ✓ x ✓ ✓  ✓  x  x  x"
+         timesteps:   "0 0 0 0 0 0 1 1 1 1 1  1  2  2  2"
+         state_ends_mask: True at positions [2, 6, 14]
+         rewards:     [r0, r1, r2]
+     """
+ 
+     def __getitem__(self, key) -> "TrajectoryBatch":
+         if not isinstance(key, slice):
+             raise TypeError("TrajectoryBatch only supports slice indexing.")
+         return TrajectoryBatch(
+             rollout_ids=self.rollout_ids[key],
+             crn_ids=self.crn_ids[key],
+             agent_ids=self.agent_ids[key],
+             batch_input_ids=self.batch_input_ids[key],
+             batch_action_mask=self.batch_action_mask[key],
+             batch_entropy_mask=self.batch_entropy_mask[key],
+             batch_timesteps=self.batch_timesteps[key],
+             batch_state_ends_mask=self.batch_state_ends_mask[key],
+             batch_engine_log_probs=self.batch_engine_log_probs[key],
+             batch_rewards=self.batch_rewards[key],
+             batch_credits=self.batch_credits[key] if self.batch_credits else None,
+         )
+ 
+     def __len__(self):
+         return len(self.batch_input_ids)
+ 
+     def to(self, device):
+         self.rollout_ids = self.rollout_ids.to(device)
+         self.crn_ids = self.crn_ids.to(device)
+         self.batch_input_ids = [t.to(device) for t in self.batch_input_ids]
+         self.batch_action_mask = [t.to(device) for t in self.batch_action_mask]
+         self.batch_entropy_mask = [t.to(device) for t in self.batch_entropy_mask]
+         self.batch_timesteps = [t.to(device) for t in self.batch_timesteps]
+         self.batch_state_ends_mask = [t.to(device) for t in self.batch_state_ends_mask]
+         self.batch_engine_log_probs = [
+             t.to(device) for t in self.batch_engine_log_probs
+         ]
+         self.batch_rewards = [t.to(device) for t in self.batch_rewards]
+         self.batch_credits = (
+             [t.to(device) for t in self.batch_credits] if self.batch_credits else None
+         )
+ 
+     def get_padded_tensors_for_critic(self):
+         """
+         Returns:
+             padded_batch_input_ids: (B, P)
+             padded_batch_state_ends_mask: (B, P)
+             timestep_counts: (B,) tensor of ints indicating the number of states per sample
+         """
+         padded_batch_input_ids = pad_sequence(
+             self.batch_input_ids, batch_first=True, padding_value=0
+         )
+         padded_batch_state_ends_mask = pad_sequence(
+             self.batch_state_ends_mask, batch_first=True, padding_value=0
+         ).bool()
+         # The number of states equals the number of True entries in state_ends_mask.
+         timestep_counts = torch.tensor(
+             [int(mask.sum().item()) for mask in self.batch_state_ends_mask],
+             device=padded_batch_input_ids.device,
+             dtype=torch.long,
+         )
+         return padded_batch_input_ids, padded_batch_state_ends_mask, timestep_counts
+ 
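As a toy illustration of the helper above (values invented): with two samples of lengths 3 and 2, `pad_sequence(..., batch_first=True)` right-pads to the longest sequence in the batch, so

import torch

input_ids = [torch.tensor([11, 12, 13]), torch.tensor([21, 22])]
state_ends_mask = [torch.tensor([False, False, True]), torch.tensor([False, True])]
# pad_sequence(input_ids, batch_first=True)       -> [[11, 12, 13], [21, 22, 0]]   shape (2, 3)
# pad_sequence(state_ends_mask, batch_first=True) -> [[F, F, T], [F, T, F]]        shape (2, 3)
# timestep_counts (count of True per mask)        -> [1, 1]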
+ 
+ timestep = int
+ 
+ 
+ @dataclass
+ class PaddedTensorTrainingBatch:
+     batch_input_ids: torch.LongTensor | torch.Tensor
+     batch_action_mask: torch.BoolTensor | torch.Tensor
+     batch_entropy_mask: Optional[torch.BoolTensor | torch.Tensor]
+     batch_credits: torch.FloatTensor | torch.Tensor
+     batch_engine_log_probs: torch.FloatTensor | torch.Tensor
+     batch_timesteps: torch.IntTensor | torch.Tensor
+ 
+     def __len__(self):
+         return self.batch_input_ids.shape[0]
+ 
+     def to(self, device):
+         self.batch_input_ids = self.batch_input_ids.to(device)
+         self.batch_action_mask = self.batch_action_mask.to(device)
+         if self.batch_entropy_mask is not None:
+             self.batch_entropy_mask = self.batch_entropy_mask.to(device)
+         self.batch_credits = self.batch_credits.to(device)
+         self.batch_engine_log_probs = self.batch_engine_log_probs.to(device)
+         self.batch_timesteps = self.batch_timesteps.to(device)
+ 
+ 
+ @dataclass
+ class TrainingBatch:
+     rollout_ids: torch.IntTensor | torch.Tensor  # (B,)
+     batch_input_ids: list[torch.LongTensor]  # List[(jS,)]
+     batch_action_mask: list[torch.BoolTensor]  # List[(jS,)]
+     batch_entropy_mask: Optional[list[torch.BoolTensor]]  # List[(jS,)]
+     batch_credits: list[torch.FloatTensor]  # List[(jS,)]
+     batch_engine_log_probs: list[torch.FloatTensor]  # List[(jS,)]
+     batch_timesteps: list[torch.IntTensor]  # List[(jS,)]
+ 
+     def __post_init__(self):
+         # Ensure the batch dimension is consistent across all fields.
+         assert (
+             len(self.batch_input_ids)
+             == len(self.batch_action_mask)
+             == len(self.batch_credits)
+             == len(self.batch_engine_log_probs)
+             == len(self.batch_timesteps)
+             == self.rollout_ids.shape[0]
+         ), "Jagged lists must all have length equal to batch size."
+         if self.batch_entropy_mask is not None:
+             assert len(self.batch_entropy_mask) == self.rollout_ids.shape[0]
+         for inp, mask, cred, engine_log_prob, timesteps in zip(
+             self.batch_input_ids,
+             self.batch_action_mask,
+             self.batch_credits,
+             self.batch_engine_log_probs,
+             self.batch_timesteps,
+         ):
+             assert (
+                 inp.shape[0]
+                 == mask.shape[0]
+                 == cred.shape[0]
+                 == engine_log_prob.shape[0]
+                 == timesteps.shape[0]
+             ), "Tensors must have the same shapes along the jagged dimension."
+ 
+     def __getitem__(self, key) -> "TrainingBatch":
+         if not isinstance(key, slice):
+             raise TypeError("TrainingBatch only supports slice indexing.")
+         return TrainingBatch(
+             rollout_ids=self.rollout_ids[key],
+             batch_input_ids=self.batch_input_ids[key],
+             batch_action_mask=self.batch_action_mask[key],
+             batch_entropy_mask=(
+                 self.batch_entropy_mask[key]
+                 if self.batch_entropy_mask is not None
+                 else None
+             ),
+             batch_credits=self.batch_credits[key],
+             batch_engine_log_probs=self.batch_engine_log_probs[key],
+             batch_timesteps=self.batch_timesteps[key],
+         )
+ 
+     def __len__(self):
+         return len(self.batch_input_ids)
+ 
+     def to(self, device):
+         self.rollout_ids = self.rollout_ids.to(device)
+         self.batch_input_ids = [t.to(device) for t in self.batch_input_ids]
+         self.batch_action_mask = [t.to(device) for t in self.batch_action_mask]
+         if self.batch_entropy_mask is not None:
+             self.batch_entropy_mask = [t.to(device) for t in self.batch_entropy_mask]
+         self.batch_credits = [t.to(device) for t in self.batch_credits]
+         self.batch_engine_log_probs = [
+             t.to(device) for t in self.batch_engine_log_probs
+         ]
+         self.batch_timesteps = [t.to(device) for t in self.batch_timesteps]
+ 
+     def get_padded_tensors(self, padding: float = 0.0):
+         """
+         Right-pad all jagged tensors to the length of the longest sequence in
+         the batch and return them as a PaddedTensorTrainingBatch.
+         """
+         padded_batch_input_ids = pad_sequence(
+             self.batch_input_ids, batch_first=True, padding_value=int(padding)
+         )
+         padded_batch_action_mask = pad_sequence(
+             [m.to(dtype=torch.bool) for m in self.batch_action_mask],
+             batch_first=True,
+             padding_value=False,
+         )
+         padded_batch_entropy_mask = (
+             pad_sequence(
+                 self.batch_entropy_mask, batch_first=True, padding_value=False
+             )
+             if self.batch_entropy_mask is not None
+             else None
+         )
+         padded_batch_credits = pad_sequence(
+             self.batch_credits, batch_first=True, padding_value=float(padding)
+         )
+         padded_batch_engine_log_probs = pad_sequence(
+             self.batch_engine_log_probs, batch_first=True, padding_value=float(padding)
+         )
+         padded_batch_timesteps = pad_sequence(
+             self.batch_timesteps, batch_first=True, padding_value=0
+         )
+ 
+         return PaddedTensorTrainingBatch(
+             padded_batch_input_ids,
+             padded_batch_action_mask,
+             padded_batch_entropy_mask,
+             padded_batch_credits,
+             padded_batch_engine_log_probs,
+             padded_batch_timesteps,
+         )
+ 
+     def append(self, other: "TrainingBatch"):
+         self.rollout_ids = torch.cat([self.rollout_ids, other.rollout_ids])
+         self.batch_input_ids.extend(other.batch_input_ids)
+         self.batch_action_mask.extend(other.batch_action_mask)
+         if self.batch_entropy_mask is not None and other.batch_entropy_mask is not None:
+             self.batch_entropy_mask.extend(other.batch_entropy_mask)
+         self.batch_credits.extend(other.batch_credits)
+         self.batch_engine_log_probs.extend(other.batch_engine_log_probs)
+         self.batch_timesteps.extend(other.batch_timesteps)
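A hypothetical mini-batching loop over a `TrainingBatch` (the `training_batch` name and micro-batch size are illustrative): slice a micro-batch, move it to the device in place, then pad for the forward pass; the padded action mask keeps padding positions out of the loss.

micro_batch_size = 4  # illustrative
for start in range(0, len(training_batch), micro_batch_size):
    micro = training_batch[start : start + micro_batch_size]  # slicing returns a TrainingBatch
    micro.to("cuda")                     # in-place device move
    padded = micro.get_padded_tensors()  # -> PaddedTensorTrainingBatch, tensors of shape (B, P)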