Muqeeth committed on
Commit
683db60
·
verified ·
1 Parent(s): 2ee6aa6

Add files using upload-large-folder tool

Browse files
Files changed (50) hide show
  1. .hydra/config.yaml +173 -0
  2. .hydra/hydra.yaml +154 -0
  3. .hydra/overrides.yaml +1 -0
  4. run.log +0 -0
  5. seed_123/Qwen/Qwen2.5-7B-Instruct/adapters/README.md +207 -0
  6. seed_123/Qwen/Qwen2.5-7B-Instruct/adapters/agent_adapter/adapter_config.json +42 -0
  7. seed_123/Qwen/Qwen2.5-7B-Instruct/adapters/critic_adapter/adapter_config.json +42 -0
  8. src_code_for_reproducibility/__init__.py +0 -0
  9. src_code_for_reproducibility/markov_games/statistics_runner.py +405 -0
  10. src_code_for_reproducibility/models/__init__.py +0 -0
  11. src_code_for_reproducibility/models/adapter_training_wrapper.py +98 -0
  12. src_code_for_reproducibility/models/human_policy.py +255 -0
  13. src_code_for_reproducibility/models/inference_backend.py +39 -0
  14. src_code_for_reproducibility/models/inference_backend_dummy.py +54 -0
  15. src_code_for_reproducibility/models/inference_backend_sglang.py +86 -0
  16. src_code_for_reproducibility/models/inference_backend_sglang_local_server.py +127 -0
  17. src_code_for_reproducibility/models/inference_backend_vllm.py +118 -0
  18. src_code_for_reproducibility/models/inference_backend_vllm_local_server.py +160 -0
  19. src_code_for_reproducibility/models/large_language_model_api.py +171 -0
  20. src_code_for_reproducibility/models/large_language_model_local.py +384 -0
  21. src_code_for_reproducibility/models/scalar_critic.py +54 -0
  22. src_code_for_reproducibility/training/README.md +20 -0
  23. src_code_for_reproducibility/training/__init__.py +0 -0
  24. src_code_for_reproducibility/training/annealing_methods.py +6 -0
  25. src_code_for_reproducibility/training/credit_methods.py +304 -0
  26. src_code_for_reproducibility/training/tally_metrics.py +55 -0
  27. src_code_for_reproducibility/training/tally_rollout.py +137 -0
  28. src_code_for_reproducibility/training/tally_tokenwise.py +276 -0
  29. src_code_for_reproducibility/training/tokenize_chats.py +128 -0
  30. src_code_for_reproducibility/training/trainer_ad_align.py +495 -0
  31. src_code_for_reproducibility/training/trainer_common.py +1054 -0
  32. src_code_for_reproducibility/training/trainer_independent.py +155 -0
  33. src_code_for_reproducibility/training/trainer_sum_rewards.py +127 -0
  34. src_code_for_reproducibility/training/training_data_utils.py +394 -0
  35. src_code_for_reproducibility/utils/__init__.py +0 -0
  36. src_code_for_reproducibility/utils/dict_get_path.py +12 -0
  37. src_code_for_reproducibility/utils/format_time.py +7 -0
  38. src_code_for_reproducibility/utils/gather_training_stats.py +257 -0
  39. src_code_for_reproducibility/utils/get_coagent_id.py +4 -0
  40. src_code_for_reproducibility/utils/get_stochastic_game_lengths.py +30 -0
  41. src_code_for_reproducibility/utils/kill_sglang.py +17 -0
  42. src_code_for_reproducibility/utils/output_source_code.py +6 -0
  43. src_code_for_reproducibility/utils/resource_context.py +78 -0
  44. src_code_for_reproducibility/utils/rollout_tree_chat_htmls.py +1921 -0
  45. src_code_for_reproducibility/utils/rollout_tree_gather_utils.py +314 -0
  46. src_code_for_reproducibility/utils/rollout_tree_stats.py +50 -0
  47. src_code_for_reproducibility/utils/short_id_gen.py +11 -0
  48. src_code_for_reproducibility/utils/stat_pack.py +113 -0
  49. src_code_for_reproducibility/utils/update_start_epoch.py +9 -0
  50. src_code_for_reproducibility/utils/wandb_utils.py +164 -0
.hydra/config.yaml ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ experiment:
2
+ wandb_enabled: true
3
+ nb_epochs: 3000
4
+ nb_matches_per_iteration: 64
5
+ reinit_matches_each_it: true
6
+ checkpoint_every_n_iterations: 50
7
+ start_epoch: 0
8
+ resume_experiment: true
9
+ base_seed: 123
10
+ seed_group_size: 8
11
+ train: true
12
+ stat_methods_for_live_wandb: mllm.markov_games.negotiation.negotiation_statistics
13
+ name: tas_rps_startend_naive_seed123
14
+ agent_buffer: false
15
+ keep_agent_buffer_count: ${lora_count}
16
+ agent_buffer_recent_k: -1
17
+ description: Trust-and-Split Rock Paper Scissors negotiation game
18
+ logging:
19
+ wandb:
20
+ enabled: false
21
+ project: llm-negotiation
22
+ entity: null
23
+ mode: online
24
+ name: null
25
+ group: null
26
+ tags: []
27
+ notes: null
28
+ temperature: 1.0
29
+ markov_games:
30
+ runner_method_name: LinearRunner
31
+ runner_kwargs: {}
32
+ group_by_round: true
33
+ simulation_class_name: TrustAndSplitRPSSimulation
34
+ simulation_init_args:
35
+ nb_of_rounds: 10
36
+ quota_messages_per_agent_per_round: 1
37
+ alternating_hands: false
38
+ agents:
39
+ 0:
40
+ agent_id: ${agent_0_id}
41
+ agent_name: Alice
42
+ agent_class_name: TrustAndSplitRPSAgent
43
+ policy_id: base_llm/agent_adapter
44
+ init_kwargs:
45
+ goal: Maximize your total points over the whole game.
46
+ num_message_chars: 500
47
+ message_start_end_format: true
48
+ proposal_start_end_format: true
49
+ 1:
50
+ agent_id: ${agent_1_id}
51
+ agent_name: Bob
52
+ agent_class_name: TrustAndSplitRPSAgent
53
+ policy_id: base_llm/agent_adapter
54
+ init_kwargs:
55
+ goal: Maximize your total points over the whole game.
56
+ num_message_chars: 500
57
+ message_start_end_format: true
58
+ proposal_start_end_format: true
59
+ models:
60
+ base_llm:
61
+ class: LeanLocalLLM
62
+ init_args:
63
+ llm_id: base_llm
64
+ model_name: Qwen/Qwen2.5-7B-Instruct
65
+ inference_backend: vllm
66
+ hf_kwargs:
67
+ device_map: auto
68
+ torch_dtype: bfloat16
69
+ max_memory:
70
+ 0: 20GiB
71
+ attn_implementation: flash_attention_2
72
+ inference_backend_init_kwargs:
73
+ enable_lora: true
74
+ seed: ${experiment.base_seed}
75
+ enable_prefix_caching: true
76
+ max_model_len: 10000.0
77
+ gpu_memory_utilization: 0.5
78
+ dtype: bfloat16
79
+ trust_remote_code: true
80
+ max_lora_rank: 32
81
+ enforce_eager: false
82
+ max_loras: ${lora_count}
83
+ max_cpu_loras: ${lora_count}
84
+ enable_sleep_mode: true
85
+ inference_backend_sampling_params:
86
+ temperature: ${temperature}
87
+ top_p: 1.0
88
+ max_tokens: 400
89
+ top_k: -1
90
+ logprobs: 0
91
+ adapter_configs:
92
+ agent_adapter:
93
+ task_type: CAUSAL_LM
94
+ r: 32
95
+ lora_alpha: 64
96
+ lora_dropout: 0.0
97
+ target_modules: all-linear
98
+ critic_adapter:
99
+ task_type: CAUSAL_LM
100
+ r: 32
101
+ lora_alpha: 64
102
+ lora_dropout: 0.0
103
+ target_modules: all-linear
104
+ enable_thinking: null
105
+ regex_max_attempts: 1
106
+ critics:
107
+ agent_critic:
108
+ module_pointer:
109
+ - base_llm
110
+ - critic_adapter
111
+ optimizers:
112
+ agent_optimizer:
113
+ module_pointer:
114
+ - base_llm
115
+ - agent_adapter
116
+ optimizer_class_name: torch.optim.Adam
117
+ init_args:
118
+ lr: 3.0e-06
119
+ weight_decay: 0.0
120
+ critic_optimizer:
121
+ module_pointer: agent_critic
122
+ optimizer_class_name: torch.optim.Adam
123
+ init_args:
124
+ lr: 3.0e-06
125
+ weight_decay: 0.0
126
+ trainers:
127
+ agent_trainer:
128
+ class: TrainerNaive
129
+ module_pointers:
130
+ policy:
131
+ - base_llm
132
+ - agent_adapter
133
+ policy_optimizer: agent_optimizer
134
+ critic: agent_critic
135
+ critic_optimizer: critic_optimizer
136
+ kwargs:
137
+ entropy_coeff: 0.0
138
+ entropy_topk: null
139
+ entropy_mask_regex: null
140
+ kl_coeff: 0.001
141
+ gradient_clipping: 1.0
142
+ restrict_tokens: null
143
+ mini_batch_size: 1
144
+ use_gradient_checkpointing: true
145
+ temperature: ${temperature}
146
+ device: cuda:0
147
+ use_gae: false
148
+ whiten_advantages: false
149
+ whiten_advantages_time_step_wise: false
150
+ skip_discounted_state_visitation: true
151
+ use_gae_lambda_annealing: false
152
+ gae_lambda_annealing_method: None
153
+ gae_lambda_annealing_method_params: None
154
+ gae_lambda_annealing_limit: 0.95
155
+ discount_factor: 0.96
156
+ use_rloo: true
157
+ enable_tokenwise_logging: false
158
+ pg_loss_normalization: nb_tokens
159
+ truncated_importance_sampling_ratio_cap: 2.0
160
+ reward_normalizing_constant: 100.0
161
+ train_on_which_data:
162
+ agent_trainer: ${agent_ids}
163
+ lora_count: 30
164
+ common_agent_kwargs:
165
+ goal: Maximize your total points over the whole game.
166
+ num_message_chars: 500
167
+ message_start_end_format: true
168
+ proposal_start_end_format: true
169
+ agent_0_id: Alice
170
+ agent_1_id: Bob
171
+ agent_ids:
172
+ - Alice
173
+ - Bob
.hydra/hydra.yaml ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ hydra:
2
+ run:
3
+ dir: ${oc.env:SCRATCH}/llm_negotiation/${now:%Y_%m}/${experiment.name}
4
+ sweep:
5
+ dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
6
+ subdir: ${hydra.job.num}
7
+ launcher:
8
+ _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
9
+ sweeper:
10
+ _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
11
+ max_batch_size: null
12
+ params: null
13
+ help:
14
+ app_name: ${hydra.job.name}
15
+ header: '${hydra.help.app_name} is powered by Hydra.
16
+
17
+ '
18
+ footer: 'Powered by Hydra (https://hydra.cc)
19
+
20
+ Use --hydra-help to view Hydra specific help
21
+
22
+ '
23
+ template: '${hydra.help.header}
24
+
25
+ == Configuration groups ==
26
+
27
+ Compose your configuration from those groups (group=option)
28
+
29
+
30
+ $APP_CONFIG_GROUPS
31
+
32
+
33
+ == Config ==
34
+
35
+ Override anything in the config (foo.bar=value)
36
+
37
+
38
+ $CONFIG
39
+
40
+
41
+ ${hydra.help.footer}
42
+
43
+ '
44
+ hydra_help:
45
+ template: 'Hydra (${hydra.runtime.version})
46
+
47
+ See https://hydra.cc for more info.
48
+
49
+
50
+ == Flags ==
51
+
52
+ $FLAGS_HELP
53
+
54
+
55
+ == Configuration groups ==
56
+
57
+ Compose your configuration from those groups (For example, append hydra/job_logging=disabled
58
+ to command line)
59
+
60
+
61
+ $HYDRA_CONFIG_GROUPS
62
+
63
+
64
+ Use ''--cfg hydra'' to Show the Hydra config.
65
+
66
+ '
67
+ hydra_help: ???
68
+ hydra_logging:
69
+ version: 1
70
+ formatters:
71
+ simple:
72
+ format: '[%(asctime)s][HYDRA] %(message)s'
73
+ handlers:
74
+ console:
75
+ class: logging.StreamHandler
76
+ formatter: simple
77
+ stream: ext://sys.stdout
78
+ root:
79
+ level: INFO
80
+ handlers:
81
+ - console
82
+ loggers:
83
+ logging_example:
84
+ level: DEBUG
85
+ disable_existing_loggers: false
86
+ job_logging:
87
+ version: 1
88
+ formatters:
89
+ simple:
90
+ format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
91
+ handlers:
92
+ console:
93
+ class: logging.StreamHandler
94
+ formatter: simple
95
+ stream: ext://sys.stdout
96
+ file:
97
+ class: logging.FileHandler
98
+ formatter: simple
99
+ filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
100
+ root:
101
+ level: INFO
102
+ handlers:
103
+ - console
104
+ - file
105
+ disable_existing_loggers: false
106
+ env: {}
107
+ mode: RUN
108
+ searchpath: []
109
+ callbacks: {}
110
+ output_subdir: .hydra
111
+ overrides:
112
+ hydra:
113
+ - hydra.mode=RUN
114
+ task: []
115
+ job:
116
+ name: run
117
+ chdir: false
118
+ override_dirname: ''
119
+ id: ???
120
+ num: ???
121
+ config_name: tas_rps_startend_naive_seed123.yaml
122
+ env_set: {}
123
+ env_copy: []
124
+ config:
125
+ override_dirname:
126
+ kv_sep: '='
127
+ item_sep: ','
128
+ exclude_keys: []
129
+ runtime:
130
+ version: 1.3.2
131
+ version_base: '1.1'
132
+ cwd: /scratch/m/muqeeth/llm_negotiation
133
+ config_sources:
134
+ - path: hydra.conf
135
+ schema: pkg
136
+ provider: hydra
137
+ - path: /scratch/m/muqeeth/llm_negotiation/configs
138
+ schema: file
139
+ provider: main
140
+ - path: ''
141
+ schema: structured
142
+ provider: schema
143
+ output_dir: /scratch/m/muqeeth/llm_negotiation/2025_11/tas_rps_startend_naive_seed123
144
+ choices:
145
+ hydra/env: default
146
+ hydra/callbacks: null
147
+ hydra/job_logging: default
148
+ hydra/hydra_logging: default
149
+ hydra/hydra_help: default
150
+ hydra/help: default
151
+ hydra/sweeper: basic
152
+ hydra/launcher: basic
153
+ hydra/output: default
154
+ verbose: false
.hydra/overrides.yaml ADDED
@@ -0,0 +1 @@
 
 
1
+ []
run.log ADDED
The diff for this file is too large to render. See raw diff
 
seed_123/Qwen/Qwen2.5-7B-Instruct/adapters/README.md ADDED
@@ -0,0 +1,207 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ base_model: Qwen/Qwen2.5-7B-Instruct
3
+ library_name: peft
4
+ pipeline_tag: text-generation
5
+ tags:
6
+ - base_model:adapter:Qwen/Qwen2.5-7B-Instruct
7
+ - lora
8
+ - transformers
9
+ ---
10
+
11
+ # Model Card for Model ID
12
+
13
+ <!-- Provide a quick summary of what the model is/does. -->
14
+
15
+
16
+
17
+ ## Model Details
18
+
19
+ ### Model Description
20
+
21
+ <!-- Provide a longer summary of what this model is. -->
22
+
23
+
24
+
25
+ - **Developed by:** [More Information Needed]
26
+ - **Funded by [optional]:** [More Information Needed]
27
+ - **Shared by [optional]:** [More Information Needed]
28
+ - **Model type:** [More Information Needed]
29
+ - **Language(s) (NLP):** [More Information Needed]
30
+ - **License:** [More Information Needed]
31
+ - **Finetuned from model [optional]:** [More Information Needed]
32
+
33
+ ### Model Sources [optional]
34
+
35
+ <!-- Provide the basic links for the model. -->
36
+
37
+ - **Repository:** [More Information Needed]
38
+ - **Paper [optional]:** [More Information Needed]
39
+ - **Demo [optional]:** [More Information Needed]
40
+
41
+ ## Uses
42
+
43
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
44
+
45
+ ### Direct Use
46
+
47
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
48
+
49
+ [More Information Needed]
50
+
51
+ ### Downstream Use [optional]
52
+
53
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
54
+
55
+ [More Information Needed]
56
+
57
+ ### Out-of-Scope Use
58
+
59
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
60
+
61
+ [More Information Needed]
62
+
63
+ ## Bias, Risks, and Limitations
64
+
65
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
66
+
67
+ [More Information Needed]
68
+
69
+ ### Recommendations
70
+
71
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
72
+
73
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
74
+
75
+ ## How to Get Started with the Model
76
+
77
+ Use the code below to get started with the model.
78
+
79
+ [More Information Needed]
80
+
81
+ ## Training Details
82
+
83
+ ### Training Data
84
+
85
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
86
+
87
+ [More Information Needed]
88
+
89
+ ### Training Procedure
90
+
91
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
92
+
93
+ #### Preprocessing [optional]
94
+
95
+ [More Information Needed]
96
+
97
+
98
+ #### Training Hyperparameters
99
+
100
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
101
+
102
+ #### Speeds, Sizes, Times [optional]
103
+
104
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
105
+
106
+ [More Information Needed]
107
+
108
+ ## Evaluation
109
+
110
+ <!-- This section describes the evaluation protocols and provides the results. -->
111
+
112
+ ### Testing Data, Factors & Metrics
113
+
114
+ #### Testing Data
115
+
116
+ <!-- This should link to a Dataset Card if possible. -->
117
+
118
+ [More Information Needed]
119
+
120
+ #### Factors
121
+
122
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
123
+
124
+ [More Information Needed]
125
+
126
+ #### Metrics
127
+
128
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
129
+
130
+ [More Information Needed]
131
+
132
+ ### Results
133
+
134
+ [More Information Needed]
135
+
136
+ #### Summary
137
+
138
+
139
+
140
+ ## Model Examination [optional]
141
+
142
+ <!-- Relevant interpretability work for the model goes here -->
143
+
144
+ [More Information Needed]
145
+
146
+ ## Environmental Impact
147
+
148
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
149
+
150
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
151
+
152
+ - **Hardware Type:** [More Information Needed]
153
+ - **Hours used:** [More Information Needed]
154
+ - **Cloud Provider:** [More Information Needed]
155
+ - **Compute Region:** [More Information Needed]
156
+ - **Carbon Emitted:** [More Information Needed]
157
+
158
+ ## Technical Specifications [optional]
159
+
160
+ ### Model Architecture and Objective
161
+
162
+ [More Information Needed]
163
+
164
+ ### Compute Infrastructure
165
+
166
+ [More Information Needed]
167
+
168
+ #### Hardware
169
+
170
+ [More Information Needed]
171
+
172
+ #### Software
173
+
174
+ [More Information Needed]
175
+
176
+ ## Citation [optional]
177
+
178
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
179
+
180
+ **BibTeX:**
181
+
182
+ [More Information Needed]
183
+
184
+ **APA:**
185
+
186
+ [More Information Needed]
187
+
188
+ ## Glossary [optional]
189
+
190
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
191
+
192
+ [More Information Needed]
193
+
194
+ ## More Information [optional]
195
+
196
+ [More Information Needed]
197
+
198
+ ## Model Card Authors [optional]
199
+
200
+ [More Information Needed]
201
+
202
+ ## Model Card Contact
203
+
204
+ [More Information Needed]
205
+ ### Framework versions
206
+
207
+ - PEFT 0.17.1
seed_123/Qwen/Qwen2.5-7B-Instruct/adapters/agent_adapter/adapter_config.json ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "alpha_pattern": {},
3
+ "auto_mapping": null,
4
+ "base_model_name_or_path": "Qwen/Qwen2.5-7B-Instruct",
5
+ "bias": "none",
6
+ "corda_config": null,
7
+ "eva_config": null,
8
+ "exclude_modules": null,
9
+ "fan_in_fan_out": false,
10
+ "inference_mode": true,
11
+ "init_lora_weights": true,
12
+ "layer_replication": null,
13
+ "layers_pattern": null,
14
+ "layers_to_transform": null,
15
+ "loftq_config": {},
16
+ "lora_alpha": 64,
17
+ "lora_bias": false,
18
+ "lora_dropout": 0.0,
19
+ "megatron_config": null,
20
+ "megatron_core": "megatron.core",
21
+ "modules_to_save": null,
22
+ "peft_type": "LORA",
23
+ "qalora_group_size": 16,
24
+ "r": 32,
25
+ "rank_pattern": {},
26
+ "revision": null,
27
+ "target_modules": [
28
+ "up_proj",
29
+ "o_proj",
30
+ "q_proj",
31
+ "k_proj",
32
+ "down_proj",
33
+ "gate_proj",
34
+ "v_proj"
35
+ ],
36
+ "target_parameters": null,
37
+ "task_type": "CAUSAL_LM",
38
+ "trainable_token_indices": null,
39
+ "use_dora": false,
40
+ "use_qalora": false,
41
+ "use_rslora": false
42
+ }
seed_123/Qwen/Qwen2.5-7B-Instruct/adapters/critic_adapter/adapter_config.json ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "alpha_pattern": {},
3
+ "auto_mapping": null,
4
+ "base_model_name_or_path": "Qwen/Qwen2.5-7B-Instruct",
5
+ "bias": "none",
6
+ "corda_config": null,
7
+ "eva_config": null,
8
+ "exclude_modules": null,
9
+ "fan_in_fan_out": false,
10
+ "inference_mode": true,
11
+ "init_lora_weights": true,
12
+ "layer_replication": null,
13
+ "layers_pattern": null,
14
+ "layers_to_transform": null,
15
+ "loftq_config": {},
16
+ "lora_alpha": 64,
17
+ "lora_bias": false,
18
+ "lora_dropout": 0.0,
19
+ "megatron_config": null,
20
+ "megatron_core": "megatron.core",
21
+ "modules_to_save": null,
22
+ "peft_type": "LORA",
23
+ "qalora_group_size": 16,
24
+ "r": 32,
25
+ "rank_pattern": {},
26
+ "revision": null,
27
+ "target_modules": [
28
+ "up_proj",
29
+ "o_proj",
30
+ "q_proj",
31
+ "k_proj",
32
+ "down_proj",
33
+ "gate_proj",
34
+ "v_proj"
35
+ ],
36
+ "target_parameters": null,
37
+ "task_type": "CAUSAL_LM",
38
+ "trainable_token_indices": null,
39
+ "use_dora": false,
40
+ "use_qalora": false,
41
+ "use_rslora": false
42
+ }
src_code_for_reproducibility/__init__.py ADDED
File without changes
src_code_for_reproducibility/markov_games/statistics_runner.py ADDED
@@ -0,0 +1,405 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import gc
4
+ import json
5
+ import pickle
6
+ from dataclasses import dataclass
7
+ from pathlib import Path
8
+ from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional
9
+
10
+ from basic_render import find_iteration_folders
11
+
12
+ from mllm.markov_games.rollout_tree import (
13
+ RolloutTreeBranchNode,
14
+ RolloutTreeNode,
15
+ RolloutTreeRootNode,
16
+ SimulationStepLog,
17
+ )
18
+
19
+
20
+ def _iterate_main_nodes(root: RolloutTreeRootNode) -> Iterator[RolloutTreeNode]:
21
+ """
22
+ Iterate the main path nodes without materializing full path lists.
23
+ """
24
+ current = root.child
25
+ while current is not None:
26
+ if isinstance(current, RolloutTreeNode):
27
+ yield current
28
+ current = current.child
29
+ elif isinstance(current, RolloutTreeBranchNode):
30
+ # Follow only the main child on the main trajectory
31
+ current = current.main_child
32
+ else:
33
+ break
34
+
35
+
36
def iterate_main_simulation_logs(
    root: RolloutTreeRootNode,
) -> Iterator[SimulationStepLog]:
    """Yield the simulation step log of every node on the main trajectory."""
    yield from (
        node.step_log.simulation_step_log for node in _iterate_main_nodes(root)
    )
41
+
42
+
43
def stream_rollout_files(iteration_folder: Path) -> Iterator[Path]:
    """Lazily yield every ``*.rt.pkl`` rollout file under *iteration_folder*.

    The tree is searched recursively; non-file matches (e.g. directories
    that happen to match the pattern) are skipped.
    """
    yield from (
        candidate
        for candidate in iteration_folder.rglob("*.rt.pkl")
        if candidate.is_file()
    )
47
+
48
+
49
def load_root(path: Path) -> RolloutTreeRootNode:
    """Deserialize and validate a rollout tree root from the pickle at *path*.

    NOTE(review): ``pickle.load`` can execute arbitrary code on crafted
    input — only feed this files produced by a trusted run.
    """
    with path.open("rb") as handle:
        payload = pickle.load(handle)
    return RolloutTreeRootNode.model_validate(payload)
53
+
54
+
55
@dataclass
class StatRecord:
    """Aggregated statistics for one rollout.

    NOTE(review): not referenced elsewhere in this module; its fields mirror
    the record dicts built in ``run_stats`` / ``run_stats_functional``.
    """

    # Rollout identifier (taken from the rollout tree root's ``id``).
    mgid: int
    # Common-random-number id of the rollout, if any — TODO confirm semantics.
    crn_id: Optional[int]
    # Name of the iteration_* folder the rollout was loaded from.
    iteration: str
    # Metric name -> aggregated value for this rollout.
    values: Dict[str, Any]
61
+
62
+
63
class StatComputer:
    """
    Stateful stat computer that consumes SimulationStepLog instances
    and produces final aggregated values for one rollout (mgid).

    Subclasses implement ``update`` (called once per main-path step) and
    ``finalize`` (called once after every step has been consumed).
    """

    def update(self, sl: SimulationStepLog) -> None:  # pragma: no cover - interface
        """Consume one simulation step log; must be overridden."""
        raise NotImplementedError

    def finalize(self) -> Dict[str, Any]:  # pragma: no cover - interface
        """Return this computer's aggregated values; must be overridden."""
        raise NotImplementedError
74
+
75
+
76
def _collect_rollout_stats(
    root: RolloutTreeRootNode,
    make_computers: Callable[[], List[StatComputer]],
) -> Dict[str, Any]:
    """Run a fresh set of stat computers over one rollout's main trajectory.

    Best-effort by design: an exception in any computer's ``update`` or
    ``finalize`` is swallowed so one broken stat cannot abort the whole run.
    """
    computers = make_computers()
    for sl in iterate_main_simulation_logs(root):
        for comp in computers:
            try:
                comp.update(sl)
            except Exception:
                continue
    values: Dict[str, Any] = {}
    for comp in computers:
        try:
            values.update(comp.finalize())
        except Exception:
            continue
    return values


def _stats_record(
    root: RolloutTreeRootNode, iteration_name: str, values: Dict[str, Any]
) -> Dict[str, Any]:
    """Build the per-rollout record written to the stats file."""
    return {
        "mgid": getattr(root, "id", None),
        "crn_id": getattr(root, "crn_id", None),
        "iteration": iteration_name,
        "stats": values,
    }


def _pivot_stats_records(records: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Pivot per-rollout records into an aligned dict-of-lists payload.

    Scalar stats become one list each; dict-valued stats (per-agent) become
    one list per agent key. Missing entries are filled with None so every
    list stays aligned with ``mgid``/``crn_id``/``iteration``.
    """
    mgids: List[Any] = []
    crn_ids: List[Any] = []
    iterations_out: List[str] = []
    stats_out: Dict[str, Any] = {}

    # First pass: union of stat keys and, for dict-valued stats, agent keys.
    stat_keys: set[str] = set()
    nested_agent_keys: Dict[str, set[str]] = {}
    for r in records:
        stats = r.get("stats", {}) or {}
        for k, v in stats.items():
            stat_keys.add(k)
            if isinstance(v, dict):
                nested_agent_keys.setdefault(k, set()).update(
                    str(ak) for ak in v.keys()
                )

    # Initialize one list per stat (or one per agent for nested stats).
    for k in stat_keys:
        if k in nested_agent_keys:
            stats_out[k] = {ak: [] for ak in sorted(nested_agent_keys[k])}
        else:
            stats_out[k] = []

    # Second pass: fill the aligned lists.
    for r in records:
        mgids.append(r.get("mgid"))
        crn_ids.append(r.get("crn_id"))
        iterations_out.append(r.get("iteration"))
        stats = r.get("stats", {}) or {}
        for k in stat_keys:
            val = stats.get(k)
            if isinstance(stats_out[k], dict):
                agent_dict = val if isinstance(val, dict) else {}
                for ak in stats_out[k].keys():
                    stats_out[k][ak].append(agent_dict.get(ak))
            else:
                stats_out[k].append(val)

    return {
        "mgid": mgids,
        "crn_id": crn_ids,
        "iteration": iterations_out,
        "stats": stats_out,
    }


def run_stats(
    data_root: Path,
    game_name: str,
    make_computers: Callable[[], List[StatComputer]],
    output_filename: Optional[str] = None,
    output_format: str = "json",  # "json" (dict of lists) or "jsonl"
) -> Path:
    """
    Compute stats across all iteration_* folders under data_root.

    Writes to data_root/statistics/<output_filename or "<game_name>.stats.json[l]">:
    a single dict-of-lists JSON document when output_format == "json" (the
    default), or one JSON record per rollout when output_format == "jsonl".

    Args:
        data_root: Experiment output folder containing iteration_* subfolders.
        game_name: Used to derive the default output file name.
        make_computers: Factory returning a fresh list of StatComputer
            instances; called once per rollout so computers never share state.
        output_filename: Overrides the derived default file name.
        output_format: "json" or "jsonl".

    Returns:
        Path of the file written under data_root/statistics/.
    """
    data_root = Path(data_root)
    outdir = data_root / "statistics"
    outdir.mkdir(parents=True, exist_ok=True)
    # Choose extension by format
    default_name = (
        f"{game_name}.stats.json"
        if output_format == "json"
        else f"{game_name}.stats.jsonl"
    )
    outfile = outdir / (
        output_filename if output_filename is not None else default_name
    )

    # Rewrite file each run to keep it clean and small
    if outfile.exists():
        outfile.unlink()

    iteration_folders = find_iteration_folders(str(data_root))

    if output_format == "jsonl":
        # Stream one record per rollout straight to disk.
        with open(outfile, "w", encoding="utf-8") as w:
            for iteration_folder in iteration_folders:
                iteration_name = Path(iteration_folder).name
                for pkl_path in stream_rollout_files(Path(iteration_folder)):
                    root = load_root(pkl_path)
                    values = _collect_rollout_stats(root, make_computers)
                    rec = _stats_record(root, iteration_name, values)
                    w.write(json.dumps(rec, ensure_ascii=False) + "\n")
                    # Rollout trees can be large; free them eagerly.
                    del root
                    gc.collect()
    else:
        # Accumulate minimal records, then pivot to dict-of-lists for plotting.
        records: List[Dict[str, Any]] = []
        # Process in deterministic order
        for iteration_folder in iteration_folders:
            iteration_name = Path(iteration_folder).name
            for pkl_path in stream_rollout_files(Path(iteration_folder)):
                root = load_root(pkl_path)
                values = _collect_rollout_stats(root, make_computers)
                records.append(_stats_record(root, iteration_name, values))
                del root
                gc.collect()

        with open(outfile, "w", encoding="utf-8") as w:
            json.dump(_pivot_stats_records(records), w, ensure_ascii=False)

    return outfile
233
+
234
+
235
def run_stats_functional(
    data_root: Path,
    game_name: str,
    metrics: Dict[str, Callable[[SimulationStepLog], Optional[Dict[str, float]]]],
    output_filename: Optional[str] = None,
    output_format: str = "json",
) -> Path:
    """
    Functional variant where metrics is a dict of name -> f(SimulationStepLog) -> {agent_id: value}.
    Aggregates per rollout by averaging over steps where a metric produced a value.
    Writes a single consolidated file in data_root/statistics/.

    Args:
        data_root: Experiment output folder containing iteration_* subfolders.
        game_name: Used to derive the default output file name.
        metrics: Metric name -> per-step function returning {agent_id: value}
            (None or an empty mapping skips that step for that metric).
        output_filename: Overrides the default "<game_name>.stats.json[l]".
        output_format: "json" writes one dict-of-lists document; "jsonl"
            streams one record per rollout.

    Returns:
        Path of the file written under data_root/statistics/.
    """
    data_root = Path(data_root)
    outdir = data_root / "statistics"
    outdir.mkdir(parents=True, exist_ok=True)
    default_name = (
        f"{game_name}.stats.json"
        if output_format == "json"
        else f"{game_name}.stats.jsonl"
    )
    outfile = outdir / (
        output_filename if output_filename is not None else default_name
    )

    # Rewrite the output from scratch on every run.
    if outfile.exists():
        outfile.unlink()

    iteration_folders = find_iteration_folders(str(data_root))

    def finalize_rollout(
        agg: Dict[str, Dict[str, List[float]]]
    ) -> Dict[str, Dict[str, Optional[float]]]:
        # Average each metric per agent over the steps that produced a value.
        result: Dict[str, Dict[str, Optional[float]]] = {}
        for mname, agent_values in agg.items():
            result[mname] = {}
            for aid, vals in agent_values.items():
                if not vals:
                    # No values for this agent: emit None to keep key alignment.
                    result[mname][aid] = None
                else:
                    result[mname][aid] = sum(vals) / len(vals)
        return result

    if output_format == "jsonl":
        # Stream one JSON record per rollout directly to disk.
        with open(outfile, "w", encoding="utf-8") as w:
            for iteration_folder in iteration_folders:
                iteration_name = Path(iteration_folder).name
                for pkl_path in stream_rollout_files(Path(iteration_folder)):
                    root = load_root(pkl_path)

                    # aggregator structure: metric -> agent_id -> list of values
                    agg: Dict[str, Dict[str, List[float]]] = {
                        m: {} for m in metrics.keys()
                    }

                    for sl in iterate_main_simulation_logs(root):
                        for mname, fn in metrics.items():
                            # Best-effort: a crashing metric contributes
                            # nothing for this step instead of aborting.
                            try:
                                vals = fn(sl)
                            except Exception:
                                vals = None
                            if not vals:
                                continue
                            for aid, v in vals.items():
                                if v is None:
                                    continue
                                lst = agg[mname].setdefault(str(aid), [])
                                # Skip values that cannot be coerced to float.
                                try:
                                    lst.append(float(v))
                                except Exception:
                                    continue

                    values = finalize_rollout(agg)
                    rec = {
                        "mgid": getattr(root, "id", None),
                        "crn_id": getattr(root, "crn_id", None),
                        "iteration": iteration_name,
                        "stats": values,
                    }
                    w.write(json.dumps(rec, ensure_ascii=False) + "\n")

                    # Rollout trees can be large; free them eagerly.
                    del root
                    gc.collect()
    else:
        # Accumulate per-rollout records, then pivot to dict-of-lists below.
        records: List[Dict[str, Any]] = []
        for iteration_folder in iteration_folders:
            iteration_name = Path(iteration_folder).name
            for pkl_path in stream_rollout_files(Path(iteration_folder)):
                root = load_root(pkl_path)

                agg: Dict[str, Dict[str, List[float]]] = {m: {} for m in metrics.keys()}
                for sl in iterate_main_simulation_logs(root):
                    for mname, fn in metrics.items():
                        # Same best-effort policy as the jsonl branch.
                        try:
                            vals = fn(sl)
                        except Exception:
                            vals = None
                        if not vals:
                            continue
                        for aid, v in vals.items():
                            if v is None:
                                continue
                            lst = agg[mname].setdefault(str(aid), [])
                            try:
                                lst.append(float(v))
                            except Exception:
                                continue

                values = finalize_rollout(agg)
                records.append(
                    {
                        "mgid": getattr(root, "id", None),
                        "crn_id": getattr(root, "crn_id", None),
                        "iteration": iteration_name,
                        "stats": values,
                    }
                )

                del root
                gc.collect()

        # Build dict-of-lists output
        mgids: List[Any] = []
        crn_ids: List[Any] = []
        iterations_out: List[str] = []
        stats_out: Dict[str, Any] = {}

        # First pass: union of stat keys; per-agent keys for dict-valued stats.
        stat_keys: set[str] = set()
        nested_agent_keys: Dict[str, set[str]] = {}
        for r in records:
            stats = r.get("stats", {}) or {}
            for k, v in stats.items():
                stat_keys.add(k)
                if isinstance(v, dict):
                    nested = nested_agent_keys.setdefault(k, set())
                    for ak in v.keys():
                        nested.add(str(ak))

        # Initialize one list per stat (or one per agent for nested stats).
        for k in stat_keys:
            if k in nested_agent_keys:
                stats_out[k] = {ak: [] for ak in sorted(nested_agent_keys[k])}
            else:
                stats_out[k] = []

        # Second pass: fill aligned lists; missing entries become None.
        for r in records:
            mgids.append(r.get("mgid"))
            crn_ids.append(r.get("crn_id"))
            iterations_out.append(r.get("iteration"))
            stats = r.get("stats", {}) or {}
            for k in stat_keys:
                val = stats.get(k)
                if isinstance(stats_out[k], dict):
                    agent_dict = val if isinstance(val, dict) else {}
                    for ak in stats_out[k].keys():
                        stats_out[k][ak].append(agent_dict.get(ak))
                else:
                    stats_out[k].append(val)

        with open(outfile, "w", encoding="utf-8") as w:
            json.dump(
                {
                    "mgid": mgids,
                    "crn_id": crn_ids,
                    "iteration": iterations_out,
                    "stats": stats_out,
                },
                w,
                ensure_ascii=False,
            )

    return outfile
src_code_for_reproducibility/models/__init__.py ADDED
File without changes
src_code_for_reproducibility/models/adapter_training_wrapper.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ import logging
4
+ from typing import Union
5
+ from peft import (
6
+ LoraConfig,
7
+ get_peft_model,
8
+ )
9
+
10
+ logger = logging.getLogger(__name__)
11
+
12
+
13
class AdapterWrapper(nn.Module):
    """
    A thin façade that
    • keeps a reference to a *shared* PEFT-wrapped model,
    • ensures `set_adapter(adapter)` is called on every forward,
    • exposes only the parameters that should be trained for that adapter
    (plus whatever extra modules you name).
    """
    def __init__(
        self,
        shared_llm: nn.Module,
        adapter_id: str,
        lora_config: dict,
        path: Union[str, None] = None,
    ):
        """
        Args:
            shared_llm: Base model shared between adapters; mutated in place by PEFT.
            adapter_id: Name under which the LoRA adapter is registered/selected.
            lora_config: Keyword arguments forwarded to ``peft.LoraConfig``.
            path: Optional local path or HF Hub repo id with initial adapter weights.
        """
        super().__init__()
        self.shared_llm = shared_llm
        self.adapter_id = adapter_id
        lora_config = LoraConfig(**lora_config)
        # this modifies the shared llm in place, adding a lora adapter inside
        self.shared_llm = get_peft_model(
            model=shared_llm,
            peft_config=lora_config,
            adapter_name=adapter_id,
        )
        self.shared_llm.train()
        # Load external adapter weights if provided
        loaded_from: str | None = None
        if path:
            try:
                # Supports both local filesystem paths and HF Hub repo IDs
                self.shared_llm.load_adapter(
                    is_trainable=True,
                    model_id=path,
                    adapter_name=adapter_id,
                )
                loaded_from = path
            except Exception as exc:  # noqa: BLE001 - want to log any load failure context
                # Best-effort load: a failure falls back to fresh LoRA weights.
                logger.warning(
                    f"Adapter '{adapter_id}': failed to load from '{path}': {exc}"
                )

        if loaded_from:
            logger.info(
                f"Adapter '{adapter_id}': loaded initial weights from '{loaded_from}'."
            )
        else:
            logger.info(
                f"Adapter '{adapter_id}': initialized with fresh weights (no initial weights found)."
            )

    def parameters(self, recurse: bool = True):
        """
        "recurse" is just for pytorch compatibility

        Returns only the trainable (requires_grad) parameters of this wrapper's
        adapter, after activating it on the shared model.
        """
        self.shared_llm.set_adapter(self.adapter_id)
        params = [p for p in self.shared_llm.parameters() if p.requires_grad]

        return params

    def get_base_model_logits(self, contexts):
        """
        Run the base model (without adapter) in inference mode, without tracking gradients.
        This is useful to get reference logits for KL-divergence computation.
        """
        with torch.no_grad():
            with self.shared_llm.disable_adapter():
                # Index [0] picks the logits from the model output tuple.
                return self.shared_llm(input_ids=contexts)[0]

    def forward(self, *args, **kwargs):
        # Activate this wrapper's adapter before every call — the underlying
        # model is shared, so another wrapper may have switched adapters.
        self.shared_llm.set_adapter(self.adapter_id)
        return self.shared_llm(*args, **kwargs)

    def save_pretrained(self, save_path):
        # Delegates to PEFT's save (writes adapter weights/config).
        self.shared_llm.save_pretrained(save_path)

    def gradient_checkpointing_enable(self, *args, **kwargs):
        # Pass-through so trainers can toggle checkpointing on the shared model.
        self.shared_llm.gradient_checkpointing_enable(*args, **kwargs)

    @property
    def dtype(self):
        return self.shared_llm.dtype

    @property
    def device(self):
        return self.shared_llm.device
src_code_for_reproducibility/models/human_policy.py ADDED
@@ -0,0 +1,255 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ import os
3
+ import re
4
+ import shutil
5
+ import sys
6
+ from typing import Callable, Dict, List, Optional
7
+
8
+ from mllm.markov_games.rollout_tree import ChatTurn
9
+
10
+ try:
11
+ import rstr # For generating example strings from regex
12
+ except Exception: # pragma: no cover
13
+ rstr = None
14
+
15
+
16
def _clear_terminal() -> None:
    """Clear the screen, but only when stdout is an interactive terminal."""
    if not sys.stdout.isatty():
        return
    command = "cls" if os.name == "nt" else "clear"
    os.system(command)
22
+
23
+
24
+ def _terminal_width(default: int = 100) -> int:
25
+ try:
26
+ return shutil.get_terminal_size().columns
27
+ except Exception:
28
+ return default
29
+
30
+
31
def _horizontal_rule(char: str = "─") -> str:
    """Build a full-width separator line (never shorter than 20 chars)."""
    return char * max(20, _terminal_width() - 2)
34
+
35
+
36
class _Style:
    """Namespace of ANSI escape codes used for terminal styling."""
    # ANSI colors (bright, readable)
    RESET = "\033[0m"
    BOLD = "\033[1m"
    DIM = "\033[2m"
    # Foreground colors
    FG_BLUE = "\033[94m"  # user/system headers
    FG_GREEN = "\033[92m"  # human response header
    FG_YELLOW = "\033[93m"  # notices
    FG_RED = "\033[91m"  # errors
    FG_MAGENTA = "\033[95m"  # regex
    FG_CYAN = "\033[96m"  # tips
48
+
49
+
50
def _render_chat(state) -> str:
    """
    Render prior messages in a compact, readable terminal format.

    Each element of *state* is a chat turn exposing `.role` and `.content`.
    """
    out: List[str] = []
    out.append(_horizontal_rule())
    out.append(f"{_Style.FG_BLUE}{_Style.BOLD} Conversation so far {_Style.RESET}")
    out.append(_horizontal_rule())
    for turn in state:
        role = turn.role
        body = str(turn.content).strip()
        # Map roles to display names and colors/emojis
        if role == "assistant":
            header = f"{_Style.FG_GREEN}{_Style.BOLD}HUMAN--🧑‍💻{_Style.RESET}"
        elif role == "user":
            header = f"{_Style.FG_BLUE}{_Style.BOLD}USER--⚙️{_Style.RESET}"
        else:
            header = f"[{_Style.DIM}{role.upper()}{_Style.RESET}]"
        out.append(header)
        # Indent each content line for readability (empty content -> one blank line)
        out.extend(f" {line}" for line in (body.splitlines() or [""]))
        out.append("")
    out.append(_horizontal_rule())
    return "\n".join(out)
77
+
78
+
79
async def _async_input(prompt_text: str) -> str:
    """Non-blocking input using a background thread."""
    # asyncio.to_thread keeps the event loop responsive while input() blocks.
    return await asyncio.to_thread(input, prompt_text)
82
+
83
+
84
def _short_regex_example(regex: str, max_len: int = 30) -> Optional[str]:
    """
    Produce a short example string matching *regex*, or None.

    Uses `rstr.xeger` (optional dependency); samples up to 20 candidates and
    returns the first one no longer than *max_len*.
    """
    if rstr is None:
        return None
    try:
        for _attempt in range(20):
            sample = rstr.xeger(regex)
            if len(sample) <= max_len:
                return sample
    except Exception:
        return None
    # No short-enough candidate found; truncating could break the match,
    # so give up instead.
    return None
100
+
101
+
102
+ def _detect_input_type(regex: str | None) -> tuple[str, str, str]:
103
+ """
104
+ Detect what type of input is expected based on the regex pattern.
105
+ Returns (input_type, start_tag, end_tag)
106
+ """
107
+ if regex is None:
108
+ return "text", "", ""
109
+
110
+ if "message_start" in regex and "message_end" in regex:
111
+ return "message", "<<message_start>>", "<<message_end>>"
112
+ elif "proposal_start" in regex and "proposal_end" in regex:
113
+ return "proposal", "<<proposal_start>>", "<<proposal_end>>"
114
+ else:
115
+ return "text", "", ""
116
+
117
+
118
async def human_policy(state, agent_id, regex: str | None = None) -> ChatTurn:
    """
    Async human-in-the-loop policy.

    - Displays prior conversation context in the terminal.
    - Prompts the user for a response.
    - If a regex is provided, validates and re-prompts until it matches.
    - Automatically adds formatting tags based on expected input type.

    Args:
        state: Chat history (iterable of chat turns with `.role`/`.content`).
        agent_id: Id recorded on the returned turn.
        regex: Optional fullmatch validation pattern.

    Returns:
        A ChatTurn (role "assistant") carrying the user's validated response.

    Raises:
        KeyboardInterrupt: When the user enters /quit (or /q).
    """
    # Detect input type and formatting
    input_type, start_tag, end_tag = _detect_input_type(regex)

    # Loop until the (tag-wrapped) input passes validation or a command exits.
    while True:
        _clear_terminal()
        print(_render_chat(state))

        if regex:
            example = _short_regex_example(regex, max_len=30)
            print(
                f"{_Style.FG_MAGENTA}{_Style.BOLD}Expected format (regex fullmatch):{_Style.RESET}"
            )
            print(f" {_Style.FG_MAGENTA}{regex}{_Style.RESET}")
            if example:
                print(
                    f"{_Style.FG_CYAN}Example (random, <=30 chars):{_Style.RESET} {example}"
                )
            print(_horizontal_rule("."))

            # Custom prompt based on input type
            if input_type == "message":
                print(
                    f"{_Style.FG_YELLOW}Type your message content (formatting will be added automatically):{_Style.RESET}"
                )
            elif input_type == "proposal":
                print(
                    f"{_Style.FG_YELLOW}Type your proposal (number only, formatting will be added automatically):{_Style.RESET}"
                )
            else:
                print(
                    f"{_Style.FG_YELLOW}Type your response and press Enter.{_Style.RESET}"
                )

            print(
                f"{_Style.DIM}Commands: /help to view commands, /refresh to re-render, /quit to abort{_Style.RESET}"
            )
        else:
            print(
                f"{_Style.FG_YELLOW}Type your response and press Enter.{_Style.RESET} {_Style.DIM}(/help for commands){_Style.RESET}"
            )

        user_in = (await _async_input("> ")).rstrip("\n")

        # Commands
        if user_in.strip().lower() in {"/help", "/h"}:
            print(f"\n{_Style.FG_CYAN}{_Style.BOLD}Available commands:{_Style.RESET}")
            print(
                f" {_Style.FG_CYAN}/help{_Style.RESET} or {_Style.FG_CYAN}/h{_Style.RESET} Show this help"
            )
            print(
                f" {_Style.FG_CYAN}/refresh{_Style.RESET} or {_Style.FG_CYAN}/r{_Style.RESET} Re-render the conversation and prompt"
            )
            print(
                f" {_Style.FG_CYAN}/quit{_Style.RESET} or {_Style.FG_CYAN}/q{_Style.RESET} Abort the run (raises KeyboardInterrupt)"
            )
            await asyncio.sleep(1.0)
            continue
        if user_in.strip().lower() in {"/refresh", "/r"}:
            continue
        if user_in.strip().lower() in {"/quit", "/q"}:
            raise KeyboardInterrupt("Human aborted run from human_policy")

        # Add formatting tags if needed
        if start_tag and end_tag:
            formatted_input = f"{start_tag}{user_in}{end_tag}"
        else:
            formatted_input = user_in

        if regex is None:
            # No validation required — accept as-is.
            return ChatTurn(
                role="assistant", agent_id=agent_id, content=formatted_input
            )

        # Validate against regex (fullmatch)
        try:
            pattern = re.compile(regex)
        except re.error as e:
            # If regex is invalid, fall back to accepting any input
            print(
                f"{_Style.FG_RED}Warning:{_Style.RESET} Provided regex is invalid: {e}. Accepting input without validation."
            )
            await asyncio.sleep(0.5)
            return ChatTurn(
                role="assistant", agent_id=agent_id, content=formatted_input
            )

        if pattern.fullmatch(formatted_input):
            return ChatTurn(
                role="assistant", agent_id=agent_id, content=formatted_input
            )

        # Show validation error and re-prompt
        print("")
        print(
            f"{_Style.FG_RED}{_Style.BOLD}Input did not match the required format.{_Style.RESET} Please try again."
        )

        if input_type == "message":
            print(
                f"You entered: {_Style.FG_CYAN}{start_tag}{user_in}{end_tag}{_Style.RESET}"
            )
            print(f"Just type the message content without tags.")
        elif input_type == "proposal":
            print(
                f"You entered: {_Style.FG_CYAN}{start_tag}{user_in}{end_tag}{_Style.RESET}"
            )
            print(f"Just type the number without tags.")
        else:
            print(f"Expected (regex):")
            print(f" {_Style.FG_MAGENTA}{regex}{_Style.RESET}")

        print(_horizontal_rule("."))
        print(f"{_Style.FG_YELLOW}Press Enter to retry...{_Style.RESET}")
        await _async_input("")
248
+
249
+
250
def get_human_policies() -> Dict[str, Callable[[List[Dict]], str]]:
    """
    Expose the human policy in the same map shape used elsewhere.
    """
    # Type hint says Callable[[List[Dict]], str] but we intentionally return the async callable.
    return {"human_policy": human_policy}  # type: ignore[return-value]
src_code_for_reproducibility/models/inference_backend.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from abc import ABC, abstractmethod
2
+ from dataclasses import dataclass
3
+ from typing import Any, Optional
4
+
5
+
6
@dataclass
class LLMInferenceOutput:
    """Result of a single generation call from an inference backend."""

    # Final visible text produced by the model.
    content: str
    # Extracted chain-of-thought (<think> section), when a backend provides it.
    reasoning_content: str | None = None
    # Per-token log-probabilities of the sampled tokens, when requested.
    # NOTE(review): the vLLM backend stores torch tensors in the next two
    # fields despite the list annotations — confirm intended types.
    log_probs: list[float] | None = None
    # Token ids of the generated text.
    out_token_ids: list[int] | None = None
12
+
13
+
14
class LLMInferenceBackend(ABC):
    """
    Abstract interface shared by all inference backends (vLLM, SGLang,
    local HTTP servers, dummy).

    NOTE(review): concrete subclasses in this package deviate from these
    exact signatures (extra kwargs, async toggles) — confirm the intended
    contract.
    """

    @abstractmethod
    def __init__(self, **kwargs):
        ...

    @abstractmethod
    def prepare_adapter(
        self, adapter_id: str, weights_got_updated: bool = False
    ) -> None:
        """Ensure adapter is ready/loaded for next generation call."""

    @abstractmethod
    async def generate(self, prompt: list[dict], regex: Optional[str] = None) -> str:
        """Produce a completion for the prompt, optionally regex-constrained."""
        ...

    @abstractmethod
    def toggle_training_mode(self) -> None:
        """Switch to training mode (implementations may free inference memory)."""
        ...

    @abstractmethod
    def toggle_eval_mode(self) -> None:
        """Switch back to inference/eval mode."""
        ...

    @abstractmethod
    def shutdown(self) -> None:
        """Tear down the backend (terminate servers/engines if any)."""
        ...
src_code_for_reproducibility/models/inference_backend_dummy.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ from typing import Optional
3
+
4
+ import rstr
5
+ from transformers import AutoTokenizer
6
+
7
+ from mllm.models.inference_backend import LLMInferenceBackend, LLMInferenceOutput
8
+ from mllm.utils.short_id_gen import generate_short_id
9
+
10
+
11
class DummyInferenceBackend(LLMInferenceBackend):
    """
    No-op test double: all lifecycle hooks do nothing and `generate`
    returns canned text (a random regex-conforming string when a regex
    is given, via `rstr.xeger`).

    NOTE(review): toggle_training_mode/toggle_eval_mode are async here
    although the ABC declares them sync — callers presumably await them;
    confirm.
    """

    def __init__(
        self,
        *args,
        **kwargs,
    ):
        pass

    def prepare_adapter(
        self,
        adapter_id: Optional[str],
        weights_got_updated: bool,
        adapter_path: Optional[str] = None,
    ) -> None:
        # Nothing to load for the dummy backend.
        pass

    async def toggle_training_mode(self) -> None:
        await asyncio.sleep(0)
        pass

    async def toggle_eval_mode(self) -> None:
        await asyncio.sleep(0)
        pass

    def shutdown(self) -> None:
        pass

    async def generate(
        self,
        prompt_text: str,
        regex: Optional[str] = None,
        extract_thinking: bool = False,
    ) -> LLMInferenceOutput:
        if regex:
            # Create random string that respects the regex
            return LLMInferenceOutput(
                content=rstr.xeger(regex),
                reasoning_content="I don't think, I am a dummy backend.",
            )
        else:
            return LLMInferenceOutput(
                content="I am a dummy backend without a regex.",
                reasoning_content="I don't think, I am a dummy backend.",
            )
src_code_for_reproducibility/models/inference_backend_sglang.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # new_backend_sglang_offline.py
2
+ from __future__ import annotations
3
+
4
+ import asyncio
5
+ from typing import Any, Optional
6
+
7
+ # import sglang as sgl
8
+
9
+ from mllm.models.inference_backend import LLMInferenceBackend
10
+
11
+
12
class SGLangOfflineBackend(LLMInferenceBackend):
    """
    In-process (offline, no HTTP server) SGLang backend with optional
    multi-LoRA support.

    NOTE(review): `sgl.Engine` is referenced below but the module-level
    `import sglang as sgl` is commented out — as written this raises
    NameError at construction; confirm whether this backend is dead code.
    """

    def __init__(
        self,
        model_name: str,
        tokenizer,  # unused but kept for parity
        adapter_paths: dict[str, str],
        device: str = "cuda",
        max_model_len: Optional[int] = None,
        enable_lora: bool = True,
        lora_target_modules: Optional[list[str] | str] = None,
        max_loras_per_batch: int = 8,
        engine_kwargs: dict[str, Any] = None,
    ):
        self.model_name = model_name
        self.adapter_paths = adapter_paths
        self.current_adapter: Optional[str] = None
        # Copy so we never mutate a caller-owned dict.
        engine_kwargs = dict(engine_kwargs or {})
        # Map server-style LoRA flags to offline engine ctor
        if enable_lora and adapter_paths:
            engine_kwargs.setdefault("enable_lora", True)
            # The offline Engine mirrors server args; pass a mapping name->path
            engine_kwargs.setdefault("lora_paths", adapter_paths)
            if lora_target_modules is not None:
                engine_kwargs.setdefault("lora_target_modules", lora_target_modules)
            engine_kwargs.setdefault("max_loras_per_batch", max_loras_per_batch)

        if max_model_len is not None:
            engine_kwargs.setdefault("context_length", max_model_len)

        # Launch in-process engine (no HTTP server)
        self.llm = sgl.Engine(model_path=model_name, **engine_kwargs)  # async-ready
        # SGLang supports: generate(), async_generate(), and async streaming helpers. :contentReference[oaicite:2]{index=2}

    def is_ready(self) -> bool:
        # In-process engine: ready as soon as the constructor returns.
        return True

    def toggle_training_mode(self) -> None:
        # No explicit KV release API offline; typically you pause usage here.
        pass

    def toggle_eval_mode(self) -> None:
        pass

    def shutdown(self) -> None:
        # Engine cleans up on GC; explicit close not required.
        pass

    def prepare_adapter(self, adapter_id: Optional[str]) -> None:
        # With offline Engine, when LoRA is enabled at init,
        # you select adapter per request via the input batch mapping.
        self.current_adapter = adapter_id

    async def generate(
        self, prompt_text: str, sampling_params: dict, adapter_id: Optional[str]
    ) -> str:
        """Generate one completion; returns the raw text of the first output."""
        # Non-streaming async (batch of 1). For batched prompts, pass a list.
        params = {
            "temperature": sampling_params.get("temperature", 1.0),
            "top_p": sampling_params.get("top_p", 1.0),
            "max_new_tokens": sampling_params.get("max_new_tokens", 128),
        }
        # Only forward optional knobs the caller actually set.
        if (tk := sampling_params.get("top_k", -1)) and tk > 0:
            params["top_k"] = tk
        if (mn := sampling_params.get("min_new_tokens")) is not None:
            params["min_new_tokens"] = mn
        if (fp := sampling_params.get("frequency_penalty")) is not None:
            params["frequency_penalty"] = fp

        # If using multi-LoRA, SGLang lets you provide adapter names aligned to each input.
        prompts = [prompt_text]
        adapters = [adapter_id] if adapter_id else None  # or omit for base
        outs = await self.llm.async_generate(
            prompts, params, adapters
        )  # :contentReference[oaicite:3]{index=3}
        return outs[0]["text"]
src_code_for_reproducibility/models/inference_backend_sglang_local_server.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+ import httpx
4
+ import requests
5
+ from sglang.utils import launch_server_cmd, wait_for_server
6
+
7
+ from mllm.models.inference_backend import LLMInferenceBackend
8
+
9
+
10
class HttpSGLangBackend(LLMInferenceBackend):
    """
    Backend that launches a local SGLang HTTP server and talks to it over
    REST (generation, KV-cache release/resume, runtime LoRA load/unload).

    NOTE(review): this class reads `self.adapter_paths` and
    `self.model_name`, which are not assigned here and the ABC's __init__
    is abstract — presumably a cooperative base/mixin sets them from
    kwargs; confirm.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.port = None
        self.proc = None
        self.urls = {}
        # track sglang adapter ids separately from your logical ids
        self.sglang_names = {aid: aid for aid in self.adapter_paths.keys()}
        self.needs_loading = {aid: True for aid in self.adapter_paths.keys()}

        # defaults you already used:
        self.mem_fraction = kwargs.get("mem_fraction_static", 0.6)
        self.dtype = kwargs.get("dtype", "bfloat16")
        self.extra_cli = kwargs.get("extra_cli", "")
        self.disable_radix_cache = kwargs.get("disable_radix_cache", True)

    def launch(self) -> None:
        """Start the SGLang server subprocess and record its endpoint URLs."""
        # find local hf cache path for server
        from transformers.utils import cached_file

        local_llm_path = os.path.split(cached_file(self.model_name, "config.json"))[0]

        lora_str = ""
        if self.adapter_paths:
            lora_str = "--lora-paths " + " ".join(
                f"{aid}={path}" for aid, path in self.adapter_paths.items()
            )

        cmd = f"""
        python3 -m sglang.launch_server --model-path {local_llm_path} \
        --host 0.0.0.0 {lora_str} \
        {'--disable-radix-cache' if self.disable_radix_cache else ''} \
        --mem-fraction-static {self.mem_fraction} --dtype {self.dtype} {self.extra_cli}
        """
        self.proc, self.port = launch_server_cmd(cmd)
        # Blocks until the server answers health checks.
        wait_for_server(f"http://localhost:{self.port}")
        base = f"http://localhost:{self.port}"
        self.urls = dict(
            generate=f"{base}/generate",
            release=f"{base}/release_memory_occupation",
            resume=f"{base}/resume_memory_occupation",
            load_lora=f"{base}/load_lora_adapter",
            unload_lora=f"{base}/unload_lora_adapter",
        )

    def is_ready(self) -> bool:
        """True when the /generate endpoint answers within 2 s."""
        try:
            requests.get(self.urls["generate"], timeout=2)
            return True
        except Exception:
            return False

    def prepare_adapter(self, adapter_id: str) -> None:
        """(Re)load the adapter's weights on the server when marked stale."""
        if adapter_id is None:
            return
        if self.needs_loading.get(adapter_id, False):
            # unload old name if present
            try:
                requests.post(
                    self.urls["unload_lora"],
                    json={"lora_name": self.sglang_names[adapter_id]},
                    timeout=10,
                )
            except Exception:
                # Best-effort: the old adapter may simply not be loaded yet.
                pass
            # Fresh server-side name forces a real reload instead of a cache hit.
            new_name = self._short_id()
            self.sglang_names[adapter_id] = new_name
            requests.post(
                self.urls["load_lora"],
                json={
                    "lora_name": new_name,
                    "lora_path": self.adapter_paths[adapter_id],
                },
            ).raise_for_status()
            self.needs_loading[adapter_id] = False

    async def generate(
        self, prompt_text: str, sampling_params: dict, adapter_id: str | None
    ) -> str:
        """POST a single-prompt generation request; returns the output text."""
        lora_name = self.sglang_names.get(adapter_id) if adapter_id else None
        payload = {
            "text": [prompt_text],
            "sampling_params": sampling_params,
        }
        if lora_name:
            payload["lora_path"] = [lora_name]

        # Very long timeout: generation may legitimately take a while.
        timeout = httpx.Timeout(3600.0, connect=3600.0)
        async with httpx.AsyncClient(timeout=timeout) as client:
            resp = await client.post(self.urls["generate"], json=payload)
            resp.raise_for_status()
            return resp.json()[0]["text"]

    def toggle_training_mode(self) -> None:
        # free KV space while training adapters
        requests.post(
            self.urls["release"], json={"tags": ["kv_cache"]}
        ).raise_for_status()

    def toggle_eval_mode(self) -> None:
        # re-allocate KV space
        try:
            requests.post(
                self.urls["resume"], json={"tags": ["kv_cache"]}
            ).raise_for_status()
        except Exception:
            # Best-effort: resume may fail if nothing was released.
            pass

    def shutdown(self) -> None:
        """Terminate the server subprocess, if one was launched."""
        from sglang.utils import terminate_process

        if self.proc:
            terminate_process(self.proc)

    def _short_id(self) -> str:
        # 8-digit pseudo-unique name for server-side LoRA registration.
        import uuid

        return str(uuid.uuid4().int)[:8]
src_code_for_reproducibility/models/inference_backend_vllm.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ import re
3
+ from typing import Optional
4
+
5
+ import torch
6
+ from transformers import AutoTokenizer
7
+ from vllm import AsyncEngineArgs, AsyncLLMEngine, SamplingParams
8
+ from vllm.inputs import TokensPrompt
9
+ from vllm.lora.request import LoRARequest
10
+ from vllm.sampling_params import GuidedDecodingParams, RequestOutputKind
11
+
12
+ from mllm.models.inference_backend import LLMInferenceBackend, LLMInferenceOutput
13
+ from mllm.utils.short_id_gen import generate_short_id
14
+
15
+
16
class VLLMAsyncBackend(LLMInferenceBackend):
    """
    Async inference backend built on vLLM's AsyncLLMEngine.

    Supports optional regex-guided decoding, optional <think>...</think>
    extraction, and per-call LoRA adapter selection via `prepare_adapter`.
    """

    def __init__(
        self,
        model_name: str,
        tokenizer: AutoTokenizer,
        engine_init_kwargs: dict = None,
        sampling_params: dict = None,
    ):
        """
        Args:
            model_name: Model id/path handed to the vLLM engine.
            tokenizer: Tokenizer kept for callers; not used by the engine itself.
            engine_init_kwargs: Extra kwargs forwarded to AsyncEngineArgs.
            sampling_params: Default kwargs forwarded to SamplingParams per call.
        """
        self.model_name = model_name
        # Logical adapter id -> short unique id; a fresh id forces vLLM to
        # reload updated weights instead of serving a cached LoRA.
        self.vllm_adapter_ids = {}
        # Fix: initialize so generate() works before any prepare_adapter()
        # call (previously raised AttributeError). None selects the base model.
        self.current_lora_request = None
        ea = dict(model=model_name, **(engine_init_kwargs or {}))
        self.engine = AsyncLLMEngine.from_engine_args(AsyncEngineArgs(**ea))

        # Avoid the mutable-default-argument pitfall of the original signature.
        self.sampling_params = sampling_params or {}
        self.tokenizer = tokenizer

    def prepare_adapter(
        self,
        adapter_id: Optional[str],
        adapter_path: Optional[str],
        weights_got_updated: bool,
    ) -> None:
        """
        Select the LoRA adapter used by subsequent generate() calls.

        Args:
            adapter_id: Logical adapter name; None selects the base model.
            adapter_path: Filesystem path of the adapter weights.
            weights_got_updated: When True, mint a fresh internal id so vLLM
                reloads the weights rather than using its cached copy.
        """
        if adapter_id is None:
            # Robustness fix: allow explicitly switching back to the base model.
            self.current_lora_request = None
            return
        # Fix: the first use of an adapter previously raised KeyError unless
        # weights_got_updated happened to be True.
        if weights_got_updated or adapter_id not in self.vllm_adapter_ids:
            self.vllm_adapter_ids[adapter_id] = generate_short_id()
        self.current_lora_request = LoRARequest(
            adapter_id,
            self.vllm_adapter_ids[adapter_id],
            adapter_path,
        )

    async def toggle_training_mode(self) -> None:
        # Sleep level 1 releases KV-cache memory while training runs.
        await self.engine.sleep(level=1)

    async def toggle_eval_mode(self) -> None:
        await self.engine.wake_up()

    def shutdown(self) -> None:
        # No explicit close call; engine stops when process exits.
        pass

    async def generate(
        self,
        input_token_ids: list[int],
        regex: Optional[str] = None,
        extract_thinking: bool = False,
    ) -> LLMInferenceOutput:
        """
        Generate a completion for pre-tokenized input.

        Args:
            input_token_ids: Prompt token ids.
            regex: Optional pattern enforced via guided decoding.
            extract_thinking: When True, split a leading <think>...</think>
                section into `reasoning_content`.

        Returns:
            LLMInferenceOutput; log_probs/out_token_ids are torch tensors
            (log_probs is None when `logprobs` is not enabled in
            sampling_params).

        Raises:
            RuntimeError: If the engine yields no output for the request.
        """
        guided = GuidedDecodingParams(regex=regex) if regex else None
        sp = SamplingParams(
            **self.sampling_params,
            guided_decoding=guided,
            output_kind=RequestOutputKind.FINAL_ONLY,
        )

        prompt = TokensPrompt(prompt_token_ids=input_token_ids)
        # Fix: unique id per request; the previous loop-time based id could
        # collide for concurrent requests issued in the same event-loop tick.
        request_id = f"req-{generate_short_id()}"
        result_generator = self.engine.generate(
            prompt,
            sp,
            request_id,
            lora_request=self.current_lora_request,
        )

        res = None
        async for out in result_generator:  # with FINAL_ONLY this runs once
            res = out
        if res is None:
            # Fix: previously fell through to an UnboundLocalError.
            raise RuntimeError(f"vLLM produced no output for request {request_id}")

        completion = res.outputs[0]
        raw_text = completion.text
        out_token_ids = torch.tensor(completion.token_ids, dtype=torch.long)
        if completion.logprobs is not None:
            log_probs = torch.tensor(
                [
                    logprob_dict[token_id].logprob
                    for token_id, logprob_dict in zip(
                        completion.token_ids, completion.logprobs
                    )
                ]
            )
        else:
            # Robustness fix: sampling without logprobs enabled used to crash
            # on the zip over None.
            log_probs = None

        content = raw_text
        reasoning_content = None
        if extract_thinking:
            m = re.match(
                r"^\n<think>\n([\s\S]*?)</think>\n\n(.*)$", raw_text, flags=re.DOTALL
            )
            if m:
                reasoning_content = m.group(1)
                content = m.group(2)
        return LLMInferenceOutput(
            content=content,
            reasoning_content=reasoning_content,
            log_probs=log_probs,
            out_token_ids=out_token_ids,
        )
src_code_for_reproducibility/models/inference_backend_vllm_local_server.py ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+ import subprocess
4
+ import time
5
+
6
+ import httpx
7
+ import requests
8
+
9
+ from mllm.models.inference_backend import LLMInferenceBackend
10
+
11
+
12
class HttpVLLMBackend(LLMInferenceBackend):
    """
    Backend that launches a local vLLM OpenAI-compatible server and talks
    to it over HTTP (chat completions, optional runtime LoRA loading).

    NOTE(review): this class reads `self.adapter_paths` and
    `self.model_name`, which are not assigned here and the ABC's __init__
    is abstract — presumably a cooperative base/mixin sets them from
    kwargs; confirm.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.port = kwargs.get("port", 8000)
        self.host = kwargs.get("host", "0.0.0.0")
        self.proc = None
        self.base_url = f"http://{self.host}:{self.port}"
        # vLLM memory safety knobs
        self.gpu_mem_util = kwargs.get("gpu_memory_utilization", 0.9)
        self.max_model_len = kwargs.get("max_model_len", None)
        self.max_num_seqs = kwargs.get("max_num_seqs", None)
        self.max_batched_tokens = kwargs.get("max_num_batched_tokens", None)
        self.dtype = kwargs.get("dtype", "bfloat16")
        self.trust_remote_code = kwargs.get("trust_remote_code", False)
        # LoRA strategy: "preload" (CLI) or "runtime" (endpoints) depending on your vLLM build
        self.lora_mode = kwargs.get(
            "lora_mode", "preload"
        )  # "runtime" supported in newer builds
        self.runtime_lora_enabled = self.lora_mode == "runtime"

        # If preloading: build CLI args (adapter name -> path)
        self._preload_lora_args = []
        if self.adapter_paths and self.lora_mode == "preload":
            # vLLM supports multiple LoRA modules via CLI in recent versions
            # Example flag shapes can vary; adapt as needed for your version:
            # --lora-modules adapter_id=path
            for aid, pth in self.adapter_paths.items():
                self._preload_lora_args += ["--lora-modules", f"{aid}={pth}"]

    def launch(self):
        """Start the vLLM API server subprocess and block until it's ready."""
        # Build vLLM serve command
        cmd = [
            "python3",
            "-m",
            "vllm.entrypoints.openai.api_server",
            "--model",
            self.model_name,
            "--host",
            self.host,
            "--port",
            str(self.port),
            "--dtype",
            self.dtype,
            "--gpu-memory-utilization",
            str(self.gpu_mem_util),
        ]
        if self.trust_remote_code:
            cmd += ["--trust-remote-code"]
        if self.max_model_len:
            cmd += ["--max-model-len", str(self.max_model_len)]
        if self.max_num_seqs:
            cmd += ["--max-num-seqs", str(self.max_num_seqs)]
        if self.max_batched_tokens:
            cmd += ["--max-num-batched-tokens", str(self.max_batched_tokens)]
        cmd += self._preload_lora_args

        self.proc = subprocess.Popen(
            cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True
        )
        self._wait_ready()

    def _wait_ready(self, timeout=120):
        """Poll /v1/models until the server answers or *timeout* seconds pass."""
        url = f"{self.base_url}/v1/models"
        t0 = time.time()
        while time.time() - t0 < timeout:
            try:
                r = requests.get(url, timeout=2)
                if r.status_code == 200:
                    return
            except Exception:
                # Server still starting up; keep polling.
                pass
            time.sleep(1)
        raise RuntimeError("vLLM server did not become ready in time")

    def is_ready(self) -> bool:
        """True when /v1/models responds with HTTP 200 within 2 s."""
        try:
            return (
                requests.get(f"{self.base_url}/v1/models", timeout=2).status_code == 200
            )
        except Exception:
            return False

    def prepare_adapter(self, adapter_id: str) -> None:
        """Best-effort runtime LoRA load; no-op when adapters are preloaded."""
        if not adapter_id or not self.runtime_lora_enabled:
            return
        # Newer vLLM builds expose runtime LoRA endpoints. If yours differs,
        # adjust the path/body here and keep the interface stable.
        try:
            requests.post(
                f"{self.base_url}/v1/load_lora_adapter",
                json={
                    "adapter_name": adapter_id,
                    "adapter_path": self.adapter_paths[adapter_id],
                },
                timeout=10,
            ).raise_for_status()
        except Exception as e:
            # If already loaded or endpoint not present, swallow or log
            pass

    async def generate(
        self, prompt_text: str, sampling_params: dict, adapter_id: str | None
    ) -> str:
        """POST one chat-completion request; returns the assistant message text."""
        # Map your sampling params to OpenAI schema
        body = {
            "model": self.model_name,
            "messages": [{"role": "user", "content": prompt_text}],
            "temperature": sampling_params.get("temperature", 1.0),
            "top_p": sampling_params.get("top_p", 1.0),
            "max_tokens": sampling_params.get("max_new_tokens", 128),
        }
        # Optional knobs:
        if sampling_params.get("top_k", -1) and sampling_params["top_k"] > 0:
            # vLLM accepts top_k via extra params; put under "extra_body"
            body.setdefault("extra_body", {})["top_k"] = sampling_params["top_k"]
        if sampling_params.get("min_new_tokens", None) is not None:
            body.setdefault("extra_body", {})["min_tokens"] = sampling_params[
                "min_new_tokens"
            ]
        if sampling_params.get("frequency_penalty", None) is not None:
            body["frequency_penalty"] = sampling_params["frequency_penalty"]

        # Select LoRA adapter
        if adapter_id:
            if self.runtime_lora_enabled:
                body.setdefault("extra_body", {})["lora_adapter"] = adapter_id
            else:
                # when preloaded via CLI, most builds select by name via "adapter_name"/"lora_adapter"
                body.setdefault("extra_body", {})["lora_adapter"] = adapter_id

        url = f"{self.base_url}/v1/chat/completions"
        # Very long timeout: generation may legitimately take a while.
        timeout = httpx.Timeout(3600.0, connect=3600.0)
        async with httpx.AsyncClient(timeout=timeout) as client:
            resp = await client.post(url, json=body)
            resp.raise_for_status()
            data = resp.json()
            return data["choices"][0]["message"]["content"]

    def toggle_training_mode(self) -> None:
        # vLLM doesn’t expose an explicit KV “release” toggle via API.
        # Strategy: keep inference server idle during training, or run training in a separate process.
        pass

    def toggle_eval_mode(self) -> None:
        pass

    def shutdown(self) -> None:
        """Terminate the server subprocess, if one was launched."""
        if self.proc:
            self.proc.terminate()
src_code_for_reproducibility/models/large_language_model_api.py ADDED
@@ -0,0 +1,171 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import asyncio
4
+ import copy
5
+ import os
6
+ import random
7
+ import re
8
+ from typing import Any, Callable, Dict, List, Optional, Sequence
9
+
10
+ import backoff
11
+ from openai import AsyncOpenAI, OpenAIError
12
+
13
+ from mllm.markov_games.rollout_tree import ChatTurn
14
+ from mllm.models.inference_backend import LLMInferenceOutput
15
+
16
# TODO: Get this automatically from OpenAI
# Model names treated as "reasoning" models: when `model` is in this list,
# LargeLanguageModelOpenAI.__init__ adds a "reasoning" entry to the request
# sampling params (effort/summary options of the Responses API).
reasoning_models = [
    "gpt-5-nano",
    "gpt-5-mini",
    "gpt-5",
    "o1-mini",
    "o1",
    "o1-pro",
    "o3-mini",
    "o3",
    "o3-pro",
    "o4-mini",
    "o4",
    "o4-pro",
]
31
+
32
+
33
class LargeLanguageModelOpenAI:
    """Tiny async wrapper around the OpenAI Responses API.

    Exposes the same policy interface as the local-model wrappers (a mapping
    from ``llm_id`` to an async ``get_action`` callable), so hosted and local
    models are interchangeable in the rollout code. Adapter/training hooks are
    accepted but are no-ops for a hosted model.
    """

    def __init__(
        self,
        llm_id: str = "",
        model: str = "gpt-4.1-mini",
        api_key: Optional[str] = None,
        base_url: Optional[str] = None,
        timeout_s: float = 300.0,
        regex_max_attempts: int = 10,
        sampling_params: Optional[Dict[str, Any]] = None,
        init_kwargs: Optional[Dict[str, Any]] = None,
        output_directory: Optional[str] = None,
    ) -> None:
        """
        Args:
            llm_id: Key under which this model's policy is registered.
            model: OpenAI model name.
            api_key: Explicit key; falls back to the OPENAI_API_KEY env var.
            base_url: Optional alternative API endpoint.
            timeout_s: Per-request client timeout in seconds.
            regex_max_attempts: Max resampling attempts when a regex is
                required (clamped to at least 1).
            sampling_params: Extra request parameters forwarded verbatim to
                ``responses.create``; may be omitted.
            init_kwargs: Unused; kept for interface parity with local models.
            output_directory: Unused; kept for interface parity.

        Raises:
            RuntimeError: If no API key is provided or found in the env.
        """
        self.llm_id = llm_id
        self.model = model
        key = api_key or os.getenv("OPENAI_API_KEY")
        if not key:
            raise RuntimeError(
                "Set OPENAI_API_KEY as global environment variable or pass api_key."
            )
        client_kwargs: Dict[str, Any] = {"api_key": key, "timeout": timeout_s}
        if base_url:
            client_kwargs["base_url"] = base_url
        self.client = AsyncOpenAI(**client_kwargs)

        # Fix: the original stored `sampling_params` as-is, so the default
        # (None) crashed below on `self.sampling_params["reasoning"] = ...`
        # and in get_action on `**self.sampling_params`. Copy the dict so we
        # also never mutate the caller's object.
        self.sampling_params: Dict[str, Any] = (
            dict(sampling_params) if sampling_params else {}
        )
        self.use_reasoning = model in reasoning_models
        if self.use_reasoning:
            self.sampling_params["reasoning"] = {
                "effort": "low",
                "summary": "detailed",
            }
        self.regex_max_attempts = max(1, int(regex_max_attempts))

    def get_inference_policies(self) -> Dict[str, Callable]:
        """Return the single policy callable keyed by this model's llm_id."""
        return {
            self.llm_id: self.get_action,
        }

    # --- Adapter/training hooks: no-ops for an API-hosted model. ---

    async def prepare_adapter_for_inference(self, *args: Any, **kwargs: Any) -> None:
        await asyncio.sleep(0)

    async def toggle_eval_mode(self, *args: Any, **kwargs: Any) -> None:
        await asyncio.sleep(0)

    async def toggle_training_mode(self, *args: Any, **kwargs: Any) -> None:
        await asyncio.sleep(0)

    async def export_adapters(self, *args: Any, **kwargs: Any) -> None:
        await asyncio.sleep(0)

    async def checkpoint_all_adapters(self, *args: Any, **kwargs: Any) -> None:
        await asyncio.sleep(0)

    def extract_output_from_response(self, resp: Any) -> LLMInferenceOutput:
        """Unpack an OpenAI Responses-API response object.

        When the response carries a reasoning item, `resp.output` has two
        entries: [reasoning, message]; otherwise only [message].
        (Fix: the parameter was annotated with the undefined name `Response`.)
        """
        if len(resp.output) > 1:
            summary = resp.output[0].summary
            if summary != []:
                reasoning_content = summary[0].text
                reasoning_content = f"OpenAI Reasoning Summary: {reasoning_content}"
            else:
                reasoning_content = None
            content = resp.output[1].content[0].text
        else:
            reasoning_content = None
            content = resp.output[0].content[0].text

        return LLMInferenceOutput(
            content=content,
            reasoning_content=reasoning_content,
        )

    # Retry (effectively) forever on transient API errors.
    @backoff.on_exception(
        backoff.expo, Exception, max_time=10**10, max_tries=10**10
    )
    async def get_action(
        self,
        state: list[ChatTurn],
        agent_id: str,
        regex: Optional[str] = None,
    ) -> LLMInferenceOutput:
        """Sample a completion for `state`; optionally enforce `regex`.

        The API has no constrained decoding, so when a regex is required we
        prime the model with the pattern and validate client-side, retrying
        up to `regex_max_attempts` times. The last (possibly non-matching)
        output is returned if all attempts fail.
        """
        # Keep only role/content keys — extra keys make the API error out.
        prompt = [{"role": p.role, "content": p.content} for p in state]

        if regex:
            constraint_msg = {
                "role": "user",
                "content": (
                    f"Output must match this regex exactly: {regex} \n"
                    "Return only the matching string, with no quotes or extra text."
                ),
            }
            prompt = [constraint_msg, *prompt]
            pattern = re.compile(regex)
            for _ in range(self.regex_max_attempts):
                resp = await self.client.responses.create(
                    model=self.model,
                    input=prompt,
                    **self.sampling_params,
                )
                policy_output = self.extract_output_from_response(resp)
                if pattern.fullmatch(policy_output.content):
                    return policy_output
                # Ask again, reminding the model of the expected format.
                prompt = [
                    *prompt,
                    {
                        "role": "user",
                        "content": (
                            f"Invalid response format. Expected format (regex): {regex}\n Please try again and provide ONLY a response that matches this regex."
                        ),
                    },
                ]
            return policy_output

        # Simple, unconstrained generation
        resp = await self.client.responses.create(
            model=self.model,
            input=prompt,
            **self.sampling_params,
        )
        policy_output = self.extract_output_from_response(resp)
        return policy_output

    def shutdown(self) -> None:
        # Drop the client reference; the async client needs no explicit close
        # here beyond garbage collection.
        self.client = None
src_code_for_reproducibility/models/large_language_model_local.py ADDED
@@ -0,0 +1,384 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ TODO: Figure out how to tweak SGlang not to go OOM when batch size is 32. See https://github.com/sgl-project/sglang/issues/6309.
3
+ """
4
+
5
+ import logging
6
+ import os
7
+ import re
8
+ import sys
9
+ import uuid
10
+ from collections.abc import Callable
11
+ from copy import deepcopy
12
+ from datetime import datetime
13
+ from typing import Literal
14
+
15
+ import httpx
16
+ import requests
17
+ import torch
18
+ import torch.nn as nn
19
+
20
+ # from sglang.utils import (
21
+ # launch_server_cmd,
22
+ # print_highlight,
23
+ # terminate_process,
24
+ # wait_for_server,
25
+ # )
26
+ from torch.optim import SGD, Adam, AdamW, RMSprop
27
+ from transformers import AutoModelForCausalLM, AutoTokenizer
28
+ from trl import AutoModelForCausalLMWithValueHead
29
+
30
+ from mllm.chat_utils.apply_template import chat_turns_to_token_ids
31
+ from mllm.markov_games.rollout_tree import ChatTurn
32
+ from mllm.models.adapter_training_wrapper import AdapterWrapper
33
+ from mllm.models.inference_backend import LLMInferenceOutput
34
+ from mllm.models.inference_backend_dummy import DummyInferenceBackend
35
+ from mllm.models.inference_backend_sglang import SGLangOfflineBackend
36
+ from mllm.models.inference_backend_vllm import VLLMAsyncBackend
37
+
38
# Module logger, mirrored to stdout so messages appear in captured run logs.
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler(sys.stdout))

# Readability aliases: adapters and policies are addressed by string keys.
AdapterID = str
PolicyID = str
43
+
44
+
45
class LeanLocalLLM:
    """
    Owns one shared Hugging Face base model plus a set of LoRA adapters
    (current training adapters and frozen "buffer" checkpoints of past agent
    adapters), and serves them for rollouts through a pluggable inference
    backend (vLLM, SGLang, or a dummy backend for tests).
    """

    def __init__(
        self,
        llm_id: str = "base_llm",
        model_name: str = "Qwen/Qwen3-4B-Instruct-2507",
        device: str = "cuda",
        hf_kwargs: dict | None = None,
        adapter_configs: dict | None = None,
        output_directory: str = "./models/",
        inference_backend: Literal["vllm", "sglang", "dummy"] = "vllm",
        inference_backend_sampling_params: dict | None = None,
        inference_backend_init_kwargs: dict | None = None,
        initial_adapter_paths: dict[str, str] | None = None,
        initial_buffer_paths: list[str] | None = None,
        enable_thinking: bool | None = None,
        regex_max_attempts: int = -1,
        max_thinking_characters: int = 0,
    ):
        """
        Args:
            llm_id: Prefix used when naming this model's policies.
            model_name: HF hub id of the shared base model.
            device: Torch device for the HF model and adapters.
            hf_kwargs: Extra kwargs for AutoModelForCausalLM.from_pretrained.
            adapter_configs: Mapping adapter_id -> LoRA config.
            output_directory: Root for exported adapters and checkpoints.
            inference_backend: Which serving backend to construct.
            inference_backend_sampling_params: Sampling params for the backend.
            inference_backend_init_kwargs: Constructor kwargs for the backend.
            initial_adapter_paths: Optional {adapter_id: path_or_repo} initial
                weights (local or HF Hub).
            initial_buffer_paths: Optional dirs of past-agent checkpoint
                folders to preload as frozen buffer adapters.
            enable_thinking: Chat-template "thinking" switch; None = template
                default.
            regex_max_attempts: -1 = enforce regex via constrained decoding;
                otherwise number of free-form retries before constraining.
            max_thinking_characters: >0 enables thinking-extraction in the
                backend.
        """
        # Fix: these four parameters previously used mutable default
        # arguments ({}), which are shared across all calls; normalize
        # None -> fresh dict instead.
        hf_kwargs = {} if hf_kwargs is None else hf_kwargs
        adapter_configs = {} if adapter_configs is None else adapter_configs
        inference_backend_sampling_params = (
            {}
            if inference_backend_sampling_params is None
            else inference_backend_sampling_params
        )
        inference_backend_init_kwargs = (
            {}
            if inference_backend_init_kwargs is None
            else inference_backend_init_kwargs
        )

        self.inference_backend_name = inference_backend
        self.output_directory = output_directory
        self.llm_id = llm_id
        self.device = torch.device(device) if device else torch.device("cuda")
        self.model_name = model_name
        self.adapter_configs = adapter_configs
        self.adapter_ids = list(adapter_configs.keys())
        self.enable_thinking = enable_thinking
        self.regex_max_attempts = regex_max_attempts
        self.initial_buffer_paths = initial_buffer_paths
        self.max_thinking_characters = max_thinking_characters
        self.regex_retries_count = 0

        # Optional user-specified initial adapter weight locations (local or HF Hub)
        # Format: {adapter_id: path_or_repo_id}
        self.initial_adapter_paths: dict[str, str] | None = initial_adapter_paths

        # Path management: adapters live under <output>/<model>/adapters/<id>.
        self.save_path = str(os.path.join(output_directory, model_name, "adapters"))
        self.adapter_paths = {
            adapter_id: os.path.join(self.save_path, adapter_id)
            for adapter_id in self.adapter_ids
        }

        # Collect frozen past-agent adapters ("buffers") from the local
        # checkpoints directory, then from any user-specified buffer roots.
        checkpoints_dir = os.path.join(self.output_directory, "checkpoints")
        self.past_agent_adapter_paths = {}
        if os.path.isdir(checkpoints_dir):
            for dirname in os.listdir(checkpoints_dir):
                dirpath = os.path.join(checkpoints_dir, dirname)
                if os.path.isdir(dirpath):
                    self.past_agent_adapter_paths[f"{dirname}_buffer"] = os.path.join(
                        dirpath, "agent_adapter"
                    )
            logger.info(
                f"Loaded {len(self.past_agent_adapter_paths)} past agent adapters from checkpoints directory."
            )
        if self.initial_buffer_paths is not None:
            previous_count = len(self.past_agent_adapter_paths)
            for path in self.initial_buffer_paths:
                if os.path.isdir(path):
                    for dirname in os.listdir(path):
                        dirpath = os.path.join(path, dirname)
                        if os.path.isdir(dirpath):
                            self.past_agent_adapter_paths[
                                f"{dirname}_buffer"
                            ] = os.path.join(dirpath, "agent_adapter")
                else:
                    logger.warning(
                        f"Initial buffer path {path} does not exist or is not a directory."
                    )
            logger.info(
                f"Loaded {len(self.past_agent_adapter_paths) - previous_count} past agent adapters from user-specified initial buffer paths."
            )
        self.past_agent_adapter_ids = list(self.past_agent_adapter_paths.keys())

        # Short ids tracking the training "version" of each adapter.
        self.adapter_train_ids = {
            adapter_id: self.short_id_generator() for adapter_id in self.adapter_ids
        }

        # Tokenizer; pad with EOS since the base models ship without a pad token.
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        self.tokenizer.pad_token_id = self.tokenizer.eos_token_id
        self.tokenizer.pad_token = self.tokenizer.eos_token

        # Dirty flags: the backend reloads an adapter's weights only when its
        # flag is set (see prepare_adapter_for_inference / export_adapters).
        self.weights_got_updated: dict[AdapterID, bool] = {
            adapter_id: False for adapter_id in self.adapter_ids
        }
        self.weights_got_updated.update(
            {adapter_id: False for adapter_id in self.past_agent_adapter_ids}
        )
        self.current_lora_request = None
        self.currently_loaded_adapter_id = None

        # ---------------------------------------------------------
        # Init HF model, peft adapters
        # ---------------------------------------------------------
        self.shared_hf_llm = AutoModelForCausalLM.from_pretrained(
            pretrained_model_name_or_path=model_name,
            **hf_kwargs,
        )
        self.hf_adapters = {}
        self.optimizers = {}
        for adapter_id in self.adapter_ids:
            # Prefer output-folder weights if they exist; else fall back to a
            # user-specified initial path; else start from scratch.
            output_path = os.path.join(self.save_path, adapter_id)
            chosen_path: str | None = None
            if os.path.isdir(output_path) and os.listdir(output_path):
                chosen_path = output_path
                logger.info(
                    f"Initializing adapter '{adapter_id}': using existing weights from output folder '{chosen_path}'."
                )
            elif (
                self.initial_adapter_paths and adapter_id in self.initial_adapter_paths
            ):
                chosen_path = self.initial_adapter_paths[adapter_id]
                logger.info(
                    f"Initializing adapter '{adapter_id}': using provided initial path '{chosen_path}'."
                )
            else:
                logger.info(
                    f"Initializing adapter '{adapter_id}': no initial weights provided or found; starting from scratch."
                )
            hf_adapter = AdapterWrapper(
                shared_llm=self.shared_hf_llm,
                adapter_id=adapter_id,
                lora_config=adapter_configs[adapter_id],
                path=chosen_path,
            ).to(device)
            self.hf_adapters[adapter_id] = hf_adapter
        # Persist current state of all adapters (ensures remote loads are cached to disk)
        self.export_adapters()

        # ---------------------------------------------------------
        # Init inference backend
        # ---------------------------------------------------------
        if inference_backend == "sglang":
            self.inference_backend = SGLangOfflineBackend(
                model_name=self.model_name,
                save_path=self.save_path,
                adapter_paths=self.adapter_paths,
                tokenizer=self.tokenizer,
                kwargs=inference_backend_init_kwargs,
            )
        elif inference_backend == "vllm":
            self.inference_backend = VLLMAsyncBackend(
                model_name=self.model_name,
                tokenizer=self.tokenizer,
                engine_init_kwargs=inference_backend_init_kwargs,
                sampling_params=inference_backend_sampling_params,
            )
        elif inference_backend == "dummy":
            self.inference_backend = DummyInferenceBackend()
        else:
            raise ValueError(f"Unknown inference_backend: {inference_backend}")

    def reset_regex_retries_count(self) -> None:
        """Reset the counter of regex-mismatch retries."""
        self.regex_retries_count = 0

    def get_inference_policies(self) -> dict[PolicyID, Callable]:
        """Return one async policy per adapter, keyed "<llm_id>/<adapter_id>".

        Current training adapters and frozen buffer adapters get identical
        closures; the adapter id is bound via a default argument so each
        closure keeps its own adapter instead of the last loop value.
        """
        policies = {}
        for adapter_id in [*self.adapter_ids, *self.past_agent_adapter_ids]:

            async def policy(
                state: list[ChatTurn],
                agent_id: str,
                regex: str | None = None,
                _adapter_id=adapter_id,
            ):
                self.prepare_adapter_for_inference(adapter_id=_adapter_id)
                return await self.get_action(state, agent_id, regex)

            policies[self.llm_id + "/" + adapter_id] = policy
        return policies

    def get_adapter_modules(self) -> dict[PolicyID, nn.Module]:
        """Return the trainable adapter wrappers, usable as regular modules."""
        return {an: self.hf_adapters[an] for an in self.adapter_ids}

    async def toggle_training_mode(self) -> None:
        # New training round: stamp fresh version ids, then let the backend
        # free inference resources.
        for adn in self.adapter_ids:
            self.adapter_train_ids[adn] = self.short_id_generator()
        await self.inference_backend.toggle_training_mode()

    async def toggle_eval_mode(self) -> None:
        await self.inference_backend.toggle_eval_mode()

    def prepare_adapter_for_inference(self, adapter_id: AdapterID) -> None:
        """Ask the backend to load `adapter_id`, reloading weights only if
        they changed since the last load (dirty-flag protocol)."""
        self.inference_backend.prepare_adapter(
            adapter_id,
            adapter_path=self.adapter_paths.get(
                adapter_id, self.past_agent_adapter_paths.get(adapter_id, None)
            ),
            weights_got_updated=self.weights_got_updated[adapter_id],
        )
        self.currently_loaded_adapter_id = adapter_id
        self.weights_got_updated[adapter_id] = False

    async def get_action(
        self, state: list[ChatTurn], agent_id: str, regex: str | None = None
    ) -> ChatTurn:
        """Generate an assistant ChatTurn for `state`.

        Regex handling: with regex_max_attempts == -1 the backend enforces
        the pattern via constrained decoding. Otherwise we sample freely and
        retry on mismatch; once the attempt budget is spent, the final
        generation is constrained and returned regardless of the match.
        """
        current_regex = regex if self.regex_max_attempts == -1 else None
        pattern = re.compile(regex) if regex else None
        nb_attempts = 0
        state = state[:]  # defensive copy; callers keep their list intact
        while True:
            context_token_ids = chat_turns_to_token_ids(
                chats=state,
                tokenizer=self.tokenizer,
                enable_thinking=self.enable_thinking,
            )
            policy_output = await self.inference_backend.generate(
                input_token_ids=context_token_ids.tolist(),
                extract_thinking=(self.max_thinking_characters > 0),
                regex=current_regex,
            )
            if (
                pattern is None
                or (pattern.fullmatch(policy_output.content))
                or (nb_attempts >= self.regex_max_attempts)
            ):
                return ChatTurn(
                    agent_id=agent_id,
                    role="assistant",
                    content=policy_output.content,
                    reasoning_content=policy_output.reasoning_content,
                    out_token_ids=policy_output.out_token_ids,
                    log_probs=policy_output.log_probs,
                    is_state_end=False,
                )
            else:
                self.regex_retries_count += 1
                nb_attempts += 1
                logger.warning(
                    f"Response {policy_output.content} did not match regex: {regex}, retry {nb_attempts}/{self.regex_max_attempts}"
                )
                # Budget spent: constrain the final attempt.
                if nb_attempts == self.regex_max_attempts:
                    current_regex = regex

    def export_adapters(self) -> None:
        """Persist all adapters to `save_path` and mark them dirty.

        Any peft wrapper, by default, saves all adapters, not just the one
        currently loaded, so a single save_pretrained call suffices.
        """
        for adapter_id in self.adapter_ids:
            self.weights_got_updated[adapter_id] = True
        for adapter_id in self.past_agent_adapter_ids:
            self.weights_got_updated[adapter_id] = True

        adapter_id = self.adapter_ids[0]
        self.hf_adapters[adapter_id].save_pretrained(self.save_path)

    def checkpoint_all_adapters(self, checkpoint_indicator: str) -> None:
        """Checkpoint all adapters under <output>/checkpoints and register the
        agent adapters as new frozen buffer entries."""
        adapter_id = self.adapter_ids[0]
        output_dir = os.path.join(self.output_directory, "checkpoints")
        os.makedirs(output_dir, exist_ok=True)
        date_str = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        agent_adapter_dir = f"{adapter_id}-{checkpoint_indicator}-{date_str}"
        export_path = os.path.join(output_dir, agent_adapter_dir)
        for adapter_id in self.adapter_ids:
            if "agent" in adapter_id:
                self.past_agent_adapter_paths[
                    f"{agent_adapter_dir}_buffer"
                ] = os.path.join(export_path, adapter_id)
                self.past_agent_adapter_ids.append(f"{agent_adapter_dir}_buffer")
                self.weights_got_updated[f"{agent_adapter_dir}_buffer"] = False
        self.hf_adapters[adapter_id].save_pretrained(export_path)

    def short_id_generator(self) -> str:
        """
        Generates a short unique ID for tracking adapter versions.

        Returns:
            str: The first 8 decimal digits of a random UUID's integer form.
        """
        return str(uuid.uuid4().int)[:8]
src_code_for_reproducibility/models/scalar_critic.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch, torch.nn as nn, torch.optim as optim
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer
3
+ from peft import LoraConfig, get_peft_model
4
+
5
+ from mllm.models.adapter_training_wrapper import AdapterWrapper
6
+
7
+
8
class ScalarCritic(nn.Module):
    """
    A causal-LM critic_adapter + a scalar value head:
    V_φ(s) = wᵀ h_last + b
    Only LoRA adapters (inside critic_adapter) and the value head are trainable.
    """
    def __init__(self, critic_adapter: AdapterWrapper):
        super().__init__()
        self.critic_adapter = critic_adapter
        # Head fan-in is the shared base model's hidden size.
        hidden_size = self.critic_adapter.shared_llm.config.hidden_size
        # Match the adapter's dtype/device so forward needs no casts.
        self.value_head = nn.Linear(hidden_size, 1).to(
            dtype=critic_adapter.dtype,
            device=critic_adapter.device)

    def forward(self,
                input_ids,
                attention_mask=None,
                **kwargs):
        """Return per-token value estimates of shape (B, S)."""
        # AdapterWrapper activates its own adapter internally
        outputs = self.critic_adapter(
            input_ids=input_ids,
            attention_mask=attention_mask,
            output_hidden_states=True,
            **kwargs,
        )
        h_last = outputs.hidden_states[-1]  # (B, S, H)
        values = self.value_head(h_last).squeeze(-1)  # (B, S)
        return values

    def parameters(self, recurse: bool = True):
        """Iterator over *trainable* parameters for this critic.

        NOTE(review): this overrides nn.Module.parameters and ignores
        `recurse`; it presumably relies on critic_adapter.parameters()
        yielding only the LoRA weights — confirm, otherwise frozen base
        weights would leak into optimizers built from this iterator.
        """
        # 1) LoRA params for *this* adapter
        for p in self.critic_adapter.parameters():
            yield p
        # 2) scalar head
        yield from self.value_head.parameters()

    def gradient_checkpointing_enable(self, *args, **kwargs):
        # Delegate to the wrapped adapter / base model.
        self.critic_adapter.gradient_checkpointing_enable(*args, **kwargs)

    @property
    def dtype(self):
        # Mirrors the underlying adapter's parameter dtype.
        return self.critic_adapter.dtype

    @property
    def device(self):
        return self.critic_adapter.device
src_code_for_reproducibility/training/README.md ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Suppose we have a trajectory with 3 timesteps.
2
+ token: "0 1 2 3 4 5 6 7 8 9 . . . . ."
3
+ string: "A B C a b c A a A a b c A B C" (Capitalized = User, Lowercased = Assistant)
4
+ action_mask: "x x x ✓ ✓ ✓ x ✓ x ✓ ✓ ✓ x x x" (x = False, ✓ = True)
5
+ rewards: "r r r r r r R R R R R R r r r"
6
+ timestep: "0 0 0 0 0 0 1 1 1 1 1 1 2 2 2"
7
+ state_ends: "x x ✓ x x x ✓ x x x x x x x ✓"
8
+
9
+ There must be one baseline flag per timestep!
10
+
11
+ Then, we might have
12
+
13
+ A naive way to interpret this is to think of the number of assistant messages as the number of
14
+ steps in the environment. However, this is not the case in practice. Indeed, in a
15
+ single simulation step,
16
+
17
+
18
+
19
+
20
+ A subtlety arises with credit assignment. In the multi-agent case, we might
src_code_for_reproducibility/training/__init__.py ADDED
File without changes
src_code_for_reproducibility/training/annealing_methods.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
+
4
+ def sigmoid_annealing(step: int, temperature: float) -> float:
5
+ return 2 / (1 + np.exp(-step / temperature)) - 1
6
+
src_code_for_reproducibility/training/credit_methods.py ADDED
@@ -0,0 +1,304 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
+
4
def whiten_advantages(advantages: torch.Tensor) -> torch.Tensor:
    """Normalize advantages to zero mean and (near) unit variance.

    A small epsilon keeps the division finite when all entries are equal.
    """
    centered = advantages - advantages.mean()
    return centered / (advantages.std() + 1e-9)
12
+
13
+
14
def whiten_advantages_time_step_wise(
    advantages: torch.Tensor,  # (B, T)
) -> torch.Tensor:
    """Normalize each time step's advantages across the batch dimension."""
    assert advantages.dim() == 2, "Wrong dimensions."
    per_step_mean = advantages.mean(dim=0, keepdim=True)
    per_step_std = advantages.std(dim=0, keepdim=True)
    return (advantages - per_step_mean) / (per_step_std + 1e-9)
25
+
26
+
27
def get_discounted_state_visitation_credits(
    credits: torch.Tensor, discount_factor: float  # (B, T)
) -> torch.Tensor:
    """Scale column t of `credits` by discount_factor ** t."""
    horizon = credits.shape[1]
    step_indices = torch.arange(horizon, device=credits.device)
    return credits * discount_factor**step_indices
36
+
37
+
38
def get_discounted_returns(
    rewards: torch.Tensor,  # (B, T)
    discount_factor: float,
) -> torch.Tensor:
    """
    Computes Monte Carlo discounted returns via a right-to-left sweep.

    Args:
        rewards (torch.Tensor): Per-timestep rewards, shape (B, T).

    Returns:
        torch.Tensor: Discounted returns, same shape as `rewards`.
    """
    assert rewards.dim() == 2, "Wrong dimensions."
    batch_size, horizon = rewards.shape
    returns = torch.zeros_like(rewards)
    running = torch.zeros(batch_size, device=rewards.device, dtype=rewards.dtype)
    for step in range(horizon - 1, -1, -1):
        running = rewards[:, step] + discount_factor * running
        returns[:, step] = running
    return returns
59
+
60
+
61
def get_rloo_credits(credits: torch.Tensor):  # (B, S)
    """Leave-one-out baselines: each row is debiased by the mean of the
    other rows. Returns (rloo_credits, rloo_baselines); with a single
    sample the credits pass through unchanged and the baseline is zero."""
    assert credits.dim() == 2, "Wrong dimensions."
    num_samples = credits.shape[0]
    if num_samples == 1:
        return credits, torch.zeros_like(credits)
    column_totals = torch.sum(credits, dim=0, keepdim=True)
    rloo_baselines = (column_totals - credits) / (num_samples - 1)
    return credits - rloo_baselines, rloo_baselines
70
+
71
+
72
def get_generalized_advantage_estimates(
    rewards: torch.Tensor,  # (B, T)
    value_estimates: torch.Tensor,  # (B, T+1)
    discount_factor: float,
    lambda_coef: float,
) -> torch.Tensor:
    """
    Computes Generalized Advantage Estimates (GAE): the lambda-discounted
    sum of one-step TD errors, swept right to left.
    See https://arxiv.org/pdf/1506.02438 for details.

    Returns:
        torch.Tensor: GAE values, shape (B, T).
    """
    assert rewards.dim() == value_estimates.dim() == 2, "Wrong dimensions."

    assert (
        rewards.shape[0] == value_estimates.shape[0]
    ), f"Got shapes {rewards.shape} and {value_estimates.shape} of rewards and value estimates."
    assert (
        rewards.shape[1] == value_estimates.shape[1] - 1
    ), f"Got shapes {rewards.shape} and {value_estimates.shape} of rewards and value estimates."

    horizon = rewards.shape[1]
    # One-step TD errors: r_t + gamma * V(s_{t+1}) - V(s_t)
    deltas = (
        rewards
        + discount_factor * value_estimates[:, 1:]
        - value_estimates[:, :-1]
    )
    gaes = torch.zeros_like(deltas)
    running = 0.0
    for step in range(horizon - 1, -1, -1):
        running = deltas[:, step] + lambda_coef * discount_factor * running
        gaes[:, step] = running
    return gaes
103
+
104
+
105
def get_advantage_alignment_weights(
    advantages: torch.Tensor,  # (B, T)
    exclude_k_equals_t: bool,
    gamma: float,
    discount_t: bool,
) -> torch.Tensor:
    r"""Vectorized advantage-alignment weights.

    For each t, computes \( \sum_{k \le t} \gamma^{t-k} A(s_k, a_k, b_k) \)
    (or \(k < t\) when `exclude_k_equals_t`), via the identity
    \( \gamma^{t} \sum_k \gamma^{-k} A_k \): scale column k by
    \(\gamma^{-k}\), prefix-sum with an upper-triangular matmul, then
    rescale column t by \(\gamma^{t}\).

    NOTE(review): the \(\gamma^{-k}\) factor grows geometrically, so for
    small gamma and long horizons the intermediate products can overflow —
    confirm acceptable ranges for the horizons used here.
    """
    T = advantages.shape[1]
    # Column k scaled by gamma^{-k}.
    discounted_advantages = advantages * (
        gamma * torch.ones((1, T), device=advantages.device)
    ) ** (-torch.arange(0, T, 1, device=advantages.device))
    if exclude_k_equals_t:
        sub = torch.eye(T, device=advantages.device)
    else:
        sub = torch.zeros((T, T), device=advantages.device)
    # Identity is for \( k < t \), remove for \( k \leq t \)
    ad_align_weights = discounted_advantages @ (
        torch.triu(torch.ones((T, T), device=advantages.device)) - sub
    )
    # Rescale column t by gamma^{t} to recover gamma^{t-k}.
    t_discounts = (gamma * torch.ones((1, T), device=advantages.device)) ** (
        torch.arange(0, T, 1, device=advantages.device)
    )
    ad_align_weights = t_discounts * ad_align_weights
    if discount_t:
        # Swap the undiscounted k == t contribution for a gamma^{t}-discounted one.
        time_discounted_advantages = advantages * (
            gamma * torch.ones((1, T), device=advantages.device)
        ) ** (torch.arange(0, T, 1, device=advantages.device))
        ad_align_weights = ad_align_weights - advantages + time_discounted_advantages
    return ad_align_weights
145
+
146
+
147
def get_advantage_alignment_credits(
    a1: torch.Tensor,  # (B, S)
    a1_alternative: torch.Tensor,  # (B, S, A)
    a2: torch.Tensor,  # (B, S)
    exclude_k_equals_t: bool,
    beta: float,
    gamma: float = 1.0,
    use_old_ad_align: bool = False,
    use_sign: bool = False,
    clipping: float | None = None,
    use_time_regularization: bool = False,
    force_coop_first_step: bool = False,
    use_variance_regularization: bool = False,
    rloo_branch: bool = False,
    reuse_baseline: bool = False,
    mean_normalize_ad_align: bool = False,
    whiten_adalign_advantages: bool = False,
    whiten_adalign_advantages_time_step_wise: bool = False,
    discount_t: bool = False,
) -> tuple[torch.Tensor, dict]:
    r"""
    Calculate the advantage alignment credits with vectorization, as described
    in https://arxiv.org/abs/2406.14662.

    This method computes:
    \[
    Credit(s_t, a_t, b_t) = A^1(s_t, a_t, b_t) + \beta \cdot
    \left( \sum_{k \le t} \gamma^{t-k} A^1(s_k, a'_k, b_k) \right)
    A^2(s_t, a_t, b_t)
    \]
    with optional post-processing of the alignment weights (sign, clipping,
    1/(1+t) regularization, forced cooperation at t=0) and of the final
    credits (mean-normalization, whitening).

    Args:
        a1: Advantages of the main trajectories for the current agent, (B, S).
        a1_alternative: Advantages of alternative actions, (B, S, A); may be
            None only when `use_old_ad_align` is True.
        a2: Advantages of the main trajectories for the other agent, (B, S).
        exclude_k_equals_t: Restrict the alignment sum to k < t.
        beta: Weight of the opponent-shaping term.
        gamma: Discount used inside the alignment weights.
        use_sign: Replace alignment weights by their sign (requires beta == 1).

    Returns:
        tuple[torch.Tensor, dict]: The (B, S) credits and a dict of named
        intermediate tensors for logging/debugging. (Fix: the annotation
        previously claimed a bare torch.Tensor.)
    """
    assert a1.dim() == a2.dim() == 2, "Advantages must be of shape (B, S)"
    if a1_alternative is not None:
        assert (
            a1_alternative.dim() == 3
        ), "Alternative advantages must be of shape (B, S, A)"
        B, T, A = a1_alternative.shape
    else:
        B, T = a1.shape
    assert a1.shape == a2.shape, "Not the same shape"

    sub_tensors = {}

    if use_old_ad_align:
        # Weights built from the observed advantages themselves.
        ad_align_weights = get_advantage_alignment_weights(
            advantages=a1,
            exclude_k_equals_t=exclude_k_equals_t,
            gamma=gamma,
            discount_t=discount_t,
        )
        sub_tensors["ad_align_weights_prev"] = ad_align_weights
        if exclude_k_equals_t:
            ad_align_weights = gamma * ad_align_weights
    else:
        # Weights built from counterfactual (alternative-action) advantages.
        assert a1_alternative is not None, "Alternative advantages must be provided"
        if rloo_branch:
            a1_alternative = torch.cat([a1.unsqueeze(2), a1_alternative], dim=2)
        a1_alternative = a1_alternative.mean(dim=2)
        a1, baseline = get_rloo_credits(a1)
        if reuse_baseline:
            a1_alternative = a1_alternative - baseline
        else:
            a1_alternative, _ = get_rloo_credits(a1_alternative)
        assert a1.shape == a1_alternative.shape, "Not the same shape"
        # Fix: `discount_t` is a required parameter of
        # get_advantage_alignment_weights and was previously omitted here,
        # raising a TypeError whenever this branch ran.
        ad_align_weights = get_advantage_alignment_weights(
            advantages=a1_alternative,
            exclude_k_equals_t=exclude_k_equals_t,
            gamma=gamma,
            discount_t=discount_t,
        )
        sub_tensors["ad_align_weights"] = ad_align_weights

    # Optionally keep only the sign of the alignment weights.
    if use_sign:
        assert beta == 1.0, "beta should be 1.0 when using sign"
        positive_signs = ad_align_weights > 0
        negative_signs = ad_align_weights < 0
        ad_align_weights[positive_signs] = 1
        ad_align_weights[negative_signs] = -1
        sub_tensors["ad_align_weights_sign"] = ad_align_weights
        # (rest are 0)

    ###################
    # Process weights
    ###################

    # Clip the alignment weights into [-clipping, clipping].
    if clipping not in [0.0, None]:
        # NOTE(review): these masks compare against +/-1 rather than the
        # clipping threshold; kept as-is since they only feed the ratio below.
        upper_mask = ad_align_weights > 1
        lower_mask = ad_align_weights < -1

        ad_align_weights = torch.clip(
            ad_align_weights,
            -clipping,
            clipping,
        )
        # Fix: Tensor.size is a method, so dividing by `upper_mask.size`
        # raised a TypeError; numel() gives the element count.
        clipping_ratio = (
            torch.sum(upper_mask) + torch.sum(lower_mask)
        ) / upper_mask.numel()
        sub_tensors["ad_align_clipping_ratio"] = clipping_ratio
        sub_tensors["clipped_ad_align_weights"] = ad_align_weights

    # 1/(1+t)-style regularization: damp late-timestep weights.
    if use_time_regularization:
        t_values = torch.arange(1, T + 1).to(ad_align_weights.device)
        ad_align_weights = ad_align_weights / t_values
        sub_tensors["time_regularized_ad_align_weights"] = ad_align_weights

    # Force a fully cooperative weight on the first step.
    if force_coop_first_step:
        ad_align_weights[:, 0] = 1
        sub_tensors["coop_first_step_ad_align_weights"] = ad_align_weights

    # NOTE: `use_variance_regularization` is accepted for interface
    # compatibility but currently has no effect.

    ####################################
    # Compose elements together
    ####################################

    opp_shaping_terms = beta * ad_align_weights * a2
    sub_tensors["ad_align_opp_shaping_terms"] = opp_shaping_terms

    credits = a1 + opp_shaping_terms
    if mean_normalize_ad_align:
        credits = credits - credits.mean(dim=0)
        sub_tensors["mean_normalized_ad_align_credits"] = credits
    if whiten_adalign_advantages:
        credits = (credits - credits.mean()) / (credits.std() + 1e-9)
        sub_tensors["whitened_ad_align_credits"] = credits
    if whiten_adalign_advantages_time_step_wise:
        credits = (credits - credits.mean(dim=0, keepdim=True)) / (
            credits.std(dim=0, keepdim=True) + 1e-9
        )
        sub_tensors["whitened_ad_align_credits_time_step_wise"] = credits
    sub_tensors["final_ad_align_credits"] = credits

    return credits, sub_tensors
src_code_for_reproducibility/training/tally_metrics.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from numbers import Number
3
+ from typing import Union
4
+
5
+ import wandb
6
+
7
+
8
class Tally:
    """
    Minimal scalar-first tally.

    - Keys are strings.
    - First add stores a scalar; subsequent adds upgrade to a list of scalars.
    """

    def __init__(self):
        # Mapping: path -> float (single add) or list[float] (repeated adds).
        self.stats = {}

    def reset(self):
        """Drop all recorded metrics."""
        self.stats = {}

    def _coerce_scalar(self, value: Union[int, float]) -> Union[int, float]:
        """Coerce `value` into a plain Python number.

        Accepts plain numbers as well as 0-dim tensor/array-likes that expose
        `.item()` (e.g. torch tensors, numpy scalars).

        Raises:
            AssertionError: If `value` is not (coercible to) a number.
        """
        if hasattr(value, "item") and callable(getattr(value, "item")):
            try:
                value = value.item()
            except Exception:
                # e.g. `.item()` on a multi-element tensor; the isinstance
                # check below will reject the original value instead.
                pass
        if isinstance(value, Number):
            return value
        raise AssertionError("Metric must be a scalar number")

    def add_metric(self, path: str, metric: Union[int, float]):
        """Record `metric` under `path`, upgrading to a list on repeat adds.

        Args:
            path: Metric key.
            metric: Scalar number or 0-dim tensor-like exposing `.item()`.
        """
        assert isinstance(path, str), "Path must be a string."
        # Coerce tensor-likes to a number first, then normalize to float so
        # stored values are uniform. (Previously the value was passed through
        # float() *before* coercion, which made `_coerce_scalar` a no-op and
        # the float-isinstance assert tautological.)
        scalar = float(self._coerce_scalar(metric))
        existing = self.stats.get(path)
        if existing is None:
            self.stats[path] = scalar
        elif isinstance(existing, list):
            existing.append(scalar)
        else:
            self.stats[path] = [existing, scalar]

    def save(self, identifier: str, folder: str):
        """Best-effort pickle of `self.stats` to `<folder>/<identifier>.tally.pkl`."""
        os.makedirs(name=folder, exist_ok=True)
        try:
            import pickle

            pkl_path = os.path.join(folder, f"{identifier}.tally.pkl")
            payload = self.stats
            with open(pkl_path, "wb") as f:
                pickle.dump(payload, f, protocol=pickle.HIGHEST_PROTOCOL)
        except Exception:
            # Deliberate best-effort: persisting metrics must never crash a run.
            pass
src_code_for_reproducibility/training/tally_rollout.py ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+ from copy import deepcopy
4
+ from typing import Union
5
+
6
+ import numpy as np
7
+ import pandas as pd
8
+ import torch
9
+ from transformers import AutoTokenizer
10
+
11
+
12
class RolloutTallyItem:
    def __init__(self, crn_ids: list[str], rollout_ids: list[str], agent_ids: list[str], metric_matrix: torch.Tensor):
        """
        Initializes the RolloutTallyItem object.

        Args:
            crn_ids (list[str]): List of CRN IDs.
            rollout_ids (list[str]): List of rollout IDs.
            agent_ids (list[str]): List of agent IDs.
            metric_matrix (torch.Tensor): Metric matrix.
        """

        def as_host(ids):
            # Tensors are detached and moved to host numpy; anything else
            # (plain lists of ids) passes through untouched.
            if isinstance(ids, torch.Tensor):
                return ids.detach().cpu().numpy()
            return ids

        self.crn_ids = as_host(crn_ids)
        self.rollout_ids = as_host(rollout_ids)
        self.agent_ids = as_host(agent_ids)

        matrix = metric_matrix.detach().cpu()
        assert 0 < matrix.ndim <= 2, "Metric matrix must have less than or equal to 2 dimensions"
        # Normalize a 1D vector into a single-row 2D matrix.
        if matrix.ndim == 1:
            matrix = matrix.reshape(1, -1)
        # numpy has no bfloat16 dtype, so upcast before conversion.
        if matrix.dtype == torch.bfloat16:
            matrix = matrix.float()
        self.metric_matrix = matrix.numpy()
40
+
41
class RolloutTally:
    """
    Tally is a utility class for collecting and storing training metrics.
    It supports adding metrics at specified paths and saving them to disk.
    """

    def __init__(self):
        """
        Initializes the RolloutTally object with an empty metric tree.
        """
        # Nested dict; leaves are lists of RolloutTallyItem (arrays preserved).
        self.metrics = {}

    def reset(self):
        """
        Resets the tally to an empty dictionary.
        """
        self.metrics = {}

    def get_from_nested_dict(self, dictio: dict, path: list):
        """
        Retrieves the value at a nested path in a dictionary.

        This is a read-only lookup: unlike the previous implementation (which
        used `setdefault` and created empty intermediate dicts as a side
        effect), a missing path leaves `dictio` unchanged.

        Args:
            dictio (dict): The dictionary to search.
            path (list): List of keys representing the path.

        Returns:
            Any: The value at the specified path, or None if not found.
        """
        assert isinstance(path, list), "Path must be list."
        for sp in path[:-1]:
            dictio = dictio.get(sp)
            if not isinstance(dictio, dict):
                return None
        return dictio.get(path[-1], None)

    def set_at_path(self, dictio: dict, path: list, value):
        """
        Sets a value at a nested path in a dictionary, creating intermediate
        dictionaries as needed.

        Args:
            dictio (dict): The dictionary to modify.
            path (list): List of keys representing the path.
            value (Any): The value to set at the specified path.
        """
        for sp in path[:-1]:
            dictio = dictio.setdefault(sp, {})
        dictio[path[-1]] = value

    def add_metric(self, path: list[str], rollout_tally_item: "RolloutTallyItem"):
        """
        Adds a metric to the tally at the specified path.

        Args:
            path (list): List of keys representing the path in the tally.
            rollout_tally_item (RolloutTallyItem): The rollout tally item to add.
        """
        # Deep copy so later caller-side mutation cannot alter recorded data.
        rollout_tally_item = deepcopy(rollout_tally_item)

        array_list = self.get_from_nested_dict(dictio=self.metrics, path=path)
        if array_list is None:
            self.set_at_path(dictio=self.metrics, path=path, value=[rollout_tally_item])
        else:
            array_list.append(rollout_tally_item)

    def save(self, identifier: str, folder: str):
        """
        Saves the metric tree to disk as a pickle file.

        Args:
            identifier (str): File-name stem for the saved tally.
            folder (str): Directory path where the metrics will be saved.
        """
        os.makedirs(name=folder, exist_ok=True)

        # Pickle only (fastest, exact structure with numpy/scalars at leaves)
        try:
            import pickle

            pkl_path = os.path.join(folder, f"{identifier}.rt_tally.pkl")
            payload = {"metrics": self.metrics}
            with open(pkl_path, "wb") as f:
                pickle.dump(payload, f, protocol=pickle.HIGHEST_PROTOCOL)
        except Exception:
            # Deliberate best-effort: persisting metrics must never crash a run.
            pass
src_code_for_reproducibility/training/tally_tokenwise.py ADDED
@@ -0,0 +1,276 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+ from typing import Any, Dict, List, Tuple, Union
4
+
5
+ import numpy as np
6
+ import pandas as pd
7
+ import torch
8
+ from transformers import AutoTokenizer
9
+
10
+
11
class ContextualizedTokenwiseTally:
    """
    Collect, store, and save token-level metrics per rollout.

    - One DataFrame per rollout_id in `paths`
    - Index = timestep (int)
    - Columns are added incrementally via `add_contexts()` and `add_data()`
    - Cells may contain scalars, strings, or lists (dtype=object)

    Usage protocol: call `set_action_mask()` and `set_range()` before each
    batch, then `add_contexts()` / `add_data()`, and finally `save()`.
    """

    def __init__(
        self,
        tokenizer: AutoTokenizer,
        paths: List[str],
        max_context_length: int = 30,
    ):
        """
        Args:
            tokenizer: HuggingFace tokenizer used to convert tids -> tokens
            paths: rollout identifiers (parallel to batch dimension)
            max_context_length: truncate context token lists to this length
        """
        self.tokenizer = tokenizer
        self.paths = paths
        self.max_context_length = max_context_length
        # One (initially empty) DataFrame per rollout id; rows filled lazily
        # as valid timesteps are observed.
        self.tally: Dict[str, pd.DataFrame] = {path: pd.DataFrame() for path in paths}

        # set later by setters
        self.contexts: torch.Tensor | None = None
        self.action_mask: torch.Tensor | None = None
        self.range: Tuple[int, int] | None = None

    # --------- Utilities ---------

    def tids_to_str(self, tids: List[int]) -> List[str]:
        """Convert a list of token IDs to a list of token strings."""
        return self.tokenizer.convert_ids_to_tokens(tids)

    def _ensure_ready(self):
        # Guard: the per-batch setters must have been called before any
        # add_contexts/add_data call.
        assert self.action_mask is not None, "call set_action_mask(mask) first"
        assert self.range is not None, "call set_range((start, end)) first"

    @staticmethod
    def _sanitize_filename(name: Any) -> str:
        """Make a safe filename from any rollout_id."""
        s = str(name)
        # Characters replaced with "_": path separators, whitespace and shell-
        # unfriendly punctuation.
        bad = {os.sep, " ", ":", "|", "<", ">", '"', "'"}
        if os.altsep is not None:
            bad.add(os.altsep)
        for ch in bad:
            s = s.replace(ch, "_")
        return s

    @staticmethod
    def _pad_left(seq: List[Any], length: int, pad_val: Any = "") -> List[Any]:
        """Left-pad a sequence to `length` with `pad_val`.

        NOTE(review): not called by the other methods in this class —
        `add_contexts` inlines equivalent padding logic; kept as a utility.
        """
        if len(seq) >= length:
            return seq[-length:]
        return [pad_val] * (length - len(seq)) + list(seq)

    # --------- Setters ---------

    def set_action_mask(self, action_mask: torch.Tensor):
        """
        action_mask: (B, S) bool or 0/1 indicating valid steps
        """
        self.action_mask = action_mask

    def set_range(self, range: Tuple[int, int]):
        """
        range: slice (start, end) into self.paths for current batch
        """
        self.range = range

    # --------- Column builders ---------

    def add_contexts(self, contexts: torch.Tensor):
        """
        Add a single 'context' column (list[str]) for valid steps.

        Expects `contexts` with shape (B, S): token id at each timestep.
        For each valid timestep t, we use the last N tokens up to and including t:
            window = contexts[i, max(0, t - N + 1) : t + 1]
        The list is left-padded with "" to always be length N.
        """
        self._ensure_ready()

        current_paths = self.paths[self.range[0] : self.range[1]]
        B, S = contexts.shape
        N = self.max_context_length

        # to CPU ints once
        contexts_cpu = contexts.detach().to("cpu")

        for i in range(B):
            rollout_id = current_paths[i]
            df = self.tally.get(rollout_id, pd.DataFrame())

            # Timesteps where this rollout actually acted.
            valid_idx = torch.nonzero(
                self.action_mask[i].bool(), as_tuple=False
            ).squeeze(-1)
            if valid_idx.numel() == 0:
                # No valid steps: keep (possibly empty) frame and move on.
                self.tally[rollout_id] = df
                continue

            idx_list = valid_idx.tolist()

            # ensure index contains valid steps
            if df.empty:
                df = pd.DataFrame(index=idx_list)
            else:
                # Union of existing and new timesteps, kept sorted so rows
                # from successive calls align on the same index.
                new_index = sorted(set(df.index.tolist()) | set(idx_list))
                if list(df.index) != new_index:
                    df = df.reindex(new_index)

            # build context windows
            ctx_token_lists = []
            for t in idx_list:
                start = max(0, t - N + 1)
                window_ids = contexts_cpu[i, start : t + 1].tolist()
                window_toks = self.tids_to_str([int(x) for x in window_ids])
                # Left-pad with "" (or truncate) so every cell is length N.
                if len(window_toks) < N:
                    window_toks = [""] * (N - len(window_toks)) + window_toks
                else:
                    window_toks = window_toks[-N:]
                ctx_token_lists.append(window_toks)

            # single 'context' column
            if "context" not in df.columns:
                df["context"] = pd.Series(index=df.index, dtype=object)
            # Assign via an index-aligned object Series so list cells are
            # stored as-is (not broadcast element-wise).
            df.loc[idx_list, "context"] = pd.Series(
                ctx_token_lists, index=idx_list, dtype=object
            )

            self.tally[rollout_id] = df

    def add_data(
        self,
        metric_id: str,
        metrics: torch.Tensor,
        to_tids: bool = False,
    ):
        """
        Add a metric column for valid steps.

        Args:
            metric_id: column name
            metrics: shape (B, S) for scalars/ids or (B, S, K) for top-k vectors
            to_tids: if True, treat ints/lists of ints as tids and convert to tokens
        """
        self._ensure_ready()
        current_paths = self.paths[self.range[0] : self.range[1]]

        if metrics.dim() == 2:
            B, S = metrics.shape
        elif metrics.dim() == 3:
            B, S, _ = metrics.shape
        else:
            raise ValueError("metrics must be (B, S) or (B, S, K)")

        for i in range(B):
            rollout_id = current_paths[i]
            df = self.tally.get(rollout_id, pd.DataFrame())

            valid_idx = torch.nonzero(
                self.action_mask[i].bool(), as_tuple=False
            ).squeeze(-1)
            if valid_idx.numel() == 0:
                self.tally[rollout_id] = df
                continue

            idx_list = valid_idx.detach().cpu().tolist()

            # Ensure index contains valid steps
            if df.empty:
                df = pd.DataFrame(index=idx_list)
            else:
                new_index = sorted(set(df.index.tolist()) | set(idx_list))
                if list(df.index) != new_index:
                    df = df.reindex(new_index)

            # Slice metrics at valid steps
            m_valid = metrics[i][valid_idx]

            # -> pure python lists (1D list or list-of-lists)
            values = m_valid.detach().cpu().tolist()

            # optional tids -> tokens
            if to_tids:

                def _to_tokish(x):
                    # (B, S, K) rows arrive as lists; (B, S) rows as scalars.
                    if isinstance(x, list):
                        return self.tids_to_str([int(v) for v in x])
                    else:
                        return self.tids_to_str([int(x)])[0]

                values = [_to_tokish(v) for v in values]

            # Ensure column exists with object dtype, then assign via aligned Series
            if metric_id not in df.columns:
                df[metric_id] = pd.Series(index=df.index, dtype=object)

            if isinstance(values, np.ndarray):
                values = values.tolist()

            if len(values) != len(idx_list):
                raise ValueError(
                    f"Length mismatch for '{metric_id}': values={len(values)} vs idx_list={len(idx_list)}"
                )

            df.loc[idx_list, metric_id] = pd.Series(
                values, index=idx_list, dtype=object
            )
            self.tally[rollout_id] = df

    # --------- Saving ---------

    def save(self, path: str):
        """
        Write a manifest JSON and one CSV per rollout.

        - Manifest includes metadata only (safe to JSON).
        - Each rollout CSV is written with index label 'timestep'.
        - Only a single 'context' column (list[str]).

        Args:
            path: Directory to write the CSVs and the manifest into.
        """
        # Nothing recorded yet: skip writing entirely.
        if not self.tally or all(df.empty for df in self.tally.values()):
            return

        os.makedirs(path, exist_ok=True)
        from datetime import datetime

        now = datetime.now()

        manifest = {
            "created_at": f"{now:%Y-%m-%d %H:%M:%S}",
            "max_context_length": self.max_context_length,
            "num_rollouts": len(self.tally),
            "rollouts": [],
        }

        for rid, df in self.tally.items():
            rid_str = str(rid)
            safe_name = self._sanitize_filename(rid_str)
            csv_path = os.path.join(path, f"{safe_name}_tokenwise.csv")

            # Put 'context' first, then the rest
            # NOTE(review): if a frame has no 'context' column (add_contexts
            # never ran for it), df[cols] raises and the rollout is silently
            # skipped from both CSV output and the manifest.
            cols = ["context"] + [c for c in df.columns if c != "context"]
            try:
                df[cols].to_csv(csv_path, index=True, index_label="timestep")
            except Exception as e:
                continue

            manifest["rollouts"].append(
                {
                    "rollout_id": rid_str,
                    "csv": csv_path,
                    "num_rows": int(df.shape[0]),
                    "columns": cols,
                }
            )

        manifest_path = os.path.join(
            path, f"tokenwise_manifest_{now:%Y-%m-%d___%H-%M-%S}.json"
        )
        with open(manifest_path, "w") as fp:
            json.dump(manifest, fp, indent=2)
src_code_for_reproducibility/training/tokenize_chats.py ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import sys
3
+
4
+ import regex
5
+ import torch
6
+ from transformers import AutoTokenizer
7
+
8
+ from mllm.training.training_data_utils import TrainingChatTurn, TrajectoryBatch
9
+
10
+ logger = logging.getLogger(__name__)
11
+ logger.addHandler(logging.StreamHandler(sys.stdout))
12
+
13
+
14
+ # def get_chat_dicts(chat: list[TrainingChatTurn]) -> list[dict]:
15
+ # chat_dicts = [chat_turn.dict() for chat_turn in chat]
16
+ # return chat_dicts
17
+
18
+
19
def process_training_chat(
    tokenizer: AutoTokenizer,
    chat_history: list[TrainingChatTurn],
    entropy_mask_regex: str | None = None,
    exploration_prompts_to_remove: list[str] | None = None,
    use_engine_out_token_ids: bool = False,
) -> tuple[
    torch.IntTensor,
    torch.BoolTensor,
    torch.BoolTensor,
    torch.IntTensor,
    torch.BoolTensor,
    torch.FloatTensor,
]:
    """Tokenize a single training chat and build aligned per-token masks.

    Given an ordered list of `TrainingChatTurn`, this function concatenates the
    pre-tokenized ids of each turn (assistant turns use `out_token_ids`, other
    roles use `chat_template_token_ids`) and constructs parallel 1D tensors
    aligned with the concatenated tokens:

    - input_ids: token ids for the entire chat, turn by turn
    - action_mask: True for tokens that belong to assistant turns (i.e., model
      actions), False for tokens from other roles
    - entropy_mask: True for tokens of turns whose content matches
      `entropy_mask_regex` (or all tokens when the regex is None)
    - timesteps: per-token time step copied from the originating turn's
      `time_step` (LongTensor)
    - state_ends_mask: True for the last token of any turn where
      `is_state_end` is True, otherwise False
    - engine_log_probs: inference-engine log-probs for assistant tokens, zeros
      for all other tokens

    Important details:
    - Exploration prompts are stripped by mutating `train_chat_turn.content`
      in place; callers should not rely on the original content afterwards.
    - No truncation or padding is performed here; downstream code handles
      batching/padding as needed.
    - `use_engine_out_token_ids` is currently unused (assistant turns always
      use `out_token_ids`); it is kept for interface compatibility.

    Args:
        tokenizer: A Hugging Face tokenizer (kept for interface compatibility;
            turns carry pre-tokenized ids).
        chat_history: Ordered list of `TrainingChatTurn` forming one dialogue.
        entropy_mask_regex: Optional regex selecting turns whose tokens keep
            entropy_mask=True; non-matching turns are fully masked out.
        exploration_prompts_to_remove: Substrings removed from turn contents
            before training. Defaults to no removal.
        use_engine_out_token_ids: Unused; reserved.

    Returns:
        A tuple of six 1D tensors of equal length N (total tokens across all
        turns): (input_ids, action_mask, entropy_mask, timesteps,
        state_ends_mask, engine_log_probs).
    """
    # Avoid a shared mutable default argument.
    if exploration_prompts_to_remove is None:
        exploration_prompts_to_remove = []

    state_ends_mask = []
    input_ids = []
    action_mask = []
    timesteps = []
    entropy_mask = []
    engine_log_probs = []
    for train_chat_turn in chat_history:
        is_state_end = train_chat_turn.is_state_end
        time_step = train_chat_turn.time_step
        is_action = train_chat_turn.role == "assistant"

        # Remove exploration prompts from training data.
        # NOTE: mutates the turn's content in place.
        for exploration_prompt in exploration_prompts_to_remove:
            if exploration_prompt in train_chat_turn.content:
                train_chat_turn.content = train_chat_turn.content.replace(
                    exploration_prompt, ""
                )

        if entropy_mask_regex is not None:
            is_entropy_mask_true = (
                regex.search(entropy_mask_regex, train_chat_turn.content) is not None
            )
        else:
            is_entropy_mask_true = True

        if is_action:
            # Assistant turns: use the engine's sampled token ids and log-probs.
            chat_turn_ids = train_chat_turn.out_token_ids
            nb_chat_turns_ids = chat_turn_ids.numel()
            action_mask.append(torch.ones(nb_chat_turns_ids, dtype=torch.bool))
            engine_log_probs.append(train_chat_turn.log_probs)
        else:
            # Non-assistant turns: use chat-template ids; log-probs are zeroed.
            chat_turn_ids = train_chat_turn.chat_template_token_ids
            nb_chat_turns_ids = chat_turn_ids.numel()
            action_mask.append(torch.zeros(nb_chat_turns_ids, dtype=torch.bool))
            engine_log_probs.append(torch.zeros(nb_chat_turns_ids, dtype=torch.float))

        state_ends_mask.append(torch.zeros(nb_chat_turns_ids, dtype=torch.bool))
        if is_state_end:
            state_ends_mask[-1][-1] = True  # last token is state end
        input_ids.append(chat_turn_ids)
        # All-True or all-False per turn, depending on the regex match above.
        entropy_mask.append(
            torch.full((nb_chat_turns_ids,), is_entropy_mask_true, dtype=torch.bool)
        )
        timesteps.append(torch.ones(nb_chat_turns_ids) * time_step)

    input_ids = torch.cat(input_ids)
    action_mask = torch.cat(action_mask)
    entropy_mask = torch.cat(entropy_mask)
    timesteps = torch.cat(timesteps)
    timesteps = timesteps.to(torch.long)
    state_ends_mask = torch.cat(state_ends_mask)
    engine_log_probs = torch.cat(engine_log_probs)

    return (
        input_ids,
        action_mask,
        entropy_mask,
        timesteps,
        state_ends_mask,
        engine_log_probs,
    )
src_code_for_reproducibility/training/trainer_ad_align.py ADDED
@@ -0,0 +1,495 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import copy
2
+ import logging
3
+ import sys
4
+ from dataclasses import dataclass
5
+ from typing import Tuple
6
+
7
+ import torch
8
+ from torch.nn.utils.rnn import pad_sequence
9
+
10
+ from mllm.markov_games.rollout_tree import (
11
+ ChatTurn,
12
+ RolloutTreeBranchNode,
13
+ RolloutTreeRootNode,
14
+ )
15
+ from mllm.training.credit_methods import (
16
+ get_advantage_alignment_credits,
17
+ get_discounted_state_visitation_credits,
18
+ )
19
+ from mllm.training.tally_metrics import Tally
20
+ from mllm.training.tally_rollout import RolloutTally, RolloutTallyItem
21
+ from mllm.training.tally_tokenwise import ContextualizedTokenwiseTally
22
+ from mllm.training.tokenize_chats import process_training_chat
23
+ from mllm.training.trainer_common import BaseTrainer
24
+ from mllm.training.training_data_utils import (
25
+ AdvantagePacket,
26
+ TrainingBatch,
27
+ TrainingChatTurn,
28
+ TrajectoryBatch,
29
+ get_main_chat_list_and_rewards,
30
+ get_tokenwise_credits,
31
+ )
32
+ from mllm.utils.resource_context import resource_logger_context
33
+
34
+ logger = logging.getLogger(__name__)
35
+ logger.addHandler(logging.StreamHandler(sys.stdout))
36
+
37
+ RolloutId = int
38
+ AgentId = str
39
+
40
+
41
@dataclass
class AdAlignTrainingData:
    # Per-agent container for advantage-alignment training inputs and the
    # per-rollout quantities derived from them. "jT" denotes the (jagged)
    # number of time steps of a given rollout; "A" the number of alternative
    # actions branched at each step.
    # Identifier of the agent this data belongs to.
    agent_id: str
    # Tokenized main trajectories for this agent.
    main_data: TrajectoryBatch
    # list-of-tensors: per rollout advantages with length jT
    main_advantages: list[torch.FloatTensor] | None = None
    # list-of-tensors: per rollout matrix (jT, A)
    alternative_advantages: list[torch.FloatTensor] | None = None
    # list-of-tensors: per rollout advantage-alignment credits (filled later).
    advantage_alignment_credits: list[torch.FloatTensor] | None = None
50
+
51
+
52
def get_alternative_chat_histories(
    agent_id: str, root: RolloutTreeRootNode
) -> tuple[list[list[TrainingChatTurn]], list[torch.FloatTensor]]:
    """Collect all alternative (branched) chat histories for one agent.

    Walks the main trajectory of the rollout tree; at each branch node, every
    alternative child for `agent_id` yields one full chat history composed of
    the main-trajectory prefix up to that node plus the alternative suffix,
    together with the matching reward sequence.

    Args:
        agent_id: The agent we want to get the chat history for.
        root: The root of the rollout tree.

    Returns:
        alternative_chats: list[list[TrainingChatTurn]] (jT*A, jS')
        alternative_rewards: list[torch.FloatTensor] (jT*A, jT')
        where jT is the main-trajectory length and A the number of
        alternative actions per step.

    Note:
        The original signature annotated the return as
        ``list[list[TrainingChatTurn], list[torch.FloatTensor]]``, which is
        not a valid generic; it is a tuple.
    """
    current_node = root.child
    # Prefix of the main trajectory accumulated so far (shared by all
    # branches taken at later time steps).
    pre_branch_chat = []
    pre_branch_rewards = []
    alternative_rewards = []
    alternative_chats = []
    while current_node is not None:
        assert isinstance(
            current_node, RolloutTreeBranchNode
        ), "Current node should be a branch node."
        main_node = current_node.main_child
        branches = current_node.branches
        current_node = main_node.child

        # Get the `A` alternative trajectories branching at this step.
        alternative_nodes = branches[agent_id]
        for alt_node in alternative_nodes:
            post_branch_chat, post_branch_rewards = get_main_chat_list_and_rewards(
                agent_id=agent_id, root=alt_node
            )
            # Full alternative = shared main prefix + branched suffix.
            branch_chat = pre_branch_chat + post_branch_chat
            alternative_chats.append(branch_chat)
            alternative_rewards.append(
                torch.cat([torch.tensor(pre_branch_rewards), post_branch_rewards])
            )

        # Extend the shared prefix with this step's main-trajectory turns.
        chat_turns: list[ChatTurn] = main_node.step_log.action_logs[agent_id].chat_turns
        chat_turns: list[TrainingChatTurn] = [
            TrainingChatTurn(time_step=main_node.time_step, **turn.model_dump())
            for turn in chat_turns
        ]

        pre_branch_chat.extend(chat_turns)
        pre_branch_rewards.append(
            main_node.step_log.simulation_step_log.rewards[agent_id]
        )

    return alternative_chats, alternative_rewards
101
+
102
+
103
+ class TrainerAdAlign(BaseTrainer):
104
+ """
105
+ Extends the reinforce trainer to support Advantage Alignment.
106
+ """
107
+
108
    def __init__(
        self,
        ad_align_beta: float,
        ad_align_gamma: float,
        ad_align_exclude_k_equals_t: bool,
        ad_align_use_sign: bool,
        ad_align_clipping: float,
        ad_align_force_coop_first_step: bool,
        use_old_ad_align: bool,
        use_time_regularization: bool,
        rloo_branch: bool,
        reuse_baseline: bool,
        ad_align_beta_anneal_step: int = -1,
        ad_align_beta_anneal_rate: float = 0.5,
        min_ad_align_beta: float = 0.1,
        mean_normalize_ad_align: bool = False,
        whiten_adalign_advantages: bool = False,
        whiten_adalign_advantages_time_step_wise: bool = False,
        ad_align_discount_t: bool = False,
        *args,
        **kwargs,
    ):
        """
        Initialize the advantage alignment trainer.

        Args:
            ad_align_beta: Beta parameter for the advantage alignment
                (scales the opponent-shaping term).
            ad_align_gamma: Gamma parameter for the advantage alignment.
            ad_align_exclude_k_equals_t: Whether to include k = t in the advantage alignment.
            ad_align_use_sign: Whether to use sign in the advantage alignment.
            ad_align_clipping: Clipping value for the advantage alignment.
            ad_align_force_coop_first_step: Whether to force coop on the first step of the advantage alignment.
            use_old_ad_align: Whether to use the legacy advantage-alignment
                formulation.
            use_time_regularization: Whether to scale alignment weights by
                1/(1+t).
            rloo_branch: Presumably enables RLOO baselines over branches —
                TODO(review): confirm against trainer usage.
            reuse_baseline: Presumably reuses the main-trajectory baseline for
                branches — TODO(review): confirm against trainer usage.
            ad_align_beta_anneal_step: Step interval at which beta is
                annealed; -1 disables annealing.
            ad_align_beta_anneal_rate: Multiplicative annealing rate for beta.
            min_ad_align_beta: Lower bound for the annealed beta.
            mean_normalize_ad_align: Whether to mean-normalize the alignment
                credits across the batch.
            whiten_adalign_advantages: Whether to whiten the alignment credits
                globally.
            whiten_adalign_advantages_time_step_wise: Whether to whiten the
                alignment credits per time step.
            ad_align_discount_t: Whether to apply time-step discounting in the
                alignment credits.
            *args: Forwarded to `BaseTrainer.__init__`.
            **kwargs: Forwarded to `BaseTrainer.__init__`.
        """
        super().__init__(*args, **kwargs)
        self.ad_align_beta = ad_align_beta
        self.ad_align_gamma = ad_align_gamma
        self.ad_align_exclude_k_equals_t = ad_align_exclude_k_equals_t
        self.ad_align_use_sign = ad_align_use_sign
        self.ad_align_clipping = ad_align_clipping
        self.ad_align_force_coop_first_step = ad_align_force_coop_first_step
        self.use_old_ad_align = use_old_ad_align
        self.use_time_regularization = use_time_regularization
        self.rloo_branch = rloo_branch
        self.reuse_baseline = reuse_baseline
        self.ad_align_beta_anneal_step = ad_align_beta_anneal_step
        self.ad_align_beta_anneal_rate = ad_align_beta_anneal_rate
        self.min_ad_align_beta = min_ad_align_beta
        # Sentinel: no annealing step has been applied yet.
        self.past_ad_align_step = -1
        self.mean_normalize_ad_align = mean_normalize_ad_align
        self.whiten_adalign_advantages = whiten_adalign_advantages
        self.whiten_adalign_advantages_time_step_wise = (
            whiten_adalign_advantages_time_step_wise
        )
        self.ad_align_discount_t = ad_align_discount_t
        # Per-agent training data populated by set_agent_trajectory_data.
        self.training_data: dict[AgentId, AdAlignTrainingData] = {}
        # Human-readable rollout/agent identifiers, used for debug tallies.
        self.debug_path_list: list[str] = []
163
+
164
+ def set_agent_trajectory_data(
165
+ self, agent_id: str, roots: list[RolloutTreeRootNode]
166
+ ):
167
+ """
168
+ TOWRITE
169
+ Set the advantage alignment data for the trainer.
170
+ """
171
+
172
+ B = len(roots) # Number of rollouts
173
+
174
+ # For main rollouts
175
+ batch_rollout_ids = []
176
+ batch_crn_ids = []
177
+ batch_input_ids = []
178
+ batch_action_mask = []
179
+ batch_entropy_mask = []
180
+ batch_timesteps = []
181
+ batch_state_ends_mask = []
182
+ batch_engine_log_probs = []
183
+ batch_rewards = []
184
+
185
+ # For alternative actions rollouts
186
+ batch_branching_time_steps = []
187
+ alternative_batch_input_ids = []
188
+ alternative_batch_action_mask = []
189
+ alternative_batch_entropy_mask = []
190
+ alternative_batch_timesteps = []
191
+ alternative_batch_state_ends_mask = []
192
+ alternative_batch_engine_log_probs = []
193
+ alternative_batch_rewards = []
194
+ jT_list = []
195
+
196
+ try:
197
+ A = len(roots[0].child.branches[agent_id]) # Number of alternative actions
198
+ except:
199
+ A = 0
200
+
201
+ for root in roots:
202
+ rollout_id = root.id
203
+ self.debug_path_list.append(
204
+ "mgid:" + str(rollout_id) + "_agent_id:" + agent_id
205
+ )
206
+ # Get main trajectory
207
+ batch_rollout_ids.append(rollout_id)
208
+ batch_crn_ids.append(root.crn_id)
209
+ main_chat, main_rewards = get_main_chat_list_and_rewards(
210
+ agent_id=agent_id, root=root
211
+ )
212
+ (
213
+ input_ids,
214
+ action_mask,
215
+ entropy_mask,
216
+ timesteps,
217
+ state_ends_mask,
218
+ engine_log_probs,
219
+ ) = process_training_chat(
220
+ tokenizer=self.tokenizer,
221
+ chat_history=main_chat,
222
+ entropy_mask_regex=self.entropy_mask_regex,
223
+ exploration_prompts_to_remove=self.exploration_prompts_to_remove,
224
+ )
225
+ batch_input_ids.append(input_ids)
226
+ batch_action_mask.append(action_mask)
227
+ batch_entropy_mask.append(entropy_mask)
228
+ batch_timesteps.append(timesteps)
229
+ batch_state_ends_mask.append(state_ends_mask)
230
+ batch_engine_log_probs.append(engine_log_probs)
231
+ batch_rewards.append(main_rewards)
232
+ jT = main_rewards.numel() # TODO: better than this
233
+ jT_list.append(jT)
234
+ if A > 0:
235
+ # We get the branching time steps for each of the `jT` time steps in the main trajectory.
236
+ branching_time_steps = [bt for item in range(jT) for bt in A * [item]]
237
+ batch_branching_time_steps.extend(branching_time_steps)
238
+
239
+ # Get all of the (jT*A) alternative trajectories in the tree
240
+ # (jT is the number of time steps in the main trajectory, A is the number of alternative actions)
241
+ alternative_chats, alternative_rewards = get_alternative_chat_histories(
242
+ agent_id=agent_id, root=root
243
+ )
244
+ assert (
245
+ len(alternative_chats) == A * jT
246
+ ), "Incorrect number of alternative trajectories."
247
+
248
+ for chat, rewards in zip(alternative_chats, alternative_rewards):
249
+ (
250
+ input_ids,
251
+ action_mask,
252
+ entropy_mask,
253
+ timesteps,
254
+ state_ends_mask,
255
+ engine_log_probs,
256
+ ) = process_training_chat(
257
+ tokenizer=self.tokenizer,
258
+ chat_history=chat,
259
+ entropy_mask_regex=self.entropy_mask_regex,
260
+ exploration_prompts_to_remove=self.exploration_prompts_to_remove,
261
+ )
262
+ alternative_batch_input_ids.append(input_ids)
263
+ alternative_batch_action_mask.append(action_mask)
264
+ alternative_batch_entropy_mask.append(entropy_mask)
265
+ alternative_batch_timesteps.append(timesteps)
266
+ alternative_batch_state_ends_mask.append(state_ends_mask)
267
+ alternative_batch_engine_log_probs.append(engine_log_probs)
268
+ alternative_batch_rewards.append(rewards)
269
+
270
+ jT_list = torch.Tensor(jT_list)
271
+
272
+ # Assert that number of alternative actions is constant
273
+ # assert len(set(nb_alternative_actions)) == 1, "Number of alternative actions must be constant"
274
+ # A = nb_alternative_actions[0]
275
+
276
+ trajectory_batch = TrajectoryBatch(
277
+ rollout_ids=torch.tensor(batch_rollout_ids, dtype=torch.int32), # (B,)
278
+ crn_ids=torch.tensor(batch_crn_ids, dtype=torch.int32),
279
+ agent_ids=[agent_id] * len(batch_rollout_ids),
280
+ batch_input_ids=batch_input_ids,
281
+ batch_action_mask=batch_action_mask,
282
+ batch_entropy_mask=batch_entropy_mask,
283
+ batch_timesteps=batch_timesteps,
284
+ batch_state_ends_mask=batch_state_ends_mask,
285
+ batch_engine_log_probs=batch_engine_log_probs,
286
+ batch_rewards=batch_rewards,
287
+ )
288
+ # Get Advantages & Train Critic
289
+ with resource_logger_context(
290
+ logger, "Get advantages with critic gradient accumulation"
291
+ ):
292
+ self.batch_advantages: torch.FloatTensor = (
293
+ self.get_advantages_with_critic_gradient_accumulation(trajectory_batch)
294
+ ) # (B, jT)
295
+
296
+ if A > 0:
297
+ # Here, `A` is the number of alternative actions / trajectories taken at each time step.
298
+ # For each of the `B` rollout perspectives, at each of its jT (`j` is for jagged, since each main rollout may be of a different length) steps, we take A alternate trajectories (from different actions).
299
+ # Therefore, we have ∑jT * A trajectories to process. If each of the main trajectories have T steps, we will have `B*T*A` to process.
300
+ with resource_logger_context(logger, "Create alternative trajectory batch"):
301
+ sum_jT = int(torch.sum(jT_list).item())
302
+ jT_list = (
303
+ jT_list.int().tolist()
304
+ ) # (jT,) # (we only want the advantages where we branched out)
305
+ alternative_trajectory_batch = TrajectoryBatch(
306
+ rollout_ids=torch.zeros(A * sum_jT, dtype=torch.int32),
307
+ crn_ids=torch.zeros(A * sum_jT, dtype=torch.int32),
308
+ agent_ids=[agent_id] * (A * sum_jT),
309
+ batch_input_ids=alternative_batch_input_ids,
310
+ batch_action_mask=alternative_batch_action_mask,
311
+ batch_entropy_mask=alternative_batch_entropy_mask,
312
+ batch_timesteps=alternative_batch_timesteps,
313
+ batch_state_ends_mask=alternative_batch_state_ends_mask,
314
+ batch_engine_log_probs=alternative_batch_engine_log_probs,
315
+ batch_rewards=alternative_batch_rewards,
316
+ )
317
+
318
+ # Get alternative advantages
319
+ # BAAs stands for batch alternative advantages
320
+ # (torch nested tensors have very little api support, so we have to do some odd manual work here)
321
+ with resource_logger_context(
322
+ logger, "Compute alternative advantage estimates"
323
+ ):
324
+ BAAs_list = self.get_advantages_with_critic_gradient_accumulation(
325
+ alternative_trajectory_batch
326
+ ) # list length (∑jT * A), each (jT',)
327
+ # Pad alternative advantages to (∑jT*A, P)
328
+
329
+ BAAs_padded = pad_sequence(
330
+ BAAs_list, batch_first=True, padding_value=0.0
331
+ )
332
+ branch_idx = torch.tensor(
333
+ batch_branching_time_steps,
334
+ device=BAAs_padded.device,
335
+ dtype=torch.long,
336
+ )
337
+ gathered = BAAs_padded.gather(
338
+ dim=1, index=branch_idx.unsqueeze(1)
339
+ ).squeeze(1)
340
+ # Reshape and split per rollout, then transpose to (jT_i, A)
341
+ gathered = gathered.view(A, sum_jT) # (A, ∑jT)
342
+ blocks = list(
343
+ torch.split(gathered, jT_list, dim=1)
344
+ ) # len B, shapes (A, jT_i)
345
+ BAAs = [
346
+ blk.transpose(0, 1).contiguous() for blk in blocks
347
+ ] # list of (jT_i, A)
348
+ if self.ad_align_beta_anneal_step > 0:
349
+ max_rollout_id = torch.max(trajectory_batch.rollout_ids) + 1
350
+ if (
351
+ max_rollout_id % self.ad_align_beta_anneal_step == 0
352
+ and self.past_ad_align_step != max_rollout_id
353
+ ):
354
+ self.ad_align_beta = max(
355
+ self.ad_align_beta * self.ad_align_beta_anneal_rate,
356
+ self.min_ad_align_beta,
357
+ )
358
+ logger.info(f"Annealing ad_align_beta to {self.ad_align_beta}")
359
+ self.past_ad_align_step = max_rollout_id
360
+ self.training_data[agent_id] = AdAlignTrainingData(
361
+ agent_id=agent_id,
362
+ main_data=trajectory_batch,
363
+ main_advantages=self.batch_advantages,
364
+ alternative_advantages=BAAs if A > 0 else None,
365
+ )
366
+
367
+ def share_advantage_data(self) -> list[AdvantagePacket]:
368
+ """
369
+ Share the advantage alignment data with other agents.
370
+ Returns:
371
+ AdvantagePacket: The advantage packet containing the agent's advantages.
372
+ """
373
+ logger.info(f"Sharing advantage alignment data.")
374
+ advantage_packets = []
375
+ for _, agent_data in self.training_data.items():
376
+ advantage_packets.append(
377
+ AdvantagePacket(
378
+ agent_id=agent_data.agent_id,
379
+ rollout_ids=agent_data.main_data.rollout_ids,
380
+ main_advantages=agent_data.main_advantages,
381
+ )
382
+ )
383
+ return advantage_packets
384
+
385
    def receive_advantage_data(self, advantage_packets: list[AdvantagePacket]):
        """
        Receive advantage packets from other players.
        These contain the advantages of the other players' rollouts estimated by them.

        For each agent we train, the co-agent's advantages are matched by
        rollout id, padded alongside our own advantages, and combined via
        ``get_advantage_alignment_credits`` into per-timestep credits that are
        attached to the agent's trajectory batch for the policy-gradient step.

        Args:
            advantage_packets: Packets from all players; packets whose
                ``agent_id`` equals the agent being processed are ignored.
        """
        logger.info(f"Receiving advantage packets.")

        assert (
            len(advantage_packets) > 0
        ), "At least one advantage packet must be provided."

        for agent_id, agent_data in self.training_data.items():
            # Packets estimated by other players only.
            coagent_advantage_packets = [
                packet for packet in advantage_packets if packet.agent_id != agent_id
            ]
            agent_rollout_ids = agent_data.main_data.rollout_ids
            agent_advantages = agent_data.main_advantages
            co_agent_advantages = []
            # Match each of our rollouts to the co-agent's advantage estimate
            # for the same rollout id.
            for rollout_id in agent_rollout_ids:
                for co_agent_packet in coagent_advantage_packets:
                    if rollout_id in co_agent_packet.rollout_ids:
                        index = torch.where(rollout_id == co_agent_packet.rollout_ids)[
                            0
                        ].item()
                        co_agent_advantages.append(
                            co_agent_packet.main_advantages[index]
                        )
                        # assumes that its two player game, with one co-agent
                        break
            assert len(co_agent_advantages) == len(agent_advantages)
            B = len(agent_advantages)
            # Trajectories are jagged; the per-rollout lengths must agree
            # between our advantages and the co-agent's.
            assert all(
                a.shape[0] == b.shape[0]
                for a, b in zip(co_agent_advantages, agent_advantages)
            ), "Number of advantages must match for advantage alignment."

            # Get padded tensors (advantage alignment is invariant to padding)
            lengths = torch.tensor(
                [len(t) for t in agent_advantages],
                device=self.device,
                dtype=torch.long,
            )
            padded_main_advantages = pad_sequence(
                agent_advantages, batch_first=True, padding_value=0.0
            )
            if agent_data.alternative_advantages:
                padded_alternative_advantages = pad_sequence(
                    agent_data.alternative_advantages,
                    batch_first=True,
                    padding_value=0.0,
                )  # (B, P, A)
            else:
                padded_alternative_advantages = None
            padded_co_agent_advantages = pad_sequence(
                co_agent_advantages, batch_first=True, padding_value=0.0
            )

            # Create training batch data
            credits, sub_tensors = get_advantage_alignment_credits(
                a1=padded_main_advantages,
                a1_alternative=padded_alternative_advantages,
                a2=padded_co_agent_advantages,
                beta=self.ad_align_beta,
                gamma=self.ad_align_gamma,
                exclude_k_equals_t=self.ad_align_exclude_k_equals_t,
                use_sign=self.ad_align_use_sign,
                clipping=self.ad_align_clipping,
                force_coop_first_step=self.ad_align_force_coop_first_step,
                use_old_ad_align=self.use_old_ad_align,
                use_time_regularization=self.use_time_regularization,
                rloo_branch=self.rloo_branch,
                reuse_baseline=self.reuse_baseline,
                mean_normalize_ad_align=self.mean_normalize_ad_align,
                whiten_adalign_advantages=self.whiten_adalign_advantages,
                whiten_adalign_advantages_time_step_wise=self.whiten_adalign_advantages_time_step_wise,
                discount_t=self.ad_align_discount_t,
            )
            # Log every intermediate tensor returned by the credit computation.
            for key, value in sub_tensors.items():
                self.rollout_tally.add_metric(
                    path=[key],
                    rollout_tally_item=RolloutTallyItem(
                        crn_ids=agent_data.main_data.crn_ids,
                        rollout_ids=agent_data.main_data.rollout_ids,
                        agent_ids=agent_data.main_data.agent_ids,
                        metric_matrix=value,
                    ),
                )

            # Optionally reweight credits by discounted state visitation.
            if not self.skip_discounted_state_visitation:
                credits = get_discounted_state_visitation_credits(
                    credits,
                    self.discount_factor,
                )
                self.rollout_tally.add_metric(
                    path=["discounted_state_visitation_credits"],
                    rollout_tally_item=RolloutTallyItem(
                        crn_ids=agent_data.main_data.crn_ids,
                        rollout_ids=agent_data.main_data.rollout_ids,
                        agent_ids=agent_data.main_data.agent_ids,
                        metric_matrix=sub_tensors[
                            "discounted_state_visitation_credits"
                        ],
                    ),
                )

            # Slice back to jagged
            advantage_alignment_credits = [credits[i, : lengths[i]] for i in range(B)]
            # Replace stored training data for this agent by the concrete trajectory batch
            # and attach the computed credits for policy gradient.
            self.training_data[agent_id] = agent_data.main_data
            self.training_data[agent_id].batch_credits = advantage_alignment_credits
src_code_for_reproducibility/training/trainer_common.py ADDED
@@ -0,0 +1,1054 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ TODO: Add coefficients for losses (depend on total number of tokens or batch)
3
+ TODO: adapt reinforce step for torch.compile
4
+ TODO: add lr schedulers support
5
+ """
6
+ import logging
7
+ import os
8
+ import pickle
9
+ import sys
10
+ from abc import ABC, abstractmethod
11
+ from typing import Callable, Literal, Union
12
+
13
+ import numpy as np
14
+ import torch
15
+ import torch.nn.functional as F
16
+ from accelerate import Accelerator
17
+ from pandas._libs.tslibs.offsets import CBMonthBegin
18
+ from peft import LoraConfig
19
+ from torch.nn.utils.rnn import pad_sequence
20
+ from transformers import AutoModelForCausalLM, AutoTokenizer
21
+
22
+ from mllm.markov_games.rollout_tree import *
23
+ from mllm.markov_games.rollout_tree import RolloutTreeRootNode
24
+ from mllm.training.annealing_methods import sigmoid_annealing
25
+ from mllm.training.credit_methods import (
26
+ get_discounted_returns,
27
+ get_generalized_advantage_estimates,
28
+ get_rloo_credits,
29
+ whiten_advantages,
30
+ whiten_advantages_time_step_wise,
31
+ )
32
+ from mllm.training.tally_metrics import Tally
33
+ from mllm.training.tally_rollout import RolloutTally, RolloutTallyItem
34
+ from mllm.training.tally_tokenwise import ContextualizedTokenwiseTally
35
+ from mllm.training.tokenize_chats import *
36
+ from mllm.training.tokenize_chats import process_training_chat
37
+ from mllm.training.training_data_utils import *
38
+ from mllm.training.training_data_utils import (
39
+ TrainingBatch,
40
+ TrajectoryBatch,
41
+ get_tokenwise_credits,
42
+ )
43
+ from mllm.utils.resource_context import resource_logger_context
44
+
45
+ logger = logging.getLogger(__name__)
46
+ logger.addHandler(logging.StreamHandler(sys.stdout))
47
+
48
+
49
@dataclass
class TrainerAnnealingState:
    """Annealing progress persisted across runs (pickled under the trainer's save path)."""

    # Number of annealing steps taken so far.
    annealing_step_counter: int = 0
52
+
53
+
54
+ class BaseTrainer(ABC):
55
+ """
56
+ Trainer
57
+ """
58
+
59
+ def __init__(
60
+ self,
61
+ policy: AutoModelForCausalLM,
62
+ policy_optimizer: torch.optim.Optimizer,
63
+ critic: Union[AutoModelForCausalLM, None],
64
+ critic_optimizer: Union[torch.optim.Optimizer, None],
65
+ tokenizer: AutoTokenizer,
66
+ lr_scheduler: torch.optim.lr_scheduler.LRScheduler,
67
+ critic_lr_scheduler: Union[torch.optim.lr_scheduler.LRScheduler, None],
68
+ ######################################################################
69
+ entropy_coeff: float,
70
+ entropy_topk: int,
71
+ entropy_mask_regex: Union[str, None],
72
+ kl_coeff: float,
73
+ gradient_clipping: Union[float, None],
74
+ restrict_tokens: Union[list[str], None],
75
+ mini_batch_size: int,
76
+ use_gradient_checkpointing: bool,
77
+ temperature: float,
78
+ device: str,
79
+ whiten_advantages: bool,
80
+ whiten_advantages_time_step_wise: bool,
81
+ use_gae: bool,
82
+ use_gae_lambda_annealing: bool,
83
+ gae_lambda_annealing_limit: float,
84
+ gae_lambda_annealing_method: Literal["sigmoid_annealing"],
85
+ gae_lambda_annealing_method_params: dict,
86
+ pg_loss_normalization: Literal["batch", "nb_tokens"],
87
+ use_rloo: bool,
88
+ skip_discounted_state_visitation: bool,
89
+ discount_factor: float,
90
+ enable_tokenwise_logging: bool,
91
+ save_path: str,
92
+ reward_normalizing_constant: float = 1.0,
93
+ critic_loss_type: Literal["mse", "huber"] = "huber",
94
+ exploration_prompts_to_remove: list[str] = [],
95
+ filter_higher_refprob_tokens_kl: bool = False,
96
+ truncated_importance_sampling_ratio_cap: float = 0.0,
97
+ importance_sampling_strategy: Literal[
98
+ "per_token", "per_sequence"
99
+ ] = "per_token",
100
+ ):
101
+ """
102
+ Initialize the REINFORCE trainer with reward shaping for multi-agent or single-agent training.
103
+
104
+ Args:
105
+ model (AutoModelForCausalLM): The main policy model.
106
+ tokenizer (AutoTokenizer): Tokenizer for the model.
107
+ optimizer (torch.optim.Optimizer): Optimizer for the policy model.
108
+ lr_scheduler (torch.optim.lr_scheduler.LRScheduler): Learning rate scheduler for the policy model.
109
+ critic (AutoModelForCausalLM or None): Critic model for value estimation (optional).
110
+ critic_optimizer (torch.optim.Optimizer or None): Optimizer for the critic model (optional).
111
+ critic_lr_scheduler (torch.optim.lr_scheduler.LRScheduler or None): LR scheduler for the critic (optional).
112
+ config (RtConfig): Configuration object for training.
113
+ """
114
+ self.tokenizer = tokenizer
115
+ # self.tokenizer.padding_side = "left" # needed for flash attention
116
+ if self.tokenizer.pad_token_id is None:
117
+ self.tokenizer.pad_token_id = self.tokenizer.eos_token_id
118
+ self.lr_scheduler = lr_scheduler
119
+ self.accelerator = Accelerator()
120
+ (
121
+ self.policy,
122
+ self.policy_optimizer,
123
+ self.critic,
124
+ self.critic_optimizer,
125
+ ) = self.accelerator.prepare(policy, policy_optimizer, critic, critic_optimizer)
126
+
127
+ self.critic_lr_scheduler = critic_lr_scheduler
128
+ self.tally = Tally()
129
+
130
+ if use_gradient_checkpointing == True:
131
+ self.policy.gradient_checkpointing_enable(dict(use_reentrant=False))
132
+ if critic is not None:
133
+ self.critic.gradient_checkpointing_enable(dict(use_reentrant=False))
134
+
135
+ self.save_path = save_path
136
+
137
+ # Load trainer state if it exists
138
+ self.trainer_annealing_state_path = os.path.join(
139
+ self.save_path, "trainer_annealing_state.pkl"
140
+ )
141
+ if os.path.exists(self.trainer_annealing_state_path):
142
+ logger.info(
143
+ f"Loading trainer state from {self.trainer_annealing_state_path}"
144
+ )
145
+ self.trainer_annealing_state = pickle.load(
146
+ open(self.trainer_annealing_state_path, "rb")
147
+ )
148
+ else:
149
+ self.trainer_annealing_state = TrainerAnnealingState()
150
+
151
+ # Load policy optimizer state if it exists
152
+ self.policy_optimizer_path = os.path.join(
153
+ self.save_path, "policy_optimizer_state.pt"
154
+ )
155
+ if os.path.exists(self.policy_optimizer_path):
156
+ logger.info(
157
+ f"Loading policy optimizer state from {self.policy_optimizer_path}"
158
+ )
159
+ self.policy_optimizer.load_state_dict(
160
+ torch.load(self.policy_optimizer_path)
161
+ )
162
+
163
+ # Load critic optimizer state if it exists
164
+ self.critic_optimizer_path = os.path.join(
165
+ self.save_path, "critic_optimizer_state.pt"
166
+ )
167
+ if (
168
+ os.path.exists(self.critic_optimizer_path)
169
+ and self.critic_optimizer is not None
170
+ ):
171
+ logger.info(
172
+ f"Loading critic optimizer state from {self.critic_optimizer_path}"
173
+ )
174
+ self.critic_optimizer.load_state_dict(
175
+ torch.load(self.critic_optimizer_path)
176
+ )
177
+ self.device = self.accelerator.device
178
+ self.entropy_coeff = entropy_coeff
179
+ self.entropy_topk = entropy_topk
180
+ self.entropy_mask_regex = entropy_mask_regex
181
+ self.kl_coeff = kl_coeff
182
+ self.gradient_clipping = gradient_clipping
183
+ self.restrict_tokens = restrict_tokens
184
+ self.mini_batch_size = mini_batch_size
185
+ self.use_gradient_checkpointing = use_gradient_checkpointing
186
+ self.temperature = temperature
187
+ self.use_gae = use_gae
188
+ self.whiten_advantages = whiten_advantages
189
+ self.whiten_advantages_time_step_wise = whiten_advantages_time_step_wise
190
+ self.use_rloo = use_rloo
191
+ self.skip_discounted_state_visitation = skip_discounted_state_visitation
192
+ self.use_gae_lambda_annealing = use_gae_lambda_annealing
193
+ self.gae_lambda_annealing_limit = gae_lambda_annealing_limit
194
+ if use_gae_lambda_annealing:
195
+ self.gae_lambda_annealing_method: Callable[
196
+ [int], float
197
+ ] = lambda step: eval(gae_lambda_annealing_method)(
198
+ step=step, **gae_lambda_annealing_method_params
199
+ )
200
+ self.discount_factor = discount_factor
201
+ self.enable_tokenwise_logging = enable_tokenwise_logging
202
+ self.reward_normalizing_constant = reward_normalizing_constant
203
+ self.pg_loss_normalization = pg_loss_normalization
204
+ self.critic_loss_type = critic_loss_type
205
+ self.exploration_prompts_to_remove = exploration_prompts_to_remove
206
+ # Common containers used by all trainers
207
+ self.training_data: dict = {}
208
+ self.debug_path_list: list[str] = []
209
+ self.policy_gradient_data = None
210
+ self.tally = Tally()
211
+ self.rollout_tally = RolloutTally()
212
+ self.tokenwise_tally: Union[ContextualizedTokenwiseTally, None] = None
213
+ self.filter_higher_refprob_tokens_kl = filter_higher_refprob_tokens_kl
214
+ self.truncated_importance_sampling_ratio_cap = (
215
+ truncated_importance_sampling_ratio_cap
216
+ )
217
+ self.importance_sampling_strategy = importance_sampling_strategy
218
+
219
+ def mask_non_restricted_token_logits(self, logits: torch.Tensor) -> torch.Tensor:
220
+ """
221
+ Masks logits so that only allowed tokens (as specified in config.restrict_tokens)
222
+ and the EOS token are active.
223
+ All other logits are set to -inf, effectively removing them from the softmax.
224
+
225
+ Args:
226
+ logits (torch.Tensor): The logits tensor of shape (B, S, V).
227
+
228
+ Returns:
229
+ torch.Tensor: The masked logits tensor.
230
+ """
231
+ # TODO: verify. Not sure what we do here is differentiable
232
+ # also, we recompute for nothing
233
+
234
+ if self.restrict_tokens is not None:
235
+ allowed_token_ids = []
236
+ for token in self.restrict_tokens:
237
+ token_ids = self.tokenizer(token, add_special_tokens=False)["input_ids"]
238
+ allowed_token_ids.append(token_ids[0])
239
+ allowed_token_ids.append(
240
+ self.tokenizer.eos_token_id
241
+ ) # This token should always be active
242
+ allowed_token_ids = torch.tensor(allowed_token_ids, device=logits.device)
243
+ # Mask log_probs and probs to only allowed tokens
244
+ mask = torch.zeros_like(logits).bool() # (B, S, V)
245
+ mask[..., allowed_token_ids] = True
246
+ logits = torch.where(
247
+ mask,
248
+ logits,
249
+ torch.tensor(-float("inf"), device=logits.device),
250
+ )
251
+
252
+ return logits
253
+
254
+ # def get_gradient_magnitude(self, loss_term: torch.Tensor) -> float:
255
+ # """
256
+ # Computes the L2 norm of the gradients of the given loss term with respect to the model parameters.
257
+
258
+ # Args:
259
+ # loss_term (torch.Tensor): The loss tensor to compute gradients for.
260
+
261
+ # Returns:
262
+ # float: The L2 norm of the gradients, or 0.0 if no gradients are present.
263
+ # """
264
+ # with torch.no_grad():
265
+ # grads = torch.autograd.grad(
266
+ # loss_term,
267
+ # [p for p in self.policy.parameters() if p.requires_grad],
268
+ # retain_graph=True,
269
+ # allow_unused=True,
270
+ # )
271
+ # grads = [g for g in grads if g is not None]
272
+ # if not grads:
273
+ # return torch.tensor(0.0, device=loss_term.device)
274
+ # return torch.norm(torch.stack([g.norm(2) for g in grads])).item()
275
+
276
+ def apply_reinforce_step(
277
+ self,
278
+ training_batch: TrainingBatch,
279
+ ) -> None:
280
+ """
281
+ Applies a single REINFORCE policy gradient step using the provided batch of rollouts.
282
+ Handles batching, loss computation (including entropy and KL regularization), gradient accumulation, and optimizer step.
283
+ Optionally logs various metrics and statistics.
284
+
285
+ Args:
286
+ paths (list[str]): List of game complete file paths for each rollout.
287
+ contexts (list[torch.Tensor]): List of context tensors for each rollout.
288
+ credits (list[torch.Tensor]): List of credit tensors (rewards/advantages) for each rollout.
289
+ action_masks (list[torch.Tensor]): List of action mask tensors for each rollout.
290
+ """
291
+ with resource_logger_context(logger, "Apply reinforce step"):
292
+ self.policy.train()
293
+ mb_size = self.mini_batch_size
294
+ nb_rollouts = len(training_batch)
295
+
296
+ # Initialize running mean logs
297
+ running_mean_logs = {
298
+ "rl_objective": 0.0,
299
+ "policy_gradient_loss": 0.0,
300
+ "policy_gradient_norm": 0.0,
301
+ "log_probs": 0.0,
302
+ "credits": 0.0,
303
+ "entropy": 0.0,
304
+ "engine_log_probs_diff_clampfrac": 0.0,
305
+ "tis_imp_ratio": 0.0,
306
+ "ref_log_probs_diff_clampfrac": 0.0,
307
+ "higher_refprob_frac": 0.0,
308
+ "tis_imp_ratio_clampfrac": 0.0,
309
+ }
310
+ if self.entropy_coeff != 0.0:
311
+ running_mean_logs["entropy"] = 0.0
312
+ if self.kl_coeff != 0.0:
313
+ running_mean_logs["kl_divergence"] = 0.0
314
+
315
+ # Get total number of tokens generated
316
+ total_tokens_generated = 0
317
+ for att_mask in training_batch.batch_action_mask:
318
+ total_tokens_generated += att_mask.sum()
319
+
320
+ # Obtain loss normalization
321
+ if self.pg_loss_normalization == "nb_tokens":
322
+ normalization_factor = total_tokens_generated
323
+ elif self.pg_loss_normalization == "batch":
324
+ normalization_factor = np.ceil(nb_rollouts / mb_size).astype(int)
325
+ else:
326
+ raise ValueError(
327
+ f"Invalid pg_loss_normalization: {self.pg_loss_normalization}"
328
+ )
329
+
330
+ # Gradient accumulation for each mini-batch
331
+ for mb in range(0, nb_rollouts, mb_size):
332
+ logger.info(f"Processing mini-batch {mb} of {nb_rollouts}")
333
+ loss = 0.0
334
+ training_mb = training_batch[mb : mb + mb_size]
335
+ training_mb = training_mb.get_padded_tensors()
336
+ training_mb.to(self.device)
337
+ (
338
+ tokens_mb,
339
+ action_mask_mb,
340
+ entropy_mask_mb,
341
+ credits_mb,
342
+ engine_log_probs_mb,
343
+ timesteps_mb,
344
+ ) = (
345
+ training_mb.batch_input_ids,
346
+ training_mb.batch_action_mask,
347
+ training_mb.batch_entropy_mask,
348
+ training_mb.batch_credits,
349
+ training_mb.batch_engine_log_probs,
350
+ training_mb.batch_timesteps,
351
+ )
352
+
353
+ # Next token prediction
354
+ contexts_mb = tokens_mb[:, :-1]
355
+ shifted_contexts_mb = tokens_mb[:, 1:]
356
+ action_mask_mb = action_mask_mb[:, 1:]
357
+ entropy_mask_mb = entropy_mask_mb[:, 1:]
358
+ credits_mb = credits_mb[:, 1:]
359
+ engine_log_probs_mb = engine_log_probs_mb[:, 1:]
360
+ timesteps_mb = timesteps_mb[:, 1:]
361
+
362
+ if self.enable_tokenwise_logging:
363
+ self.tokenwise_tally.set_action_mask(action_mask=action_mask_mb)
364
+ self.tokenwise_tally.set_range(range=(mb, mb + mb_size))
365
+ self.tokenwise_tally.add_contexts(contexts=contexts_mb)
366
+ self.tokenwise_tally.add_data(
367
+ metric_id="next_token",
368
+ metrics=shifted_contexts_mb,
369
+ to_tids=True,
370
+ )
371
+ self.tokenwise_tally.add_data(
372
+ metric_id="entropy_mask",
373
+ metrics=entropy_mask_mb,
374
+ )
375
+
376
+ if self.enable_tokenwise_logging:
377
+ self.tokenwise_tally.add_data(
378
+ metric_id="next_token_credit", metrics=credits_mb
379
+ )
380
+
381
+ # Forward pass + cast to FP-32 for higher prec.
382
+ # TODO: create attention mask if not relying on default (assume causal llm)
383
+ logits = self.policy(input_ids=contexts_mb)[0] # (B, S, V)
384
+
385
+ # Mask non-restricted tokens
386
+ if self.restrict_tokens is not None:
387
+ logits = self.mask_non_restricted_token_logits(logits)
388
+
389
+ logits /= self.temperature # (B, S, V)
390
+
391
+ # Compute new log probabilities
392
+ log_probs = F.log_softmax(logits, dim=-1) # (B, S, V)
393
+
394
+ # Get log probabilities of actions taken during rollouts
395
+ action_log_probs = log_probs.gather(
396
+ dim=-1, index=shifted_contexts_mb.unsqueeze(-1)
397
+ ).squeeze(
398
+ -1
399
+ ) # (B, S)
400
+ if self.pg_loss_normalization == "batch":
401
+ den_running_mean = action_mask_mb.sum() * normalization_factor
402
+ else:
403
+ den_running_mean = normalization_factor
404
+ running_mean_logs["log_probs"] += (
405
+ action_log_probs * action_mask_mb
406
+ ).sum().item() / den_running_mean
407
+ running_mean_logs["credits"] += (
408
+ credits_mb * action_mask_mb
409
+ ).sum().item() / den_running_mean
410
+
411
+ if self.enable_tokenwise_logging:
412
+ self.tokenwise_tally.add_data(
413
+ metric_id="next_token_log_prob",
414
+ metrics=action_log_probs,
415
+ )
416
+ self.tokenwise_tally.add_data(
417
+ metric_id="engine_next_token_log_prob",
418
+ metrics=engine_log_probs_mb,
419
+ )
420
+ self.tokenwise_tally.add_data(
421
+ metric_id="next_token_prob",
422
+ metrics=torch.exp(action_log_probs),
423
+ )
424
+ top_k_indices = torch.topk(logits, k=5, dim=-1).indices
425
+ self.tokenwise_tally.add_data(
426
+ metric_id=f"top_{5}_tids",
427
+ metrics=top_k_indices,
428
+ to_tids=True,
429
+ )
430
+ self.tokenwise_tally.add_data(
431
+ metric_id=f"top_{5}_probs",
432
+ metrics=torch.exp(log_probs).gather(
433
+ dim=-1, index=top_k_indices
434
+ ),
435
+ )
436
+
437
+ rewarded_action_log_probs = (
438
+ action_mask_mb * credits_mb * action_log_probs
439
+ )
440
+ # (B, S)
441
+ INVALID_LOGPROB = 1.0
442
+ CLAMP_VALUE = 40.0
443
+ masked_action_log_probs = torch.masked_fill(
444
+ action_log_probs, ~action_mask_mb, INVALID_LOGPROB
445
+ )
446
+ masked_engine_log_probs = torch.masked_fill(
447
+ engine_log_probs_mb, ~action_mask_mb, INVALID_LOGPROB
448
+ )
449
+ with torch.no_grad():
450
+ action_engine_log_probs_diff = (
451
+ masked_action_log_probs - masked_engine_log_probs
452
+ ).clamp(-CLAMP_VALUE, CLAMP_VALUE)
453
+ running_mean_logs["engine_log_probs_diff_clampfrac"] += (
454
+ action_engine_log_probs_diff.abs()
455
+ .eq(CLAMP_VALUE)
456
+ .float()
457
+ .sum()
458
+ .item()
459
+ / den_running_mean
460
+ )
461
+ if self.importance_sampling_strategy == "per_sequence":
462
+ tis_imp_ratio = torch.zeros_like(action_engine_log_probs_diff)
463
+ for mb_idx in range(action_engine_log_probs_diff.shape[0]):
464
+ valid_token_mask = action_mask_mb[mb_idx]
465
+ timestep_ids = timesteps_mb[mb_idx][valid_token_mask]
466
+ timestep_logprob_diffs = action_engine_log_probs_diff[mb_idx][
467
+ valid_token_mask
468
+ ]
469
+ max_timestep = int(timestep_ids.max().item()) + 1
470
+ timestep_sums = torch.zeros(
471
+ max_timestep,
472
+ device=action_engine_log_probs_diff.device,
473
+ dtype=action_engine_log_probs_diff.dtype,
474
+ )
475
+ timestep_sums.scatter_add_(
476
+ 0, timestep_ids, timestep_logprob_diffs
477
+ )
478
+ timestep_ratios = torch.exp(timestep_sums)
479
+ tis_imp_ratio[
480
+ mb_idx, valid_token_mask
481
+ ] = timestep_ratios.gather(0, timestep_ids)
482
+ else:
483
+ tis_imp_ratio = torch.exp(action_engine_log_probs_diff)
484
+ running_mean_logs["tis_imp_ratio"] += (
485
+ tis_imp_ratio * action_mask_mb
486
+ ).sum().item() / den_running_mean
487
+ if self.truncated_importance_sampling_ratio_cap > 0.0:
488
+ tis_imp_ratio = torch.clamp(
489
+ tis_imp_ratio, max=self.truncated_importance_sampling_ratio_cap
490
+ )
491
+ running_mean_logs["tis_imp_ratio_clampfrac"] += (
492
+ tis_imp_ratio.eq(self.truncated_importance_sampling_ratio_cap)
493
+ .float()
494
+ .sum()
495
+ .item()
496
+ ) / den_running_mean
497
+ rewarded_action_log_probs = (
498
+ rewarded_action_log_probs * tis_imp_ratio
499
+ )
500
+
501
+ if self.enable_tokenwise_logging:
502
+ self.tokenwise_tally.add_data(
503
+ metric_id="next_token_clogπ",
504
+ metrics=rewarded_action_log_probs,
505
+ )
506
+
507
+ # Add value term to loss
508
+ if self.pg_loss_normalization == "batch":
509
+ nb_act_tokens = action_mask_mb.sum()
510
+ mb_value = -rewarded_action_log_probs.sum() / nb_act_tokens
511
+ else:
512
+ mb_value = -rewarded_action_log_probs.sum()
513
+
514
+ loss += mb_value
515
+ running_mean_logs["rl_objective"] += mb_value.item() / den_running_mean
516
+
517
+ # -------------------------------------------------
518
+ # Entropy Regularization
519
+ # -------------------------------------------------
520
+ # Only apply entropy on distribution defined over most probable tokens
521
+ if self.entropy_topk is not None:
522
+ top_k_indices = torch.topk(
523
+ logits, k=self.entropy_topk, dim=-1
524
+ ).indices
525
+ entropy_logits = logits.gather(dim=-1, index=top_k_indices)
526
+ else:
527
+ entropy_logits = logits
528
+
529
+ token_entropy_terms = -F.softmax(
530
+ entropy_logits, dim=-1
531
+ ) * F.log_softmax(
532
+ entropy_logits, dim=-1
533
+ ) # (B, S, T)
534
+ token_entropy_terms *= (
535
+ action_mask_mb[:, :, None] * entropy_mask_mb[:, :, None]
536
+ ) # only get loss on specific action tokens
537
+
538
+ mb_entropy = token_entropy_terms.sum(dim=-1)
539
+
540
+ if self.enable_tokenwise_logging:
541
+ self.tokenwise_tally.add_data(
542
+ metric_id="entropy",
543
+ metrics=mb_entropy,
544
+ )
545
+ if self.pg_loss_normalization == "batch":
546
+ nb_act_tokens = action_mask_mb.sum()
547
+ mb_entropy = -mb_entropy.sum() / nb_act_tokens
548
+ else:
549
+ mb_entropy = -mb_entropy.sum()
550
+ running_mean_logs["entropy"] += -mb_entropy.item() / den_running_mean
551
+ if self.entropy_coeff != 0.0:
552
+ mb_entropy *= self.entropy_coeff
553
+ loss += mb_entropy
554
+
555
+ # -------------------------------------------------
556
+ # KL-DIVERGENCE
557
+ # -------------------------------------------------
558
+ if self.kl_coeff != 0.0:
559
+ ref_model_logits = self.policy.get_base_model_logits(contexts_mb)
560
+ ref_model_logits = ref_model_logits / self.temperature
561
+ # (B, S, V)
562
+ ref_model_logits = self.mask_non_restricted_token_logits(
563
+ logits=ref_model_logits
564
+ )
565
+ # (B, S, V)
566
+ ref_model_log_probs = F.log_softmax(ref_model_logits, dim=-1)
567
+ # (B, S, V)
568
+ ref_model_action_log_probs = ref_model_log_probs.gather(
569
+ dim=-1, index=shifted_contexts_mb.unsqueeze(-1)
570
+ ).squeeze(
571
+ -1
572
+ ) # (B,S)
573
+ # Approximating KL Divergence (see refs in docstring)
574
+ # Ref 1: http://joschu.net/blog/kl-approx.html
575
+ # Ref 2: https://github.dev/huggingface/trl/blob/main/trl/trainer/grpo_trainer.py#L1332
576
+ masked_ref_model_action_log_probs = torch.masked_fill(
577
+ ref_model_action_log_probs, ~action_mask_mb, INVALID_LOGPROB
578
+ )
579
+ action_log_probs_diff = (
580
+ masked_ref_model_action_log_probs - masked_action_log_probs
581
+ ).clamp(-CLAMP_VALUE, CLAMP_VALUE)
582
+ running_mean_logs["ref_log_probs_diff_clampfrac"] += (
583
+ action_log_probs_diff.abs().eq(CLAMP_VALUE).float().sum().item()
584
+ / den_running_mean
585
+ )
586
+ if self.filter_higher_refprob_tokens_kl:
587
+ higher_refprob_tokens_mask = action_log_probs_diff > 0.0
588
+ running_mean_logs["higher_refprob_frac"] += (
589
+ higher_refprob_tokens_mask.sum().item() / den_running_mean
590
+ )
591
+ action_log_probs_diff = action_log_probs_diff * (
592
+ ~higher_refprob_tokens_mask
593
+ )
594
+ kl_div = torch.expm1(action_log_probs_diff) - action_log_probs_diff
595
+ kl_div *= action_mask_mb # We only care about KLD of action tokens
596
+ if self.truncated_importance_sampling_ratio_cap > 0.0:
597
+ kl_div = kl_div * tis_imp_ratio
598
+ kl_div *= self.kl_coeff
599
+ if self.enable_tokenwise_logging:
600
+ self.tokenwise_tally.add_data(
601
+ metric_id="ref_model_next_token_log_prob",
602
+ metrics=ref_model_action_log_probs,
603
+ )
604
+ self.tokenwise_tally.add_data(
605
+ metric_id="kl_divergence",
606
+ metrics=kl_div,
607
+ )
608
+
609
+ if self.pg_loss_normalization == "batch":
610
+ nb_act_tokens = action_mask_mb.sum()
611
+ mb_kl = kl_div.sum() / nb_act_tokens
612
+ else:
613
+ mb_kl = kl_div.sum()
614
+ running_mean_logs["kl_divergence"] += (
615
+ mb_kl.item() / den_running_mean
616
+ )
617
+ loss += mb_kl
618
+
619
+ # Accumulate gradient
620
+ running_mean_logs["policy_gradient_loss"] += (
621
+ loss.item() / den_running_mean
622
+ )
623
+ loss /= normalization_factor
624
+ self.accelerator.backward(loss)
625
+
626
+ # ensure gpu memory is freed
627
+ del training_mb
628
+ del log_probs
629
+ del logits
630
+ del loss
631
+ del action_log_probs
632
+ del rewarded_action_log_probs
633
+
634
+ logger.info(
635
+ f"Accumulated the policy gradient loss for {total_tokens_generated} tokens."
636
+ )
637
+
638
+ # Clip gradients and take step
639
+ if self.gradient_clipping is not None:
640
+ grad_norm = self.accelerator.clip_grad_norm_(
641
+ self.policy.parameters(), self.gradient_clipping
642
+ )
643
+ running_mean_logs["policy_gradient_norm"] += grad_norm.item()
644
+
645
+ # Take step
646
+ self.policy_optimizer.step()
647
+ self.policy_optimizer.zero_grad()
648
+
649
+ # Store logs
650
+ for key, value in running_mean_logs.items():
651
+ self.tally.add_metric(path=key, metric=value)
652
+
653
+ # Clear
654
+ # TODO: verify
655
+ self.accelerator.clear(self.policy, self.policy_optimizer)
656
+ import gc
657
+
658
+ gc.collect()
659
+ torch.cuda.empty_cache()
660
+ return running_mean_logs
661
+
662
+ def get_advantages_with_critic_gradient_accumulation(
663
+ self, trajectories: TrajectoryBatch, critic_loss_scaling_factor: float = 2.0
664
+ ) -> torch.FloatTensor:
665
+ """
666
+ TOWRITE
667
+ Uses GAE if enabled, otherwise uses Monte Carlo returns.
668
+ Optionally trains the critic if GAE is used.
669
+ Returns:
670
+ advantages: NestedFloatTensors
671
+ """
672
+
673
+ mb_size = self.mini_batch_size
674
+ batch_size = trajectories.rollout_ids.shape[0]
675
+ agent_id = trajectories.agent_ids[0]
676
+ batch_rewards = trajectories.batch_rewards
677
+
678
+ ######################################
679
+ # use critic for advantage estimation
680
+ ######################################
681
+ if self.use_gae:
682
+ if "buffer" in agent_id:
683
+ self.critic.eval()
684
+ training = False
685
+ else:
686
+ self.critic.train()
687
+ training = True
688
+ advantages = []
689
+ # critic_loss_scaling_factor comes learning single critic for two agents
690
+ normalization_factor = (
691
+ np.ceil(batch_size / mb_size).astype(int) * critic_loss_scaling_factor
692
+ )
693
+ # For each minibatch
694
+ for mb in range(0, batch_size, mb_size):
695
+ trajectory_mb = trajectories[mb : mb + mb_size]
696
+ trajectory_mb.to(self.device)
697
+ rewards_mb = trajectory_mb.batch_rewards
698
+ (
699
+ tokens_mb,
700
+ state_ends_mask_mb,
701
+ timestep_counts,
702
+ ) = trajectory_mb.get_padded_tensors_for_critic()
703
+ # critic causal attention up to end flags
704
+ if training:
705
+ vals_estimate_full = self.critic(tokens_mb)
706
+ else:
707
+ with torch.no_grad():
708
+ vals_estimate_full = self.critic(tokens_mb)
709
+
710
+ # if vals_estimate_full.dim() == 3:
711
+ # vals_estimate_full = vals_estimate_full.squeeze(-1)
712
+
713
+ # Select only positions where states end, per sample → list of (jT,)
714
+ B = tokens_mb.shape[0]
715
+ vals_list = [
716
+ vals_estimate_full[b][state_ends_mask_mb[b]] for b in range(B)
717
+ ]
718
+
719
+ # Pad to (B, max_jT) = (B, S)
720
+ vals_estimate_mb = pad_sequence(
721
+ vals_list, batch_first=True, padding_value=0.0
722
+ )
723
+ dtype = vals_estimate_mb.dtype
724
+ rewards_mb = pad_sequence(
725
+ rewards_mb, batch_first=True, padding_value=0.0
726
+ ).to(
727
+ dtype=dtype
728
+ ) # (B, S)
729
+ self.rollout_tally.add_metric(
730
+ path=["batch_rewards"],
731
+ rollout_tally_item=RolloutTallyItem(
732
+ crn_ids=trajectory_mb.crn_ids,
733
+ rollout_ids=trajectory_mb.rollout_ids,
734
+ agent_ids=trajectory_mb.agent_ids,
735
+ metric_matrix=rewards_mb,
736
+ ),
737
+ )
738
+ if self.reward_normalizing_constant != 1.0:
739
+ rewards_mb /= self.reward_normalizing_constant
740
+
741
+ det_vals_estimate_mb = vals_estimate_mb.detach() # (B, max_jT)
742
+ self.rollout_tally.add_metric(
743
+ path=["mb_value_estimates_critic"],
744
+ rollout_tally_item=RolloutTallyItem(
745
+ crn_ids=trajectory_mb.crn_ids,
746
+ rollout_ids=trajectory_mb.rollout_ids,
747
+ agent_ids=trajectory_mb.agent_ids,
748
+ metric_matrix=det_vals_estimate_mb,
749
+ ),
750
+ )
751
+
752
+ # Append a 0 value to the end of the value estimates
753
+ if det_vals_estimate_mb.shape[1] == rewards_mb.shape[1]:
754
+ Bsize = det_vals_estimate_mb.shape[0]
755
+ device = det_vals_estimate_mb.device
756
+ dtype = det_vals_estimate_mb.dtype
757
+ det_vals_estimate_mb = torch.cat(
758
+ [
759
+ det_vals_estimate_mb,
760
+ torch.zeros((Bsize, 1), device=device, dtype=dtype),
761
+ ],
762
+ dim=1,
763
+ ) # (B, max_jT+1)
764
+ else:
765
+ raise ValueError(
766
+ "Incompatible shapes for value estimates and rewards."
767
+ )
768
+
769
+ # Get annealed lambda
770
+ if self.use_gae_lambda_annealing:
771
+ annealing_constant = self.gae_lambda_annealing_method(
772
+ step=self.trainer_annealing_state.annealing_step_counter
773
+ )
774
+ annealed_lambda = (
775
+ self.gae_lambda_annealing_limit * annealing_constant
776
+ )
777
+ self.tally.add_metric(
778
+ path="annealed_lambda", metric=annealed_lambda
779
+ )
780
+ else:
781
+ annealed_lambda = self.gae_lambda_annealing_limit
782
+
783
+ # Get GAE advantages
784
+ gae_advantages = get_generalized_advantage_estimates(
785
+ rewards=rewards_mb,
786
+ value_estimates=det_vals_estimate_mb,
787
+ discount_factor=self.discount_factor,
788
+ lambda_coef=annealed_lambda,
789
+ ) # (B, max_jT)
790
+ self.rollout_tally.add_metric(
791
+ path=["mb_gae_advantages"],
792
+ rollout_tally_item=RolloutTallyItem(
793
+ crn_ids=trajectory_mb.crn_ids,
794
+ rollout_ids=trajectory_mb.rollout_ids,
795
+ agent_ids=trajectory_mb.agent_ids,
796
+ metric_matrix=gae_advantages,
797
+ ),
798
+ )
799
+ if training:
800
+ targets = (
801
+ gae_advantages.to(dtype=dtype) + det_vals_estimate_mb[:, :-1]
802
+ ) # (B, max_jT) # A(s, a, b) + V(s) = Q(s, a, b)
803
+ self.rollout_tally.add_metric(
804
+ path=["mb_targets_critic"],
805
+ rollout_tally_item=RolloutTallyItem(
806
+ crn_ids=trajectory_mb.crn_ids,
807
+ rollout_ids=trajectory_mb.rollout_ids,
808
+ agent_ids=trajectory_mb.agent_ids,
809
+ metric_matrix=targets,
810
+ ),
811
+ )
812
+ if self.critic_loss_type == "mse":
813
+ loss = F.mse_loss(
814
+ input=vals_estimate_mb,
815
+ target=targets,
816
+ )
817
+ elif self.critic_loss_type == "huber":
818
+ loss = F.huber_loss(
819
+ input=vals_estimate_mb,
820
+ target=targets,
821
+ )
822
+ self.tally.add_metric(path=["mb_critic_loss"], metric=loss.item())
823
+ # Accumulate gradient
824
+ loss /= normalization_factor
825
+ self.accelerator.backward(loss)
826
+ del loss
827
+ del targets
828
+ del vals_estimate_mb
829
+ del trajectory_mb
830
+ del vals_estimate_full
831
+
832
+ # Get jagged back using timestep_counts
833
+ advantages.extend(
834
+ [gae_advantages[i, : timestep_counts[i]] for i in range(B)]
835
+ )
836
+
837
+ ######################################
838
+ # use exclusively Monte Carlo returns & rloo for advantage estimation
839
+ ######################################
840
+ else:
841
+ lengths = [len(c) for c in batch_rewards]
842
+ padded_rewards = pad_sequence(
843
+ batch_rewards, batch_first=True, padding_value=0.0
844
+ )
845
+ self.rollout_tally.add_metric(
846
+ path=["mb_rewards"],
847
+ rollout_tally_item=RolloutTallyItem(
848
+ crn_ids=trajectories.crn_ids,
849
+ rollout_ids=trajectories.rollout_ids,
850
+ agent_ids=trajectories.agent_ids,
851
+ metric_matrix=padded_rewards,
852
+ ),
853
+ )
854
+ if self.reward_normalizing_constant != 1.0:
855
+ padded_rewards /= self.reward_normalizing_constant
856
+ padded_advantages = get_discounted_returns(
857
+ rewards=padded_rewards,
858
+ discount_factor=self.discount_factor,
859
+ ) # no baseline for now
860
+ if self.use_rloo:
861
+ is_grouped_by_rng = (
862
+ trajectories.crn_ids.unique().shape[0]
863
+ != trajectories.crn_ids.shape[0]
864
+ )
865
+ if is_grouped_by_rng:
866
+ for crn_id in trajectories.crn_ids.unique():
867
+ rng_mask = trajectories.crn_ids == crn_id
868
+ rng_advantages = padded_advantages[rng_mask]
869
+ rng_advantages, _ = get_rloo_credits(credits=rng_advantages)
870
+ padded_advantages[rng_mask] = rng_advantages
871
+ else:
872
+ padded_advantages, _ = get_rloo_credits(credits=padded_advantages)
873
+ self.rollout_tally.add_metric(
874
+ path=["mb_rloo_advantages"],
875
+ rollout_tally_item=RolloutTallyItem(
876
+ crn_ids=trajectories.crn_ids,
877
+ rollout_ids=trajectories.rollout_ids,
878
+ agent_ids=trajectories.agent_ids,
879
+ metric_matrix=padded_advantages,
880
+ ),
881
+ )
882
+ advantages = [
883
+ padded_advantages[i, : lengths[i]]
884
+ for i in range(padded_advantages.shape[0])
885
+ ]
886
+
887
+ if self.whiten_advantages_time_step_wise or self.whiten_advantages:
888
+ lengths = [len(c) for c in advantages]
889
+ padded_advantages = pad_sequence(
890
+ advantages, batch_first=True, padding_value=0.0
891
+ )
892
+ if self.whiten_advantages_time_step_wise:
893
+ whitened_padded_advantages = whiten_advantages_time_step_wise(
894
+ padded_advantages
895
+ )
896
+ path = ["mb_whitened_advantages_time_step_wise"]
897
+ elif self.whiten_advantages:
898
+ whitened_padded_advantages = whiten_advantages(padded_advantages)
899
+ path = ["mb_whitened_advantages"]
900
+ self.rollout_tally.add_metric(
901
+ path=path,
902
+ rollout_tally_item=RolloutTallyItem(
903
+ crn_ids=trajectories.crn_ids,
904
+ rollout_ids=trajectories.rollout_ids,
905
+ agent_ids=trajectories.agent_ids,
906
+ metric_matrix=whitened_padded_advantages,
907
+ ),
908
+ )
909
+ advantages = [
910
+ whitened_padded_advantages[i, : lengths[i]]
911
+ for i in range(whitened_padded_advantages.shape[0])
912
+ ]
913
+
914
+ self.trainer_annealing_state.annealing_step_counter += 1
915
+
916
+ return advantages
917
+
918
    @abstractmethod
    def set_agent_trajectory_data(
        self, agent_id: str, roots: list[RolloutTreeRootNode]
    ) -> None:
        """
        Build and store the training trajectory data for one agent from the
        given rollout trees. Concrete trainers decide how trajectories are
        tokenized and how advantages are attached (see subclasses).
        """
        pass
926
+
927
    def set_trajectory_data(
        self, roots: list[RolloutTreeRootNode], agent_ids: list[str]
    ) -> None:
        """
        Populate trajectory data for every agent by delegating to
        ``set_agent_trajectory_data`` once per agent id.
        """
        for agent_id in agent_ids:
            self.set_agent_trajectory_data(agent_id, roots)
935
+
936
    @abstractmethod
    def share_advantage_data(self) -> list[AdvantagePacket]:
        """Return one AdvantagePacket per agent held by this trainer."""
        pass
939
+
940
    @abstractmethod
    def receive_advantage_data(self, advantage_packets: list[AdvantagePacket]) -> None:
        """Consume advantage packets shared by other trainers (subclass-specific)."""
        pass
943
+
944
    def set_policy_gradient_data(self, agent_ids: list[str]) -> None:
        """
        Flatten the per-agent trajectory batches (whose credits were already
        set earlier) into a single ``TrainingBatch`` for the policy-gradient
        step, broadcasting per-timestep credits to tokenwise credits.
        Also resets ``training_data`` and the tokenwise tally.
        # TODO: make it separate and clean
        """
        self.policy_gradient_data = None
        # for agent_id, trajectory_batch in self.training_data.items():
        #     if "buffer" in agent_id:
        #         continue
        for agent_id in agent_ids:
            assert "buffer" not in agent_id, "Buffer agents do not train policy"
            trajectory_batch = self.training_data[agent_id]
            # Expand per-state credits (jT,) onto every token of that state (jS,).
            tokenwise_batch_credits = get_tokenwise_credits(
                batch_timesteps=trajectory_batch.batch_timesteps,
                batch_credits=trajectory_batch.batch_credits,
            )
            policy_gradient_data = TrainingBatch(
                rollout_ids=trajectory_batch.rollout_ids,
                batch_input_ids=trajectory_batch.batch_input_ids,
                batch_action_mask=trajectory_batch.batch_action_mask,
                batch_entropy_mask=trajectory_batch.batch_entropy_mask,
                batch_credits=tokenwise_batch_credits,
                batch_engine_log_probs=trajectory_batch.batch_engine_log_probs,
                batch_timesteps=trajectory_batch.batch_timesteps,
            )
            # Concatenate batches across agents.
            if self.policy_gradient_data is None:
                self.policy_gradient_data = policy_gradient_data
            else:
                self.policy_gradient_data.append(policy_gradient_data)

        self.training_data = {}
        self.tokenwise_tally = ContextualizedTokenwiseTally(
            tokenizer=self.tokenizer,
            paths=self.debug_path_list,
        )
978
+
979
    def train(self) -> dict:
        """
        Run one optimization step: first step the critic optimizer on the
        gradients accumulated during advantage estimation (if a critic is
        used), then apply the policy-gradient (REINFORCE) step.

        Returns:
            dict of running-mean training logs from ``apply_reinforce_step``.
        """
        assert self.policy_gradient_data is not None, "Policy gradient data is not set"
        if self.critic_optimizer is not None:
            if self.gradient_clipping is not None:
                grad_norm = self.accelerator.clip_grad_norm_(
                    self.critic.parameters(), self.gradient_clipping
                )
                self.tally.add_metric(
                    path="gradient_norm_critic", metric=grad_norm.item()
                )
            # Take step
            self.critic_optimizer.step()
            self.critic_optimizer.zero_grad()
            self.accelerator.clear(self.critic, self.critic_optimizer)
            import gc

            gc.collect()
            torch.cuda.empty_cache()
        running_mean_logs = self.apply_reinforce_step(
            training_batch=self.policy_gradient_data
        )
        return running_mean_logs
1004
+
1005
+ def export_training_tally(self, identifier: str, folder: str) -> None:
1006
+ """
1007
+ Saves and resets the collected training metrics using the tally object.
1008
+ """
1009
+ os.makedirs(folder, exist_ok=True)
1010
+ self.tally.save(identifier=identifier, folder=folder)
1011
+ self.tokenwise_tally.save(
1012
+ path=os.path.join(folder, f"{identifier}_tokenwise.csv")
1013
+ )
1014
+ self.rollout_tally.save(identifier=identifier, folder=folder)
1015
+ self.tally.reset()
1016
+ self.tokenwise_tally = None
1017
+ self.rollout_tally.reset()
1018
+ self.debug_path_list = []
1019
+
1020
+ def export_optimizer_states(self) -> None:
1021
+ """
1022
+ Saves the optimizer states for both the main model and critic (if it exists).
1023
+ """
1024
+ try:
1025
+ os.makedirs(self.save_path, exist_ok=True)
1026
+
1027
+ torch.save(self.policy_optimizer.state_dict(), self.policy_optimizer_path)
1028
+ logger.info(f"Saved main optimizer state to {self.policy_optimizer_path}")
1029
+
1030
+ if self.critic_optimizer is not None:
1031
+ torch.save(
1032
+ self.critic_optimizer.state_dict(), self.critic_optimizer_path
1033
+ )
1034
+ logger.info(
1035
+ f"Saved critic optimizer state to {self.critic_optimizer_path}"
1036
+ )
1037
+ except Exception as e:
1038
+ logger.error(f"Error saving optimizer states: {str(e)}")
1039
+ raise
1040
+
1041
+ def export_trainer_annealing_state(self) -> None:
1042
+ """
1043
+ Saves the trainer state.
1044
+ """
1045
+ with open(self.trainer_annealing_state_path, "wb") as f:
1046
+ pickle.dump(self.trainer_annealing_state, f)
1047
+ logger.info(f"Saved trainer state to {self.trainer_annealing_state_path}")
1048
+
1049
+ def export_trainer_states(self) -> None:
1050
+ """
1051
+ Saves the trainer states.
1052
+ """
1053
+ self.export_optimizer_states()
1054
+ self.export_trainer_annealing_state()
src_code_for_reproducibility/training/trainer_independent.py ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+
3
+ """
4
+ import logging
5
+ import os
6
+ import sys
7
+ from typing import Union
8
+
9
+ import torch
10
+ import torch.nn.functional as F
11
+ from accelerate import Accelerator
12
+ from pandas._libs.tslibs.offsets import CBMonthBegin
13
+ from peft import LoraConfig
14
+ from torch.nn.utils.rnn import pad_sequence
15
+ from transformers import AutoModelForCausalLM, AutoTokenizer
16
+
17
+ from mllm.markov_games.rollout_tree import *
18
+ from mllm.markov_games.rollout_tree import RolloutTreeRootNode
19
+ from mllm.training.credit_methods import (
20
+ get_discounted_returns,
21
+ get_discounted_state_visitation_credits,
22
+ get_generalized_advantage_estimates,
23
+ get_rloo_credits,
24
+ )
25
+ from mllm.training.tally_metrics import Tally
26
+ from mllm.training.tally_tokenwise import ContextualizedTokenwiseTally
27
+ from mllm.training.tokenize_chats import *
28
+ from mllm.training.tokenize_chats import process_training_chat
29
+ from mllm.training.trainer_common import BaseTrainer
30
+ from mllm.training.training_data_utils import *
31
+ from mllm.training.training_data_utils import (
32
+ TrainingBatch,
33
+ TrajectoryBatch,
34
+ get_tokenwise_credits,
35
+ )
36
+ from mllm.utils.resource_context import resource_logger_context
37
+
38
+ logger = logging.getLogger(__name__)
39
+ logger.addHandler(logging.StreamHandler(sys.stdout))
40
+
41
+
42
@dataclass
class TrainingData:
    """Per-agent container pairing a trajectory batch with its advantages."""

    agent_id: str
    main_data: TrajectoryBatch
    # list-of-tensors: per rollout advantages with length jT
    main_advantages: list[torch.FloatTensor] | None = None
48
+
49
+
50
class TrainerNaive(BaseTrainer):
    """
    Independent ("naive") trainer: each agent is credited only with its own
    advantages; advantage packets from co-agents are ignored.
    """

    def set_agent_trajectory_data(
        self, agent_id: str, roots: list[RolloutTreeRootNode]
    ) -> None:
        """
        Tokenize the main path of each rollout tree for ``agent_id``, compute
        advantages (accumulating critic gradients when GAE is enabled), and
        store the result in ``self.training_data[agent_id]``.
        """
        # TODO: append to current batch data instead, else we will only train for one agent!
        self.policy_gradient_data = None

        # Tensorize Chats
        rollout_ids = []
        crn_ids = []  # common random number id
        batch_input_ids = []
        batch_action_mask = []
        batch_entropy_mask = []
        batch_timesteps = []
        batch_state_ends_mask = []
        batch_engine_log_probs = []
        batch_rewards = []
        for root in roots:
            rollout_id = root.id
            self.debug_path_list.append(
                "mgid:" + str(rollout_id) + "_agent_id:" + agent_id
            )
            rollout_ids.append(rollout_id)
            crn_ids.append(root.crn_id)
            chat, rewards = get_main_chat_list_and_rewards(agent_id=agent_id, root=root)
            (
                input_ids,
                action_mask,
                entropy_mask,
                timesteps,
                state_ends_mask,
                engine_log_probs,
            ) = process_training_chat(
                tokenizer=self.tokenizer,
                chat_history=chat,
                entropy_mask_regex=self.entropy_mask_regex,
                exploration_prompts_to_remove=self.exploration_prompts_to_remove,
            )
            batch_input_ids.append(input_ids)
            batch_action_mask.append(action_mask)
            batch_entropy_mask.append(entropy_mask)
            batch_timesteps.append(timesteps)
            batch_state_ends_mask.append(state_ends_mask)
            batch_engine_log_probs.append(engine_log_probs)
            batch_rewards.append(rewards)

        trajectory_batch = TrajectoryBatch(
            rollout_ids=torch.tensor(rollout_ids, dtype=torch.int32),
            crn_ids=torch.tensor(crn_ids, dtype=torch.int32),
            agent_ids=[agent_id] * len(rollout_ids),
            batch_input_ids=batch_input_ids,
            batch_action_mask=batch_action_mask,
            batch_entropy_mask=batch_entropy_mask,
            batch_timesteps=batch_timesteps,
            batch_state_ends_mask=batch_state_ends_mask,
            batch_rewards=batch_rewards,
            batch_engine_log_probs=batch_engine_log_probs,
        )

        # Get Advantages
        batch_advantages: torch.FloatTensor = (
            self.get_advantages_with_critic_gradient_accumulation(trajectory_batch)
        )

        # Discount state visitation (the mathematically correct way)
        if not self.skip_discounted_state_visitation:
            for i in range(len(batch_advantages)):
                batch_advantages[i] = get_discounted_state_visitation_credits(
                    batch_advantages[i].unsqueeze(0),
                    self.discount_factor,
                ).squeeze(0)

        self.training_data[agent_id] = TrainingData(
            agent_id=agent_id,
            main_data=trajectory_batch,
            main_advantages=batch_advantages,
        )

    def receive_advantage_data(self, advantage_packets: list[AdvantagePacket]):
        """
        This trainer ignores the advantages of the other trainers.
        """
        # Unwrap each TrainingData to its raw TrajectoryBatch and use the
        # agent's own advantages as the training credits.
        for agent_id, agent_data in self.training_data.items():
            self.training_data[agent_id] = agent_data.main_data
            self.training_data[agent_id].batch_credits = agent_data.main_advantages

    def share_advantage_data(self) -> list[AdvantagePacket]:
        """
        Share the advantage data with other agents.
        Returns:
            AdvantagePacket: The advantage packet containing the agent's advantages.
        """
        logger.info(f"Sharing advantage data.")
        advantage_packets = []
        for agent_id, agent_data in self.training_data.items():
            advantage_packets.append(
                AdvantagePacket(
                    agent_id=agent_id,
                    rollout_ids=agent_data.main_data.rollout_ids,
                    main_advantages=agent_data.main_advantages,
                )
            )
        return advantage_packets
src_code_for_reproducibility/training/trainer_sum_rewards.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+
3
+ """
4
+ import logging
5
+ import os
6
+ import sys
7
+ from typing import Union
8
+
9
+ import torch
10
+ import torch.nn.functional as F
11
+ from accelerate import Accelerator
12
+ from pandas._libs.tslibs.offsets import CBMonthBegin
13
+ from peft import LoraConfig
14
+ from torch.nn.utils.rnn import pad_sequence
15
+ from transformers import AutoModelForCausalLM, AutoTokenizer
16
+
17
+ from mllm.markov_games.rollout_tree import *
18
+ from mllm.markov_games.rollout_tree import RolloutTreeRootNode
19
+ from mllm.training.credit_methods import (
20
+ get_discounted_returns,
21
+ get_discounted_state_visitation_credits,
22
+ get_generalized_advantage_estimates,
23
+ get_rloo_credits,
24
+ )
25
+ from mllm.training.tally_metrics import Tally
26
+ from mllm.training.tally_rollout import RolloutTally, RolloutTallyItem
27
+ from mllm.training.tally_tokenwise import ContextualizedTokenwiseTally
28
+ from mllm.training.tokenize_chats import *
29
+ from mllm.training.tokenize_chats import process_training_chat
30
+ from mllm.training.trainer_common import BaseTrainer
31
+ from mllm.training.trainer_independent import TrainerNaive, TrainingData
32
+ from mllm.training.training_data_utils import *
33
+ from mllm.training.training_data_utils import (
34
+ AdvantagePacket,
35
+ TrainingBatch,
36
+ TrajectoryBatch,
37
+ get_tokenwise_credits,
38
+ )
39
+ from mllm.utils.resource_context import resource_logger_context
40
+
41
+ logger = logging.getLogger(__name__)
42
+ logger.addHandler(logging.StreamHandler(sys.stdout))
43
+
44
+
45
class TrainerSumRewards(TrainerNaive):
    """
    Trainer that credits each agent with the SUM of its own advantages and
    its co-agent's advantages, matched per rollout id.
    """

    def receive_advantage_data(self, advantage_packets: list[AdvantagePacket]):
        """
        Sums the advantages of the other trainers into each agent's credits.

        For every agent, co-agent advantages are looked up by rollout id,
        padded, added to the agent's own advantages, optionally discounted by
        state visitation, and stored as ``batch_credits``.

        Args:
            advantage_packets: packets from all trainers (own packet included;
                it is filtered out by agent_id).
        """
        logger.info(f"Receiving advantage packets.")

        assert (
            len(advantage_packets) > 0
        ), "At least one advantage packet must be provided."

        for agent_id, agent_data in self.training_data.items():
            coagent_advantage_packets = [
                packet for packet in advantage_packets if packet.agent_id != agent_id
            ]
            agent_rollout_ids = agent_data.main_data.rollout_ids
            agent_advantages = agent_data.main_advantages
            co_agent_advantages = []
            # Match co-agent advantages rollout-by-rollout.
            for rollout_id in agent_rollout_ids:
                for co_agent_packet in coagent_advantage_packets:
                    if rollout_id in co_agent_packet.rollout_ids:
                        index = torch.where(rollout_id == co_agent_packet.rollout_ids)[
                            0
                        ].item()
                        co_agent_advantages.append(
                            co_agent_packet.main_advantages[index]
                        )
                        # assumes that its two player game, with one co-agent
                        break
            assert len(co_agent_advantages) == len(agent_advantages)
            B = len(agent_advantages)
            assert all(
                a.shape[0] == b.shape[0]
                for a, b in zip(co_agent_advantages, agent_advantages)
            ), "Number of advantages must match in order to sum them up."

            # Get padded tensors (advantage alignment is invariant to padding)
            lengths = torch.tensor(
                [len(t) for t in agent_advantages],
                device=self.device,
                dtype=torch.long,
            )
            padded_main_advantages = pad_sequence(
                agent_advantages, batch_first=True, padding_value=0.0
            )

            padded_co_agent_advantages = pad_sequence(
                co_agent_advantages, batch_first=True, padding_value=0.0
            )

            # Create training batch data
            sum_of_ad_credits = padded_main_advantages + padded_co_agent_advantages
            self.rollout_tally.add_metric(
                path=["sum_of_ad_credits"],
                rollout_tally_item=RolloutTallyItem(
                    crn_ids=agent_data.main_data.crn_ids,
                    rollout_ids=agent_data.main_data.rollout_ids,
                    agent_ids=agent_data.main_data.agent_ids,
                    metric_matrix=sum_of_ad_credits,
                ),
            )

            if not self.skip_discounted_state_visitation:
                sum_of_ad_credits = get_discounted_state_visitation_credits(
                    sum_of_ad_credits,
                    self.discount_factor,
                )
                self.rollout_tally.add_metric(
                    path=["discounted_state_visitation_credits"],
                    rollout_tally_item=RolloutTallyItem(
                        crn_ids=agent_data.main_data.crn_ids,
                        rollout_ids=agent_data.main_data.rollout_ids,
                        agent_ids=agent_data.main_data.agent_ids,
                        # BUG FIX: original referenced undefined `sub_tensors`
                        # (NameError); log the just-computed discounted credits.
                        metric_matrix=sum_of_ad_credits,
                    ),
                )

            # Slice back to jagged and convert to tokenwise credits
            sum_of_ad_credits = [sum_of_ad_credits[i, : lengths[i]] for i in range(B)]
            self.training_data[agent_id] = agent_data.main_data
            self.training_data[agent_id].batch_credits = sum_of_ad_credits
src_code_for_reproducibility/training/training_data_utils.py ADDED
@@ -0,0 +1,394 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dataclasses import dataclass
2
+ from typing import Literal, Optional, Tuple
3
+
4
+ import torch
5
+ from torch.nn.utils.rnn import pad_sequence
6
+
7
+ from mllm.markov_games.rollout_tree import (
8
+ ChatTurn,
9
+ RolloutTreeBranchNode,
10
+ RolloutTreeNode,
11
+ RolloutTreeRootNode,
12
+ )
13
+
14
+
15
@dataclass
class AdvantagePacket:
    """Per-agent advantage payload exchanged between trainers."""

    agent_id: str
    rollout_ids: torch.IntTensor  # (B,)
    # list-of-tensors
    main_advantages: list[torch.FloatTensor]
21
+
22
+
23
class TrainingChatTurn:
    # TODO: simplify by making this a child of ChatTurn
    """
    A single agent's chat turn, like ChatTurn but carrying the time step.
    """

    # Attribute names, in declaration order; also drives dict() serialization.
    _FIELDS = (
        "time_step",
        "role",
        "agent_id",
        "content",
        "chat_template_token_ids",
        "reasoning_content",
        "is_state_end",
        "out_token_ids",
        "log_probs",
    )

    def __init__(
        self,
        time_step: int,
        role: str,
        agent_id: str,
        content: str,
        chat_template_token_ids: list[int],
        reasoning_content: str,
        is_state_end: bool,
        out_token_ids: Optional[list[int]] = None,
        log_probs: Optional[list[float]] = None,
    ) -> None:
        self.time_step = time_step
        self.role = role
        self.agent_id = agent_id
        self.content = content
        self.chat_template_token_ids = chat_template_token_ids
        self.reasoning_content = reasoning_content
        self.is_state_end = is_state_end
        self.out_token_ids = out_token_ids
        self.log_probs = log_probs

    def dict(self):
        """Return a plain-dict view of all fields."""
        return {name: getattr(self, name) for name in self._FIELDS}
64
+
65
+
66
def get_main_chat_list_and_rewards(
    agent_id: str, root: RolloutTreeRootNode | RolloutTreeNode
) -> Tuple[list[TrainingChatTurn], torch.FloatTensor]:
    """
    Walk a rollout tree and collect one agent's chat turns and rewards.

    Branch nodes are resolved by always following the main path, so the
    result is a single linear trajectory.

    Returns:
        (chat turns with time steps attached, per-timestep reward tensor)
    """
    # TODO; extend for all trees, not just linear
    node = root.child if isinstance(root, RolloutTreeRootNode) else root

    turns: list[TrainingChatTurn] = []
    step_rewards: list[float] = []
    while node is not None:
        # A branch node carries no payload itself; descend into the main path.
        if isinstance(node, RolloutTreeBranchNode):
            node = node.main_child
        step_rewards.append(node.step_log.simulation_step_log.rewards[agent_id])
        turns.extend(
            TrainingChatTurn(time_step=node.time_step, **turn.model_dump())
            for turn in node.step_log.action_logs[agent_id].chat_turns
        )
        node = node.child
    return turns, torch.FloatTensor(step_rewards)
96
+
97
+
98
+ def get_tokenwise_credits(
99
+ # B := batch size, S := number of tokens / seq. length, T := number of states. `j` stands for jagged (see pytorch nested tensors.)
100
+ batch_timesteps: torch.IntTensor | torch.Tensor, # (B, jS),
101
+ batch_credits: torch.FloatTensor | torch.Tensor, # (B, jT)
102
+ ) -> torch.FloatTensor | torch.Tensor: # (B, jS)
103
+ """
104
+ TOWRITE
105
+ """
106
+ # TODO vectorize this code
107
+ batch_token_credits = []
108
+ for credits, timesteps in zip(batch_credits, batch_timesteps):
109
+ token_credits = torch.zeros_like(
110
+ timesteps,
111
+ dtype=credits.dtype,
112
+ device=timesteps.device,
113
+ )
114
+ for idx, credit in enumerate(credits):
115
+ token_credits[timesteps == idx] = credit
116
+ batch_token_credits.append(token_credits)
117
+ return batch_token_credits
118
+
119
+
120
@dataclass
class TrajectoryBatch:
    """
    Tensorized batch of trajectories using list-of-tensors for jagged dimensions.

    Shape legend: B := batch size, S := number of tokens / sequence length,
    T := number of states; a leading `j` marks a jagged (per-sample
    variable-length) dimension.
    """

    # B := batch size, S := number of tokens / seq. length, T := number of states.
    rollout_ids: torch.IntTensor  # (B,)
    crn_ids: torch.IntTensor  # (B,) — presumably common-random-number ids; confirm with caller
    agent_ids: list[str]  # (B,)
    batch_input_ids: list[torch.LongTensor]  # List[(jS,)]
    batch_action_mask: list[torch.BoolTensor]  # List[(jS,)] — True on assistant (generated) tokens
    batch_entropy_mask: list[torch.BoolTensor]  # List[(jS,)]
    batch_timesteps: list[torch.IntTensor]  # List[(jS,)] — state index of each token
    batch_state_ends_mask: list[torch.BoolTensor]  # List[(jS,)] — True where a state description ends
    batch_engine_log_probs: Optional[list[torch.FloatTensor]]  # List[(jS,)]
    batch_rewards: list[torch.FloatTensor]  # List[(jT,)] — one reward per state
    batch_credits: Optional[list[torch.FloatTensor]] = None  # List[(jS,)]

    def __post_init__(self):
        """
        Validate per-sample consistency.

        Checks: every per-sample list has length B; per sample, the number of
        rewards equals the number of distinct timesteps; all jagged tensors
        agree on token count; and the count of state-end markers equals the
        number of rewards.

        NOTE(review): batch_engine_log_probs is typed Optional but is indexed
        unconditionally below — passing None would raise here; confirm intent.
        """
        B = self.rollout_ids.shape[0]
        assert (
            self.crn_ids.shape[0] == B
        ), "RNG IDs must have length equal to batch size."
        assert (
            len(self.agent_ids) == B
        ), "agent_ids must have length equal to batch size."
        assert (
            len(self.batch_input_ids)
            == len(self.batch_action_mask)
            == len(self.batch_entropy_mask)
            == len(self.batch_timesteps)
            == len(self.batch_state_ends_mask)
            == len(self.batch_engine_log_probs)
            == len(self.batch_rewards)
            == B
        ), "Jagged lists must all have length equal to batch size."

        for b in range(B):
            nb_rewards = int(self.batch_rewards[b].shape[0])
            # Timesteps are 0-based state indices, so count = max + 1.
            nb_timesteps = int(torch.max(self.batch_timesteps[b]).item()) + 1
            assert (
                nb_rewards == nb_timesteps
            ), "Number of rewards and timesteps mismatch."
            assert (
                self.batch_input_ids[b].shape[0]
                == self.batch_action_mask[b].shape[0]
                == self.batch_entropy_mask[b].shape[0]
                == self.batch_engine_log_probs[b].shape[0]
                == self.batch_timesteps[b].shape[0]
            ), "Tensors must have the same shape along the jagged dimension."
            assert (
                int(self.batch_state_ends_mask[b].sum())
                == self.batch_rewards[b].shape[0]
            ), "Number of rewards must match number of state ends."

    """
    Entries:
        Here, we ignore the batch dimension.
        input_ids:
            All of the tokens of both the user and the assistant, flattened.
        action_mask:
            Set to true on the tokens of the assistant (tokens generated by the model).
        timesteps:
            Therefore, max(timesteps) = Ns - 1.
        state_ends_idx:
            Indices of the tokens at which state descriptions end.
        rewards:
            rewards[t] := R_t(s_t, a_t)
    Example:
        position:     "0 1 2 3 4 5 6 7 8 9 10 11 12 13 14"
        input_ids:    "U U U a a a U a U a a  a  U  U  U" (U := User, a := Assistant)
        action_mask:  "x x x ✓ ✓ ✓ x ✓ x ✓ ✓  ✓  x  x  x"
        timestep:     "0 0 0 0 0 0 1 1 1 1 1  1  2  2  2"
        state_ends_dx: [2, 6, 14]
        rewards:      [r0, r1, r2]
    """

    def __getitem__(self, key) -> "TrajectoryBatch":
        # Slicing returns a new TrajectoryBatch over the selected samples.
        # Non-slice keys fall through and return None — presumably only
        # slices are used by callers; confirm.
        if isinstance(key, slice):
            return TrajectoryBatch(
                rollout_ids=self.rollout_ids.__getitem__(key),
                crn_ids=self.crn_ids.__getitem__(key),
                agent_ids=self.agent_ids[key],
                batch_input_ids=self.batch_input_ids[key],
                batch_action_mask=self.batch_action_mask[key],
                batch_entropy_mask=self.batch_entropy_mask[key],
                batch_timesteps=self.batch_timesteps[key],
                batch_state_ends_mask=self.batch_state_ends_mask[key],
                batch_engine_log_probs=self.batch_engine_log_probs[key],
                batch_rewards=self.batch_rewards[key],
                batch_credits=self.batch_credits[key] if self.batch_credits else None,
            )

    def __len__(self):
        # Batch size := number of jagged samples.
        return len(self.batch_input_ids)

    def to(self, device):
        """Move all tensors (including each jagged list entry) to ``device`` in place."""
        self.rollout_ids = self.rollout_ids.to(device)
        self.crn_ids = self.crn_ids.to(device)
        self.batch_input_ids = [t.to(device) for t in self.batch_input_ids]
        self.batch_action_mask = [t.to(device) for t in self.batch_action_mask]
        self.batch_entropy_mask = [t.to(device) for t in self.batch_entropy_mask]
        self.batch_timesteps = [t.to(device) for t in self.batch_timesteps]
        self.batch_state_ends_mask = [t.to(device) for t in self.batch_state_ends_mask]
        self.batch_engine_log_probs = [
            t.to(device) for t in self.batch_engine_log_probs
        ]
        self.batch_rewards = [t.to(device) for t in self.batch_rewards]
        self.batch_credits = (
            [t.to(device) for t in self.batch_credits] if self.batch_credits else None
        )

    def get_padded_tensors_for_critic(self):
        """
        Right-pad the jagged inputs into dense tensors for critic evaluation.

        Returns:
            padded_batch_input_ids: (B, P) — zero-padded token ids
            padded_batch_state_ends_mask: (B, P) — bool, False in padding
            timestep_counts: (B,) long tensor — number of states per sample
        """
        padded_batch_input_ids = pad_sequence(
            self.batch_input_ids, batch_first=True, padding_value=0
        )
        padded_batch_state_ends_mask = pad_sequence(
            self.batch_state_ends_mask, batch_first=True, padding_value=0
        ).bool()
        # number of states equals number of True in state_ends_mask
        timestep_counts = torch.tensor(
            [int(mask.sum().item()) for mask in self.batch_state_ends_mask],
            device=padded_batch_input_ids.device,
            dtype=torch.long,
        )
        return padded_batch_input_ids, padded_batch_state_ends_mask, timestep_counts
256
+
257
+
258
timestep = int  # type alias: timesteps are represented as plain ints
259
+
260
+
261
+ @dataclass
262
+ class PaddedTensorTrainingBatch:
263
+ batch_input_ids: torch.LongTensor | torch.Tensor
264
+ batch_action_mask: torch.BoolTensor | torch.Tensor
265
+ batch_entropy_mask: Optional[torch.BoolTensor | torch.Tensor]
266
+ batch_credits: torch.FloatTensor | torch.Tensor
267
+ batch_engine_log_probs: torch.FloatTensor | torch.Tensor
268
+ batch_timesteps: torch.IntTensor | torch.Tensor
269
+
270
+ def __len__(self):
271
+ return self.batch_input_ids.shape[0]
272
+
273
+ def to(self, device):
274
+ self.batch_input_ids = self.batch_input_ids.to(device)
275
+ self.batch_action_mask = self.batch_action_mask.to(device)
276
+ self.batch_entropy_mask = self.batch_entropy_mask.to(device)
277
+ self.batch_credits = self.batch_credits.to(device)
278
+ self.batch_engine_log_probs = self.batch_engine_log_probs.to(device)
279
+ self.batch_timesteps = self.batch_timesteps.to(device)
280
+
281
+
282
@dataclass
class TrainingBatch:
    """
    Jagged batch of tokenized trajectories ready for policy training.

    Shape legend: B := batch size, S := tokens per sample; a leading `j`
    marks a jagged (per-sample variable-length) dimension.
    """

    rollout_ids: torch.IntTensor | torch.Tensor  # (B,)
    batch_input_ids: list[torch.LongTensor]  # List[(jS,)]
    batch_action_mask: list[torch.BoolTensor]  # List[(jS,)]
    batch_entropy_mask: Optional[list[torch.BoolTensor]]  # List[(jS,)]
    batch_credits: list[torch.FloatTensor]  # List[(jS,)] — per-token credits
    batch_engine_log_probs: list[torch.FloatTensor]  # List[(jS,)]
    batch_timesteps: list[torch.IntTensor]  # List[(jS,)]

    def __post_init__(self):
        """
        Validate that every jagged list has length B and that per-sample
        tensors agree on token count.

        NOTE(review): batch_entropy_mask is typed Optional yet len() is taken
        unconditionally; the zip below also skips it, so its per-sample token
        counts are never checked — confirm both are intentional.
        """
        # Put everything in the right device
        # self.rollout_ids = self.rollout_ids.to("cuda" if torch.cuda.is_available() else "cpu")
        # self.batch_input_ids = self.batch_input_ids.to("cuda" if torch.cuda.is_available() else "cpu")
        # self.batch_action_mask = self.batch_action_mask.to("cuda" if torch.cuda.is_available() else "cpu")
        # self.batch_credits = self.batch_credits.to("cuda" if torch.cuda.is_available() else "cpu")
        # Ensure batch dimension is present
        assert (
            len(self.batch_input_ids)
            == len(self.batch_action_mask)
            == len(self.batch_entropy_mask)
            == len(self.batch_credits)
            == len(self.batch_engine_log_probs)
            == len(self.batch_timesteps)
            == self.rollout_ids.shape[0]
        ), "Jagged lists must all have length equal to batch size."
        for inp, mask, cred, engine_log_prob, timestep in zip(
            self.batch_input_ids,
            self.batch_action_mask,
            self.batch_credits,
            self.batch_engine_log_probs,
            self.batch_timesteps,
        ):
            assert (
                inp.shape[0]
                == mask.shape[0]
                == cred.shape[0]
                == engine_log_prob.shape[0]
                == timestep.shape[0]
            ), "Tensors must have the same shapes along the jagged dimension."

    def __getitem__(self, key) -> "TrainingBatch":
        # Slicing returns a new TrainingBatch over the selected samples.
        # Non-slice keys fall through and return None — presumably only
        # slices are used by callers; confirm.
        if isinstance(key, slice):
            return TrainingBatch(
                rollout_ids=self.rollout_ids.__getitem__(key),
                batch_input_ids=self.batch_input_ids[key],
                batch_action_mask=self.batch_action_mask[key],
                batch_entropy_mask=self.batch_entropy_mask[key],
                batch_credits=self.batch_credits[key],
                batch_engine_log_probs=self.batch_engine_log_probs[key],
                batch_timesteps=self.batch_timesteps[key],
            )

    def __len__(self):
        # Batch size := number of jagged samples.
        return len(self.batch_input_ids)

    def to(self, device):
        """Move all tensors (including each jagged list entry) to ``device`` in place.

        NOTE(review): assumes batch_entropy_mask is not None despite its
        Optional type — confirm callers always populate it.
        """
        self.rollout_ids = self.rollout_ids.to(device)
        self.batch_input_ids = [t.to(device) for t in self.batch_input_ids]
        self.batch_action_mask = [t.to(device) for t in self.batch_action_mask]
        self.batch_entropy_mask = [t.to(device) for t in self.batch_entropy_mask]
        self.batch_credits = [t.to(device) for t in self.batch_credits]
        self.batch_engine_log_probs = [
            t.to(device) for t in self.batch_engine_log_probs
        ]
        self.batch_timesteps = [t.to(device) for t in self.batch_timesteps]

    def get_padded_tensors(self, padding: float = 0.0):
        """
        Right-pad every jagged field into a dense (B, P) tensor and bundle
        them into a PaddedTensorTrainingBatch.

        Args:
            padding: pad value; cast to int for token ids and float for
                credits/log-probs. Masks pad with False, timesteps with 0.
        Always pad to the right.
        """
        padded_batch_input_ids = pad_sequence(
            self.batch_input_ids, batch_first=True, padding_value=int(padding)
        )
        padded_batch_action_mask = pad_sequence(
            [m.to(dtype=torch.bool) for m in self.batch_action_mask],
            batch_first=True,
            padding_value=False,
        )
        padded_batch_entropy_mask = pad_sequence(
            self.batch_entropy_mask, batch_first=True, padding_value=False
        )
        padded_batch_credits = pad_sequence(
            self.batch_credits, batch_first=True, padding_value=float(padding)
        )
        padded_batch_engine_log_probs = pad_sequence(
            self.batch_engine_log_probs, batch_first=True, padding_value=float(padding)
        )
        padded_batch_timesteps = pad_sequence(
            self.batch_timesteps, batch_first=True, padding_value=0
        )

        return PaddedTensorTrainingBatch(
            padded_batch_input_ids,
            padded_batch_action_mask,
            padded_batch_entropy_mask,
            padded_batch_credits,
            padded_batch_engine_log_probs,
            padded_batch_timesteps,
        )

    def append(self, other: "TrainingBatch"):
        """Concatenate ``other``'s samples onto this batch in place."""
        self.rollout_ids = torch.cat([self.rollout_ids, other.rollout_ids])
        self.batch_input_ids.extend(other.batch_input_ids)
        self.batch_action_mask.extend(other.batch_action_mask)
        self.batch_entropy_mask.extend(other.batch_entropy_mask)
        self.batch_credits.extend(other.batch_credits)
        self.batch_engine_log_probs.extend(other.batch_engine_log_probs)
        self.batch_timesteps.extend(other.batch_timesteps)
392
+
393
+
394
timestep = int  # NOTE(review): duplicate of the alias defined earlier in this module — likely removable
src_code_for_reproducibility/utils/__init__.py ADDED
File without changes
src_code_for_reproducibility/utils/dict_get_path.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
def get_from_nested_dict(a: dict, path) -> any:
    """Look up ``path`` (a single key or a sequence of keys) in nested dict ``a``.

    Best-effort accessor: any lookup failure yields ``None`` instead of raising.
    """
    try:
        keys = [path] if isinstance(path, str) else path
        node = a
        for key in keys:
            node = node[key]
        return node
    except Exception:
        return None
+ return None
src_code_for_reproducibility/utils/format_time.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
def format_time(seconds):
    """Render a duration in seconds as 'Xh Ym Zs', 'Ym Zs', or 'Zs'."""
    if seconds >= 3600:
        hours, minutes, secs = (
            int(seconds // 3600),
            int((seconds % 3600) // 60),
            int(seconds % 60),
        )
        return f"{hours}h {minutes}m {secs}s"
    if seconds >= 60:
        return f"{int(seconds // 60)}m {int(seconds % 60)}s"
    return f"{int(seconds)}s"
src_code_for_reproducibility/utils/gather_training_stats.py ADDED
@@ -0,0 +1,257 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import copy
2
+ import csv
3
+ import gc
4
+ import json
5
+ import logging
6
+ import os
7
+ import pickle
8
+ import random
9
+ import re
10
+ import subprocess
11
+ import sys
12
+ import time
13
+ from datetime import datetime
14
+ from statistics import mean
15
+ from typing import Any, Dict
16
+
17
+ import hydra
18
+ import matplotlib.pyplot as plt
19
+ import numpy as np
20
+ import pandas as pd
21
+ import torch
22
+ from omegaconf import OmegaConf
23
+
24
+ from mllm.training.tally_metrics import Tally
25
+ from mllm.utils.stat_pack import StatPack
26
+
27
+
28
def get_from_nested_dict(dictio: dict, path: list[str]):
    """Walk ``path`` through nested dicts; the final hop uses ``.get`` (None if absent)."""
    node = dictio
    depth = len(path) - 1
    for key in path[:depth]:
        node = node[key]
    return node.get(path[depth])
+
33
+
34
def set_at_path(dictio: dict, path: list[str], value):
    """Assign ``value`` at ``path`` in ``dictio``, creating intermediate dicts as needed."""
    node = dictio
    for key in path[:-1]:
        node = node.setdefault(key, {})
    node[path[-1]] = value
+
41
+
42
def produce_tabular_render(inpath: str, outpath: str = None):
    """
    Render a contextualized-metrics JSON file to one CSV per rollout.

    Args:
        inpath: path to a JSON file mapping rollout paths to lists of
            per-step metric dicts (all dicts assumed to share the same keys
            as the first entry).
        outpath: optional output directory for the CSVs. When None (the
            default), files go under
            ``<dir(inpath)>/contextualized_tabular_renders/``.
            Fix: the original raised NameError whenever ``outpath`` was
            supplied, because the output path was only assigned on the
            ``outpath is None`` branch.
    """
    with open(inpath, "r") as f:
        data = json.load(f)
    for rollout_path in data.keys():
        # Flatten the rollout path into a single safe filename component.
        m_name = rollout_path.replace("/", "|").replace(".json", "")
        m_name += "_tabular_render.render.csv"
        if outpath is None:
            out_dir = os.path.join(
                os.path.split(inpath)[0], "contextualized_tabular_renders"
            )
        else:
            out_dir = outpath
        os.makedirs(out_dir, exist_ok=True)
        m_path = os.path.join(out_dir, m_name)
        metrics = data[rollout_path]
        # Transpose the list-of-dicts into dict-of-lists for DataFrame construction.
        d = {k: [] for k in metrics[0].keys()}
        for m in metrics:
            for k, v in m.items():
                d[k].append(v)
        pd.DataFrame(d).to_csv(m_path)
+
69
+
70
def get_metric_paths(data: list[dict]):
    """Return the key-path (list of keys) of every leaf in the first dict of ``data``."""
    paths = []

    def _walk(node: dict, prefix: list):
        for key, value in node.items():
            branch = prefix + [key]
            if isinstance(value, dict):
                _walk(value, branch)
            else:
                paths.append(branch)

    _walk(data[0], [])
    return paths
84
+
85
+
86
def print_metric_paths(data: list[dict]):
    """Debug helper: print one metric key-path per line."""
    for path in get_metric_paths(data):
        print(path)
90
+
91
+
92
def get_metric_iteration_list(data: list[dict], metric_path: list[str]):
    """Collect the value at ``metric_path`` from every iteration dict in ``data``."""
    path = [metric_path] if isinstance(metric_path, str) else metric_path
    return [get_from_nested_dict(entry, path) for entry in data]
+ return sgl
99
+
100
+
101
def to_1d_numeric(x):
    """Coerce ``x`` into a flat float array, or return None if not numeric.

    Accepts scalars, numpy arrays, or arbitrarily nested lists/tuples of
    those; non-numeric inputs (strings, dicts, ...) yield None.
    """
    if x is None:
        return None
    if isinstance(x, (int, float, np.number)):
        return np.array([float(x)], dtype=float)
    if isinstance(x, np.ndarray):
        try:
            return x.astype(float).ravel()
        except Exception:
            return None
    if isinstance(x, (list, tuple)):
        pieces = [
            arr
            for arr in (to_1d_numeric(element) for element in x)
            if arr is not None and arr.size > 0
        ]
        return np.concatenate(pieces) if pieces else None
    return None
122
+
123
+
124
def get_single_metric_vector(data, metric_path, iterations=None):
    """
    Concatenate the numeric values found at ``metric_path`` across ``data``.

    Args:
        data: list of (possibly nested) metric dicts, one per iteration.
        metric_path: key or list of keys addressing the metric.
        iterations: kept for interface compatibility; the original computed
            a default from it (``len(data)``) but never used the value.

    Returns:
        1-D float ndarray; empty when no numeric values were found.
    """
    if isinstance(metric_path, str):
        metric_path = [metric_path]
    if iterations is None:  # fix: was `iterations == None`
        iterations = len(data)
    vecs = []
    for d in data:
        arr = to_1d_numeric(get_from_nested_dict(d, metric_path))
        if arr is not None:
            vecs.append(arr)

    return np.concatenate(vecs) if vecs else np.empty(0, dtype=float)
137
+
138
+
139
+ def _load_metrics_file(file_path: str):
140
+ if not (file_path.endswith(".tally.pkl") or file_path.endswith(".pkl")):
141
+ raise ValueError("Only *.tally.pkl files are supported.")
142
+ import pickle
143
+
144
+ with open(file_path, "rb") as f:
145
+ tree = pickle.load(f)
146
+ return tree
147
+
148
+
149
def get_leaf_items(array_tally: dict, prefix: list[str] = None):
    """Yield (key_path, leaf_value) pairs from a nested dict, depth-first.

    Keys are stringified as the path is built; ``prefix`` is prepended to
    every yielded path.
    """
    base = [] if prefix is None else prefix
    for key, value in array_tally.items():
        path = base + [str(key)]
        if isinstance(value, dict):
            yield from get_leaf_items(value, path)
        else:
            yield path, value
158
+
159
+
160
+ def _sanitize_filename_part(part: str) -> str:
161
+ s = part.replace("/", "|")
162
+ s = s.replace(" ", "_")
163
+ return s
164
+
165
+
166
def render_rt_tally_pkl_to_csvs(pkl_path: str, outdir: str):
    """
    This method takes care of tokenwise logging.

    Loads a pickled rollout-tally payload from ``pkl_path`` and writes one CSV
    per leaf metric into ``outdir``. Each CSV row is
    (agent_id, crn_id, rollout_id, t_0, t_1, ...).

    NOTE(review): assumes each leaf value is a non-empty list of objects
    exposing ``crn_ids``, ``rollout_ids``, ``agent_ids`` and a 1- or 2-D
    ``metric_matrix`` whose column count is uniform across items — confirm
    against the tally writer.
    """
    with open(pkl_path, "rb") as f:
        payload = pickle.load(f)
    # Backward compatibility: older tallies stored the dict directly
    if isinstance(payload, dict) and "array_tally" in payload:
        array_tally = payload.get("array_tally", {})
    else:
        array_tally = payload

    os.makedirs(outdir, exist_ok=True)
    # Trainer id is derived from the pickle filename.
    trainer_id = os.path.basename(pkl_path).replace(".rt_tally.pkl", "")
    for path_list, rollout_tally_items in get_leaf_items(array_tally):
        # Create file and initiate writer
        path_part = ".".join(_sanitize_filename_part(p) for p in path_list)
        filename = f"{trainer_id}__{path_part}.render.csv"
        out_path = os.path.join(outdir, filename)

        # Write metric rows to CSV
        with open(out_path, "w", newline="") as f:
            writer = csv.writer(f)

            # Write header row - need to determine metric column count from first rollout_tally_item
            first_item = rollout_tally_items[0]
            metric_cols = (
                first_item.metric_matrix.shape[1]
                if first_item.metric_matrix.ndim > 1
                else 1
            )
            header = ["agent_id", "crn_id", "rollout_id"] + [
                f"t_{i}" for i in range(metric_cols)
            ]
            writer.writerow(header)

            for rollout_tally_item in rollout_tally_items:
                crn_ids = rollout_tally_item.crn_ids
                rollout_ids = rollout_tally_item.rollout_ids
                agent_ids = rollout_tally_item.agent_ids
                metric_matrix = rollout_tally_item.metric_matrix
                for i in range(metric_matrix.shape[0]):
                    row_vals = metric_matrix[i].reshape(-1)
                    # Convert row_vals to a list to avoid numpy concatenation issues
                    row_vals = (
                        row_vals.tolist()
                        if hasattr(row_vals, "tolist")
                        else list(row_vals)
                    )
                    row_prefix = [
                        agent_ids[i],
                        crn_ids[i],
                        rollout_ids[i],
                    ]
                    writer.writerow(row_prefix + row_vals)
+
222
+
223
def tally_to_stat_pack(tally: Dict[str, Any]):
    """
    Flatten a (possibly nested) tally dict into a StatPack of scalar means.

    Each leaf value is reduced with ``np.mean`` and stored under the key
    obtained by joining its path with underscores.

    NOTE(review): the nested ``get_from_nested_dict`` / ``get_metric_paths``
    helpers shadow the module-level functions of the same names — presumably
    kept for backward compatibility, per the comment below.
    """
    stat_pack = StatPack()
    # Unwrap the newer payload format that nests everything under "array_tally".
    if "array_tally" in tally:
        tally = tally["array_tally"]

    # backward compatibility: will remove later, flatten keys in tally
    def get_from_nested_dict(dictio: dict, path: list[str]):
        for sp in path[:-1]:
            dictio = dictio[sp]
        return dictio.get(path[-1])

    def get_metric_paths(tally: dict):
        paths = []

        def traverse_dict(tally, current_path=[]):
            for key, value in tally.items():
                new_path = current_path + [key]
                if isinstance(value, dict):
                    traverse_dict(value, new_path)
                else:
                    paths.append(new_path)

        traverse_dict(tally)
        return paths

    paths = get_metric_paths(tally)
    modified_tally = {}
    for p in paths:
        val = get_from_nested_dict(tally, p)
        # Reduce each leaf (scalar or array-like) to its mean.
        modified_tally["_".join(p)] = np.mean(val)
    del tally
    tally = modified_tally
    for key, value in tally.items():
        stat_pack.add_stat(key, value)
    return stat_pack
src_code_for_reproducibility/utils/get_coagent_id.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+
2
+ def get_coagent_id(ids: list[str], agent_id:str) -> str | None:
3
+ for id in ids:
4
+ if id != agent_id: return id
src_code_for_reproducibility/utils/get_stochastic_game_lengths.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
def get_stochastic_game_lengths(
    max_length,
    nb_games,
    continuation_prob,
    same_length_batch=False
):
    """
    Sample game lengths from a geometric distribution, capped at max_length.

    Args:
        max_length (int): The maximum length a game can have.
        nb_games (int): The number of games to generate lengths for.
        continuation_prob (float): The probability of the game continuing after each round.
        same_length_batch (bool): If True, all games will have the same length.

    Returns:
        Array: An array of game lengths.
    """
    # Deterministic shortcut: games never stop early.
    if continuation_prob == 1:
        return [max_length] * nb_games
    stop_prob = 1 - continuation_prob
    if same_length_batch:
        # Draw a single length and broadcast it across the batch.
        game_lengths = np.repeat(np.random.geometric(stop_prob, 1), nb_games)
    else:
        game_lengths = np.random.geometric(stop_prob, nb_games)

    # Cap every draw at max_length.
    return np.minimum(game_lengths, max_length).tolist()
+ return game_lengths.tolist()
src_code_for_reproducibility/utils/kill_sglang.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import psutil
2
+ import signal
3
+
4
# Substring identifying SGLang scheduler processes in a command line.
target_name = "sglang::scheduler"
# PIDs we have sent SIGKILL to; module-level, so it accumulates across calls.
killed = []

def kill_sglang():
    """Force-kill every running SGLang scheduler process.

    Scans all processes and sends SIGKILL to any whose command line contains
    ``target_name``; killed PIDs are appended to the module-level ``killed``
    list. Processes that vanish mid-scan or deny access are skipped silently.
    """
    for proc in psutil.process_iter(['pid', 'name', 'cmdline']):
        try:
            # Some processes may not have a name or cmdline
            cmdline = " ".join(proc.info['cmdline']) if proc.info['cmdline'] else ""
            if target_name in cmdline:
                print(f"Killing PID {proc.pid}: {cmdline}")
                proc.send_signal(signal.SIGKILL)
                killed.append(proc.pid)
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            pass
src_code_for_reproducibility/utils/output_source_code.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
def output_source_code(model, output_path: str) -> None:
    """
    Outputs the source code of the model to the given path.

    Writes ``model.source_code`` to ``output_path``, overwriting any
    existing file.
    """
    source = model.source_code
    with open(output_path, "w") as handle:
        handle.write(source)
src_code_for_reproducibility/utils/resource_context.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import time
3
+ from contextlib import contextmanager
4
+
5
+ import torch
6
+
7
+
8
def vram_usage():
    """Summarize allocated/reserved CUDA memory (in GB) for every visible GPU.

    Returns the empty string when no CUDA devices are present. Per-GPU
    entries are concatenated with no separator (matches the original format).
    """
    gib = 1024**3
    parts = []
    for device in range(torch.cuda.device_count()):
        allocated_gb = torch.cuda.memory_allocated(device) / gib
        reserved_gb = torch.cuda.memory_reserved(device) / gib
        parts.append(
            f"GPU {device}: Memory Allocated: {allocated_gb:.2f} GB, Memory Reserved: {reserved_gb:.2f} GB"
        )
    return "".join(parts)
+
20
+
21
def ram_usage():
    """Report this process's resident memory (RSS) in GB."""
    import psutil

    rss_bytes = psutil.Process().memory_info().rss
    return f"RAM Usage: {rss_bytes / (1024**3):.2f} GB"
28
+
29
+
30
@contextmanager
def resource_logger_context(logger: logging.Logger, task_description: str):
    """
    Context manager to log the resource usage of the current task.
    Args:
        logger: The logger to use to log the resource usage.
        task_description: The description of the task to log.
    Returns:
        None

    Logs, on exit (even on exception, via finally): VRAM delta over the
    block, current VRAM fraction, peak allocated fraction within the block,
    and wall-clock duration.

    NOTE(review): allocated memory is a subset of reserved in PyTorch, so
    ``allocated + reserved`` double-counts; treated here as a rough combined
    figure — confirm intended. Only CUDA device 0 is measured.
    """
    try:
        initial_time = time.time()
        # Assume CUDA is available and use device 0 only
        total_mem_bytes = torch.cuda.get_device_properties(0).total_memory
        initial_total_bytes = (
            torch.cuda.memory_allocated(0) + torch.cuda.memory_reserved(0)
        )
        # Reset so max_memory_allocated reflects only this block.
        torch.cuda.reset_peak_memory_stats(0)
        yield None
    finally:
        final_time = time.time()
        # Ensure kernels within the block are accounted for
        torch.cuda.synchronize()

        # Compute metrics
        final_allocated_bytes = torch.cuda.memory_allocated(0)
        final_reserved_bytes = torch.cuda.memory_reserved(0)
        final_total_bytes = final_allocated_bytes + final_reserved_bytes

        # Guard against a reported total of 0 to avoid division by zero.
        delta_vram_percent_total = (
            100 * (final_total_bytes - initial_total_bytes) / total_mem_bytes
            if total_mem_bytes
            else 0.0
        )
        current_percent_vram_taken = (
            100 * final_total_bytes / total_mem_bytes if total_mem_bytes else 0.0
        )
        block_peak_percent = (
            100 * torch.cuda.max_memory_allocated(0) / total_mem_bytes
            if total_mem_bytes
            else 0.0
        )
        delta_time_str = time.strftime(
            '%H:%M:%S', time.gmtime(final_time - initial_time)
        )

        logger.info(
            f"For task: {task_description}, ΔVRAM % (total): {delta_vram_percent_total:.2f}%, Current % of VRAM taken: {current_percent_vram_taken:.2f}%, Block Peak % of device VRAM: {block_peak_percent:.2f}%, ΔTime: {delta_time_str}"
        )
src_code_for_reproducibility/utils/rollout_tree_chat_htmls.py ADDED
@@ -0,0 +1,1921 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pathlib import Path
2
+ from typing import List
3
+
4
+ from mllm.utils.rollout_tree_gather_utils import *
5
+
6
+
7
+ def html_from_chat_turns(chat_turns: List[ChatTurnLog]) -> str:
8
+ """
9
+ Render chat turns as a single, wrapping sequence of messages in time order.
10
+ Keep badge and message bubble styles, include time on every badge and
11
+ include rewards on assistant badges. Each message is individually
12
+ hide/show by click; when hidden, only the badge remains and "(...)" is
13
+ shown inline (not inside a bubble).
14
+ """
15
+ import html
16
+ import re as _re
17
+
18
+ # Prepare ordering: sort by (time_step, original_index) to keep stable order within same step
19
+ indexed_turns = list(enumerate(chat_turns))
20
+ indexed_turns.sort(key=lambda t: (t[1].time_step, t[0]))
21
+ assistant_agents = sorted({t.agent_id for t in chat_turns if t.role == "assistant"})
22
+ enable_split_view = len(assistant_agents) == 2
23
+
24
+ # CSS styles (simplified layout; no time-step or agent-column backgrounds)
25
+ css = """
26
+ <style>
27
+ :root {
28
+ --font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
29
+ --bg: #ffffff;
30
+ --text: #1c0b00;
31
+ --muted-text: #2C3E50;
32
+ --accent-muted: #BDC3C7;
33
+ --accent-muted-2: #D0D7DE;
34
+ --panel-bg: #F8FAFC;
35
+ --reward-color: #3a2e00; /* dark text for reward pill */
36
+ --font-size: 14px;
37
+ --border-width: 2px;
38
+ --corner-radius: 6px;
39
+ --pill-radius-left: 999px 0 0 999px;
40
+ --pill-radius-right: 0 999px 999px 0;
41
+ --inset-shadow: 0 1px 0 rgba(0,0,0,0.03) inset;
42
+
43
+ /* Chat View Colors */
44
+ --alice-bg: #dcf8c6;
45
+ --alice-border: #0eb224;
46
+ --bob-bg: #ffe4cc;
47
+ --bob-border: #ef8323;
48
+ --user-bg: #f5f5f5;
49
+ --chat-bg: #ffffff;
50
+ }
51
+ body {
52
+ font-family: var(--font-family);
53
+ margin: 12px;
54
+ background-color: var(--bg);
55
+ color: var(--text);
56
+ font-size: var(--font-size);
57
+ line-height: 1.5;
58
+ }
59
+
60
+ /* Chat View Styles */
61
+ #flow-chat {
62
+ max-width: 900px;
63
+ margin: 0 auto;
64
+ background: var(--chat-bg);
65
+ padding: 12px 16px 12px 8px;
66
+ border-radius: 8px;
67
+ }
68
+
69
+ .simultaneous-messages {
70
+ display: flex !important;
71
+ flex-direction: row !important;
72
+ flex-wrap: nowrap !important;
73
+ gap: 8px;
74
+ margin-bottom: 4px;
75
+ align-items: flex-start;
76
+ width: 100%;
77
+ overflow: hidden;
78
+ box-sizing: border-box;
79
+ }
80
+
81
+ .simultaneous-messages .chat-message {
82
+ flex: 1 1 0 !important;
83
+ margin-bottom: 0 !important;
84
+ display: flex !important;
85
+ flex-direction: row !important;
86
+ align-items: flex-start !important;
87
+ margin-left: 0 !important;
88
+ min-width: 0 !important;
89
+ max-width: 50% !important;
90
+ gap: 0 !important;
91
+ overflow: hidden !important;
92
+ }
93
+
94
+ .simultaneous-messages .chat-message-content {
95
+ max-width: 100% !important;
96
+ width: 100%;
97
+ align-items: flex-start !important;
98
+ margin-left: 0 !important;
99
+ overflow: hidden !important;
100
+ }
101
+
102
+ .simultaneous-messages .chat-message.agent-alice {
103
+ justify-content: flex-start !important;
104
+ }
105
+
106
+ .simultaneous-messages .chat-message.agent-bob {
107
+ justify-content: flex-end !important;
108
+ }
109
+
110
+ .simultaneous-messages .chat-message.agent-alice .chat-message-content {
111
+ margin-left: 0 !important;
112
+ align-items: flex-start !important;
113
+ }
114
+
115
+ .simultaneous-messages .chat-message.agent-bob .chat-message-content {
116
+ margin-left: auto !important;
117
+ margin-right: 0 !important;
118
+ align-items: flex-end !important;
119
+ }
120
+
121
+ .simultaneous-messages .chat-bubble {
122
+ max-width: 100%;
123
+ word-break: break-word;
124
+ overflow-wrap: break-word;
125
+ box-sizing: border-box;
126
+ }
127
+
128
+ .simultaneous-messages .chat-message.agent-alice .chat-bubble {
129
+ border-radius: 10px;
130
+ }
131
+
132
+ .simultaneous-messages .chat-message.agent-bob .chat-bubble {
133
+ border-radius: 10px;
134
+ }
135
+
136
+ .simultaneous-messages .chat-message.agent-alice .chat-header {
137
+ justify-content: flex-start;
138
+ flex-shrink: 0;
139
+ }
140
+
141
+ .simultaneous-messages .chat-message.agent-bob .chat-header {
142
+ justify-content: flex-end;
143
+ flex-shrink: 0;
144
+ }
145
+
146
+ .simultaneous-messages .chat-reasoning {
147
+ max-width: 100%;
148
+ overflow-wrap: break-word;
149
+ }
150
+
151
+ .chat-message {
152
+ display: flex;
153
+ margin-bottom: 2px;
154
+ align-items: flex-end;
155
+ gap: 6px;
156
+ position: relative;
157
+ margin-left: 36px;
158
+ }
159
+
160
+ .chat-message.agent-alice {
161
+ margin-left: 0;
162
+ }
163
+
164
+ .chat-message.agent-alice::before {
165
+ left: 0;
166
+ }
167
+
168
+ .chat-message.role-user {
169
+ opacity: 0.7;
170
+ }
171
+
172
+ .chat-message::before {
173
+ content: '';
174
+ position: absolute;
175
+ left: -36px;
176
+ top: 0;
177
+ bottom: 0;
178
+ width: 36px;
179
+ pointer-events: auto;
180
+ }
181
+
182
+ .merge-btn {
183
+ position: absolute;
184
+ left: -30px;
185
+ top: 50%;
186
+ transform: translateY(-50%);
187
+ width: 26px;
188
+ height: 26px;
189
+ border-radius: 4px;
190
+ border: 1.5px solid var(--accent-muted);
191
+ background: white;
192
+ cursor: pointer;
193
+ font-size: var(--font-size);
194
+ opacity: 0;
195
+ display: flex;
196
+ align-items: center;
197
+ justify-content: center;
198
+ transition: opacity 0.2s ease, transform 0.1s ease;
199
+ padding: 0;
200
+ line-height: 1;
201
+ z-index: 10;
202
+ }
203
+
204
+ .chat-message:hover .merge-btn,
205
+ .merge-btn:hover {
206
+ opacity: 1;
207
+ }
208
+
209
+ .merge-btn:hover {
210
+ background: var(--panel-bg);
211
+ border-color: var(--accent-muted-2);
212
+ transform: translateY(-50%) scale(1.15);
213
+ box-shadow: 0 2px 4px rgba(0, 0, 0, 0.15);
214
+ }
215
+
216
+ .merge-btn:active {
217
+ transform: translateY(-50%) scale(0.95);
218
+ }
219
+
220
+ .chat-message.agent-alice .merge-btn {
221
+ left: -30px;
222
+ }
223
+
224
+ .chat-message.role-user .merge-btn {
225
+ display: none !important;
226
+ }
227
+
228
+ .simultaneous-messages .merge-btn {
229
+ opacity: 0 !important;
230
+ pointer-events: none;
231
+ }
232
+
233
+ .simultaneous-messages {
234
+ padding: 6px 0 6px 0 !important;
235
+ margin-left: 0 !important;
236
+ margin-right: 0 !important;
237
+ position: relative !important;
238
+ background: transparent !important;
239
+ border-radius: 0 !important;
240
+ box-sizing: border-box !important;
241
+ overflow: visible !important;
242
+ max-width: 100% !important;
243
+ border: none !important;
244
+ transition: padding 0.2s ease !important;
245
+ }
246
+
247
+ .simultaneous-messages:hover {
248
+ padding-top: 40px !important;
249
+ }
250
+
251
+ .simultaneous-messages::before {
252
+ content: '⇅ Merged';
253
+ position: absolute;
254
+ left: 0 !important;
255
+ top: 8px !important;
256
+ font-size: var(--font-size);
257
+ font-weight: 500;
258
+ color: #888;
259
+ pointer-events: none;
260
+ opacity: 0;
261
+ transition: opacity 0.2s ease;
262
+ }
263
+
264
+ .simultaneous-messages:hover::before {
265
+ opacity: 1;
266
+ }
267
+
268
+ .unmerge-btn {
269
+ position: absolute !important;
270
+ right: 0 !important;
271
+ top: 6px !important;
272
+ width: 36px !important;
273
+ height: 28px !important;
274
+ border-radius: 5px !important;
275
+ border: 2px solid #d63031 !important;
276
+ background: white !important;
277
+ cursor: pointer !important;
278
+ font-size: var(--font-size) !important;
279
+ font-weight: bold !important;
280
+ color: #d63031 !important;
281
+ display: flex !important;
282
+ align-items: center !important;
283
+ justify-content: center !important;
284
+ transition: all 0.2s ease !important;
285
+ padding: 0 !important;
286
+ line-height: 1 !important;
287
+ z-index: 1000 !important;
288
+ flex: none !important;
289
+ pointer-events: auto !important;
290
+ box-shadow: 0 2px 6px rgba(214, 48, 49, 0.3) !important;
291
+ opacity: 0 !important;
292
+ }
293
+
294
+ .simultaneous-messages:hover .unmerge-btn {
295
+ opacity: 1 !important;
296
+ }
297
+
298
+ .unmerge-btn:hover {
299
+ background: #ffe5e5 !important;
300
+ border-color: #b71c1c !important;
301
+ transform: scale(1.1) !important;
302
+ box-shadow: 0 3px 8px rgba(214, 48, 49, 0.4) !important;
303
+ }
304
+
305
+ .unmerge-btn:active {
306
+ transform: scale(0.95) !important;
307
+ background: #ffcccc !important;
308
+ }
309
+
310
+ .chat-message-content {
311
+ max-width: 72%;
312
+ display: flex;
313
+ flex-direction: column;
314
+ gap: 2px;
315
+ }
316
+
317
+ .chat-message.agent-alice .chat-message-content {
318
+ align-items: flex-start;
319
+ }
320
+
321
+ .chat-message.agent-bob .chat-message-content {
322
+ align-items: flex-end;
323
+ margin-left: auto;
324
+ }
325
+
326
+ .chat-bubble {
327
+ padding: 6px 10px;
328
+ border-radius: 10px;
329
+ word-wrap: break-word;
330
+ position: relative;
331
+ box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
332
+ line-height: 1.4;
333
+ }
334
+
335
+ .chat-message.agent-alice .chat-bubble {
336
+ background: var(--alice-bg);
337
+ border: 2px solid var(--alice-border);
338
+ border-radius: 10px 10px 10px 2px;
339
+ }
340
+
341
+ .chat-message.agent-bob .chat-bubble {
342
+ background: var(--bob-bg);
343
+ border: 2px solid var(--bob-border);
344
+ border-radius: 10px 10px 2px 10px;
345
+ }
346
+
347
+ .chat-message.role-user .chat-bubble {
348
+ background: var(--user-bg);
349
+ border: 2px solid #d0d0d0;
350
+ }
351
+
352
+ .chat-header {
353
+ display: flex;
354
+ align-items: center;
355
+ gap: 4px;
356
+ margin-bottom: 2px;
357
+ font-size: var(--font-size);
358
+ font-weight: 600;
359
+ line-height: 1.2;
360
+ }
361
+
362
+ .chat-message.agent-alice .chat-header {
363
+ color: var(--alice-border);
364
+ }
365
+
366
+ .chat-message.agent-bob .chat-header {
367
+ color: var(--bob-border);
368
+ }
369
+
370
+ .chat-timestamp {
371
+ font-size: var(--font-size);
372
+ color: var(--muted-text);
373
+ margin-top: 1px;
374
+ opacity: 0.75;
375
+ }
376
+
377
+ .chat-reward {
378
+ display: inline-flex;
379
+ align-items: center;
380
+ background: linear-gradient(90deg, #fffdf2 0%, #ffffff 75%);
381
+ color: #000000;
382
+ font-weight: 600;
383
+ font-size: var(--font-size);
384
+ padding: 1px 5px;
385
+ border-radius: 3px;
386
+ border: 1px solid #f4e6a8;
387
+ margin-left: 4px;
388
+ line-height: 1.3;
389
+ }
390
+
391
+ .chat-reasoning {
392
+ font-size: var(--font-size);
393
+ font-style: italic;
394
+ color: #555;
395
+ margin-bottom: 2px;
396
+ padding: 4px 8px;
397
+ background: rgba(0, 0, 0, 0.03);
398
+ border-radius: 5px;
399
+ cursor: pointer;
400
+ line-height: 1.3;
401
+ }
402
+
403
+ .chat-reasoning.collapsed .reasoning-text {
404
+ display: none;
405
+ }
406
+
407
+ .chat-reasoning.collapsed::after {
408
+ content: ' (click to expand)';
409
+ color: #777;
410
+ }
411
+
412
+ .chat-group-divider {
413
+ display: flex;
414
+ align-items: center;
415
+ gap: 8px;
416
+ width: 100%;
417
+ margin: 8px 0 4px 0;
418
+ position: relative;
419
+ cursor: pointer;
420
+ user-select: none;
421
+ }
422
+
423
+ .chat-group-divider::before,
424
+ .chat-group-divider::after {
425
+ content: "";
426
+ flex: 1 1 auto;
427
+ height: 2px;
428
+ background: linear-gradient(90deg, rgba(224,230,235,0), var(--accent-muted-2) 30%, var(--accent-muted-2) 70%, rgba(224,230,235,0));
429
+ }
430
+
431
+ .chat-group-label {
432
+ display: inline-block;
433
+ background: white;
434
+ padding: 2px 12px;
435
+ border-radius: 999px;
436
+ font-size: var(--font-size);
437
+ font-weight: 700;
438
+ color: var(--muted-text);
439
+ border: 1.5px solid var(--accent-muted);
440
+ box-shadow: 0 1px 3px rgba(0, 0, 0, 0.08);
441
+ line-height: 1.4;
442
+ position: relative;
443
+ transition: background 0.2s ease;
444
+ }
445
+
446
+ .chat-group-divider:hover .chat-group-label {
447
+ background: var(--panel-bg);
448
+ }
449
+
450
+ .chat-group-label::before {
451
+ content: '▼ ';
452
+ font-size: 0.8em;
453
+ display: inline-block;
454
+ transition: transform 0.2s ease;
455
+ opacity: 0;
456
+ }
457
+
458
+ .chat-group-divider:hover .chat-group-label::before {
459
+ opacity: 1;
460
+ }
461
+
462
+ .chat-group-divider.collapsed .chat-group-label::before {
463
+ content: '▶ ';
464
+ opacity: 1;
465
+ }
466
+
467
+ .chat-group-divider.collapsed + * {
468
+ display: none !important;
469
+ }
470
+
471
+ /* Hide collapsed rounds in strong hide mode */
472
+ .strong-hide .chat-group-divider.collapsed {
473
+ display: none !important;
474
+ }
475
+
476
+ /* Chat view width control */
477
+ #flow-chat {
478
+ --chat-width: 900px;
479
+ max-width: var(--chat-width);
480
+ margin: 0 auto;
481
+ }
482
+
483
+ /* Hide user messages when toggle is on */
484
+ #flow-chat.hide-user-messages .chat-message.role-user {
485
+ display: none;
486
+ }
487
+
488
+ /* Hide rewards when hiding user messages */
489
+ #flow-chat.hide-user-messages .chat-reward {
490
+ display: none;
491
+ }
492
+
493
+ /* Round context annotations */
494
+ .round-context {
495
+ text-align: center;
496
+ margin: 4px auto;
497
+ max-width: 100%;
498
+ }
499
+
500
+ .round-context-edit {
501
+ min-height: 20px;
502
+ padding: 5px 10px;
503
+ border: 1.5px dashed var(--accent-muted);
504
+ border-radius: 6px;
505
+ background: #fafafa;
506
+ cursor: text;
507
+ transition: all 0.2s ease;
508
+ outline: none;
509
+ font-size: var(--font-size);
510
+ line-height: 1.3;
511
+ user-select: text;
512
+ -webkit-user-select: text;
513
+ -moz-user-select: text;
514
+ -ms-user-select: text;
515
+ }
516
+
517
+ .round-context-edit:focus {
518
+ border-style: solid;
519
+ border-color: var(--accent-muted-2);
520
+ background: #ffffff;
521
+ box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1);
522
+ }
523
+
524
+ .round-context-edit:empty:before {
525
+ content: attr(data-placeholder);
526
+ color: #999;
527
+ font-style: italic;
528
+ }
529
+
530
+ .round-context-controls {
531
+ display: none;
532
+ justify-content: center;
533
+ gap: 4px;
534
+ margin-top: 4px;
535
+ flex-wrap: wrap;
536
+ }
537
+
538
+ .round-context-edit:focus + .round-context-controls,
539
+ .round-context-controls:hover,
540
+ .round-context:focus-within .round-context-controls {
541
+ display: flex;
542
+ }
543
+
544
+ .context-color-btn {
545
+ width: 22px;
546
+ height: 22px;
547
+ border-radius: 50%;
548
+ border: 1.5px solid #fff;
549
+ box-shadow: 0 1px 2px rgba(0, 0, 0, 0.15);
550
+ cursor: pointer;
551
+ transition: transform 0.1s ease;
552
+ }
553
+
554
+ .context-color-btn:hover {
555
+ transform: scale(1.15);
556
+ }
557
+
558
+ .context-color-btn:active {
559
+ transform: scale(0.95);
560
+ }
561
+
562
+ /* Split agent context boxes */
563
+ .split-agent-context {
564
+ display: flex;
565
+ gap: 6px;
566
+ margin: 4px auto;
567
+ max-width: 100%;
568
+ align-items: flex-start;
569
+ }
570
+
571
+ .agent-context-box {
572
+ flex: 1;
573
+ min-width: 0;
574
+ position: relative;
575
+ }
576
+
577
+ .agent-context-box .round-context-edit {
578
+ margin: 0;
579
+ border-radius: 6px;
580
+ padding: 4px 8px;
581
+ min-height: 18px;
582
+ }
583
+
584
+ .agent-context-box.agent-alice .round-context-edit {
585
+ border-color: var(--alice-border);
586
+ background: rgba(14, 178, 36, 0.03);
587
+ }
588
+
589
+ .agent-context-box.agent-bob .round-context-edit {
590
+ border-color: var(--bob-border);
591
+ background: rgba(239, 131, 35, 0.03);
592
+ }
593
+
594
+ .agent-context-box.agent-alice .round-context-edit:focus {
595
+ border-color: var(--alice-border);
596
+ box-shadow: 0 2px 8px rgba(14, 178, 36, 0.2);
597
+ background: rgba(14, 178, 36, 0.05);
598
+ }
599
+
600
+ .agent-context-box.agent-bob .round-context-edit:focus {
601
+ border-color: var(--bob-border);
602
+ box-shadow: 0 2px 8px rgba(239, 131, 35, 0.2);
603
+ background: rgba(239, 131, 35, 0.05);
604
+ }
605
+
606
+ .agent-context-box .round-context-edit::before {
607
+ font-weight: 700;
608
+ font-size: var(--font-size);
609
+ margin-right: 5px;
610
+ letter-spacing: 0.2px;
611
+ }
612
+
613
+ .agent-context-box.agent-alice .round-context-edit::before {
614
+ content: 'Alice Prompt Summary:';
615
+ color: var(--alice-border);
616
+ }
617
+
618
+ .agent-context-box.agent-bob .round-context-edit::before {
619
+ content: 'Bob Prompt Summary:';
620
+ color: var(--bob-border);
621
+ }
622
+
623
+ /* Empty context boxes will be hidden by JavaScript when strong hide is enabled */
624
+ .messages-flow { display: block; }
625
+ .split-wrapper { display: flex; gap: 4px; align-items: flex-start; position: relative; }
626
+ .split-col { flex:1 1 0; min-width:0; }
627
+ /* In split view keep same inline density as linear view */
628
+ .split-col .chat-turn { display: inline; }
629
+ .split-wrapper.resizing { user-select: none; }
630
+ .split-resizer { width:4px; cursor: col-resize; flex:0 0 auto; align-self: stretch; position: relative; background: linear-gradient(90deg, rgba(224,230,235,0), var(--accent-muted-2) 30%, var(--accent-muted-2) 70%, rgba(224,230,235,0)); border-radius:2px; transition: background .15s ease, width .15s ease; }
631
+ .split-resizer:hover { background: linear-gradient(90deg, rgba(224,230,235,0), var(--accent-muted) 35%, var(--accent-muted) 65%, rgba(224,230,235,0)); }
632
+ .split-resizer.dragging { background: linear-gradient(90deg, rgba(224,230,235,0), var(--accent-muted) 25%, var(--accent-muted) 75%, rgba(224,230,235,0)); }
633
+ /* Inline reasoning (removed toggle to prevent layout shift on click) */
634
+ .reasoning-inline { display:inline; font-size:var(--font-size); font-style:italic; color:#555; white-space:pre-wrap; margin-right:4px; cursor:pointer; position:relative; }
635
+ .reasoning-inline .reasoning-text { display:inline; }
636
+ .reasoning-inline .reasoning-icon { display:inline-block; margin-right:2px; }
637
+ .reasoning-inline.collapsed .reasoning-text { display:none; }
638
+ .reasoning-inline.collapsed::after { content:'(...)'; font-style:italic; color:#777; margin-left:4px; }
639
+ .message-box .main-content { white-space:normal; }
640
+ /* tighten spacing */
641
+ .split-col .group-divider { margin:4px 0 2px 0; }
642
+ .toolbar {
643
+ display: flex;
644
+ align-items: center;
645
+ gap: 8px;
646
+ margin-bottom: 0;
647
+ font-size: var(--font-size);
648
+ max-height: 0;
649
+ overflow: hidden;
650
+ opacity: 0;
651
+ pointer-events: none;
652
+ transition: max-height 0.2s ease, opacity 0.2s ease;
653
+ flex-wrap: wrap;
654
+ }
655
+ .toolbar-wrap { position: sticky; top: 0; z-index: 10; background: var(--bg); }
656
+ .toolbar-hotzone { height: 6px; }
657
+ .toolbar-wrap:hover .toolbar { max-height: 500px; opacity: 1; pointer-events: auto; margin-bottom: 12px; }
658
+ .toolbar * { pointer-events: auto !important; }
659
+ .toolbar input,
660
+ .toolbar select { z-index: 100 !important; position: relative; }
661
+ .toolbar input[type="number"],
662
+ .toolbar input[type="text"],
663
+ .toolbar select {
664
+ width: 72px;
665
+ padding: 2px 6px;
666
+ border: 1px solid var(--accent-muted);
667
+ border-radius: var(--corner-radius);
668
+ background: var(--bg);
669
+ user-select: text !important;
670
+ -webkit-user-select: text !important;
671
+ -moz-user-select: text !important;
672
+ -ms-user-select: text !important;
673
+ pointer-events: auto !important;
674
+ cursor: pointer !important;
675
+ }
676
+ .toolbar input[type="text"] {
677
+ cursor: text !important;
678
+ }
679
+ .toolbar input[type="text"]:focus,
680
+ .toolbar input[type="number"]:focus,
681
+ .toolbar select:focus {
682
+ outline: 2px solid #0066cc;
683
+ outline-offset: 1px;
684
+ }
685
+ .toolbar button {
686
+ padding: 4px 8px;
687
+ border: 1px solid var(--accent-muted);
688
+ background: var(--panel-bg);
689
+ border-radius: var(--corner-radius);
690
+ cursor: pointer;
691
+ }
692
+ .chat-turn {
693
+ display: inline; /* inline like text */
694
+ background: transparent;
695
+ position: relative;
696
+ cursor: pointer;
697
+ }
698
+ /* No agent-specific background distinctions */
699
+ .turn-content {
700
+ white-space: normal;
701
+ color: var(--text);
702
+ font-size: var(--font-size);
703
+ display: inline; /* inline flow */
704
+ }
705
+ .chat-turn .agent-badge { margin-right: 0; vertical-align: baseline; }
706
+ .agent-badge {
707
+ display: inline;
708
+ position: relative;
709
+ border: var(--border-width) solid var(--accent-muted); /* slightly thicker */
710
+ border-radius: var(--pill-radius-left); /* round left and bottom-right */
711
+ font-size: var(--font-size);
712
+ color: var(--muted-text);
713
+ background: var(--panel-bg);
714
+ box-shadow: var(--inset-shadow);
715
+ line-height: 1.2;
716
+ border-right: 0;
717
+ }
718
+ /* Use flex on assistant badges to vertically center reward pill */
719
+ .chat-turn.role-assistant .agent-badge { display: inline-flex; align-items: center; }
720
+ .agent-badge::after {
721
+ content: none;
722
+ }
723
+ /* removed external separator; emoji is rendered inside message bubble */
724
+ .agent-name { font-weight: 700; }
725
+ .emoji-bw { filter: grayscale(100%); opacity: 0.95; font-size: var(--font-size); vertical-align: baseline; margin: 0; position: relative; top: -1px; line-height: 1; display: inline-block; }
726
+ .ts-badge {
727
+ position: relative;
728
+ display: inline;
729
+ border: var(--border-width) solid var(--accent-muted-2); /* slightly thicker */
730
+ border-radius: var(--corner-radius); /* not a pill */
731
+ font-size: var(--font-size);
732
+ # font-weight: 700;
733
+ color: var(--muted-text);
734
+ background: #F4F8FB; /* subtle tint */
735
+ # padding: 1px 6px; /* slight padding for visibility */
736
+ margin-right: 8px; /* small gap from following content */
737
+ pointer-events: auto; /* allow events so we can ignore them in JS */
738
+ }
739
+ /* Hide timestep badges when grouping by 1 */
740
+ .hide-ts-badges .ts-badge { display: none; }
741
+ /* Strong hide: completely hide collapsed turns */
742
+ .strong-hide .chat-turn.collapsed { display: none; }
743
+ .ts-badge::before {
744
+ content: "";
745
+ position: relative;
746
+ background: var(--accent-muted-2);
747
+ border-radius: 2px;
748
+ }
749
+ .agent-badge { margin-left: 6px; }
750
+ .message-box {
751
+ display: inline; /* inline bubble behaving like text */
752
+ font-size: var(--font-size);
753
+ border: var(--border-width) solid var(--accent-muted);
754
+ border-radius: var(--pill-radius-right); /* round left and bottom-right */
755
+ position: relative;
756
+ background: var(--bg);
757
+ vertical-align: baseline;
758
+ line-height: 1.2;
759
+ padding-left: 0;
760
+ border-left: 0;
761
+ }
762
+ .chat-turn.agent-alice.role-assistant .message-box::before { color: #0eb224; }
763
+ .chat-turn.agent-bob.role-assistant .message-box::before { color: #ef8323; }
764
+ .chat-turn.collapsed .message-box::before { display: none; }
765
+ /* Assistant bubble border colors by common agent names */
766
+ .chat-turn.agent-alice.role-assistant .message-box { border-color: #0eb224; }
767
+ .chat-turn.agent-bob.role-assistant .message-box { border-color: #ef8323; }
768
+ /* Tie badge and seam to agent color for a cohesive capsule, assistants only */
769
+ .chat-turn.agent-alice.role-assistant .agent-badge { border-color: #0eb224; background: rgba(14,178,36,0.08); }
770
+ .chat-turn.agent-alice.role-assistant .agent-badge::after { border-right-color: #0eb224; }
771
+ .chat-turn.agent-alice.role-assistant .turn-content::before { border-left-color: #0eb224; border-top-color: #0eb224; }
772
+ .chat-turn.agent-alice.role-assistant .message-box { border-color: #0eb224; }
773
+
774
+ .chat-turn.agent-bob.role-assistant .agent-badge { border-color: #ef8323; background: rgba(239,131,35,0.10); }
775
+ .chat-turn.agent-bob.role-assistant .agent-badge::after { border-right-color: #ef8323; }
776
+ .chat-turn.agent-bob.role-assistant .turn-content::before { border-left-color: #ef8323; border-top-color: #ef8323; }
777
+ .chat-turn.agent-bob.role-assistant .message-box { border-color: #ef8323; }
778
+ /* No colored agent-name; keep neutral */
779
+ .reward {
780
+ display: inline-flex;
781
+ align-items: center;
782
+ justify-content: center;
783
+ background: linear-gradient(90deg, #fffdf2 0%, #ffffff 75%);
784
+ color: #000000; /* full black */
785
+ font-weight: 600; /* slightly bolder */
786
+ font-family: "Inter", ui-sans-serif, system-ui, -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Oxygen, Ubuntu, Cantarell, "Fira Sans", "Droid Sans", "Helvetica Neue", Arial, "Noto Sans", sans-serif;
787
+ font-size: var(--font-size);
788
+ letter-spacing: 0.15px;
789
+ line-height: 1;
790
+ padding: 0 4px 1px 4px; /* slight bottom pad for optical centering */
791
+ border-radius: 4px;
792
+ border: 1px solid #f4e6a8;
793
+ margin: 0 4px;
794
+ box-shadow: 0 0 0 1px rgba(255,255,255,0.55) inset, 0 1px 2px rgba(0,0,0,0.04);
795
+ }
796
+ .message-placeholder { display: none; color: #7f8c8d; font-style: italic; }
797
+ .chat-turn.collapsed .message-box { color: transparent; font-size: 0; display: inline-block; }
798
+ .chat-turn.collapsed .message-box::after { content: "(...)"; color: #7f8c8d; font-style: italic; font-size: var(--font-size); line-height: 1.2; }
799
+ .chat-turn.collapsed .agent-badge,
800
+ .chat-turn.collapsed .message-box { opacity: 0.3; }
801
+ /* Group divider - clearer and pretty */
802
+ .group-divider {
803
+ display: flex;
804
+ align-items: center;
805
+ gap: 8px;
806
+ width: 100%;
807
+ margin: 8px 0 4px 0;
808
+ position: relative;
809
+ cursor: pointer;
810
+ user-select: none;
811
+ }
812
+ .group-divider::before,
813
+ .group-divider::after {
814
+ content: "";
815
+ flex: 1 1 auto;
816
+ height: 2px;
817
+ background: linear-gradient(90deg, rgba(224,230,235,0), var(--accent-muted-2) 30%, var(--accent-muted-2) 70%, rgba(224,230,235,0));
818
+ }
819
+ .group-divider .group-label {
820
+ display: inline-block;
821
+ border: 1px solid var(--accent-muted);
822
+ border-radius: 999px;
823
+ padding: 2px 10px;
824
+ font-size: var(--group-label-font-size);
825
+ font-weight: 700;
826
+ color: var(--muted-text);
827
+ background: var(--bg);
828
+ box-shadow: var(--inset-shadow);
829
+ position: relative;
830
+ z-index: 1;
831
+ transition: background 0.2s ease;
832
+ }
833
+
834
+ .group-divider:hover .group-label {
835
+ background: var(--panel-bg);
836
+ }
837
+
838
+ .group-label::before {
839
+ content: '▼ ';
840
+ font-size: 0.8em;
841
+ display: inline-block;
842
+ transition: transform 0.2s ease;
843
+ opacity: 0;
844
+ }
845
+
846
+ .group-divider:hover .group-label::before {
847
+ opacity: 1;
848
+ }
849
+
850
+ .group-divider.collapsed .group-label::before {
851
+ content: '▶ ';
852
+ opacity: 1;
853
+ }
854
+
855
+ /* Hide collapsed rounds in strong hide mode */
856
+ .strong-hide .group-divider.collapsed {
857
+ display: none !important;
858
+ }
859
+ /* Enhance contrast for print / export */
860
+ body.split-mode .group-divider::before,
861
+ body.split-mode .group-divider::after {
862
+ background: linear-gradient(90deg, rgba(224,230,235,0), var(--accent-muted) 25%, var(--accent-muted) 75%, rgba(224,230,235,0));
863
+ }
864
+ .chat-turn .turn-content { position: relative; }
865
+ .chat-turn .turn-content::before {
866
+ content: none;
867
+ }
868
+ .chat-turn .agent-badge {
869
+ position: relative;
870
+ }
871
+ /* removed absolute-positioned emoji to prevent overlap */
872
+ </style>
873
+ """
874
+
875
+ # HTML structure
876
+ html_parts = [
877
+ "<!DOCTYPE html>",
878
+ "<html>",
879
+ "<head>",
880
+ "<meta charset='UTF-8'>",
881
+ "<title>Chat Turns</title>",
882
+ css,
883
+ "<script>\n"
884
+ "document.addEventListener('DOMContentLoaded', function() {\n"
885
+ " const linearFlow = document.getElementById('flow-linear');\n"
886
+ " const splitFlow = document.getElementById('flow-split');\n"
887
+ " const chatFlow = document.getElementById('flow-chat');\n"
888
+ " let splitViewOn = false;\n"
889
+ " let chatViewOn = true;\n"
890
+ " function activeFlows() { return [chatViewOn && chatFlow ? chatFlow : null, splitViewOn && splitFlow ? splitFlow : null, linearFlow].filter(Boolean).filter(f => f.style.display !== 'none'); }\n"
891
+ " // State for range filtering and strong hide\n"
892
+ " let currentRangeStart = null;\n"
893
+ " let currentRangeEnd = null;\n"
894
+ " let strongHideOn = false;\n"
895
+ " document.body.addEventListener('click', function(e){\n"
896
+ " if (e.target.closest('input, textarea, select, button, .round-context-edit, .toolbar')) { return; }\n"
897
+ " if (e.target.closest('.ts-badge')) { return; }\n"
898
+ " const r = e.target.closest('.reasoning-inline'); if (r) { e.stopPropagation(); r.classList.toggle('collapsed'); return; }\n"
899
+ " const turn = e.target.closest('.chat-turn');\n"
900
+ " if (turn) { e.stopPropagation(); turn.classList.toggle('collapsed'); }\n"
901
+ " });\n"
902
+ " // Reasoning handled via <details>, no JS required\n"
903
+ " function applyRangeFilter() {\n"
904
+ " for (const flow of activeFlows()) {\n"
905
+ " const turns = Array.from(flow.querySelectorAll('.chat-turn'));\n"
906
+ " for (const el of turns) {\n"
907
+ " const t = parseInt(el.getAttribute('data-time-step') || '0', 10);\n"
908
+ " const afterStart = (currentRangeStart === null) || (t >= currentRangeStart);\n"
909
+ " const beforeEnd = (currentRangeEnd === null) || (t <= currentRangeEnd);\n"
910
+ " el.style.display = (afterStart && beforeEnd) ? '' : 'none';\n"
911
+ " }\n"
912
+ " const dividers = Array.from(flow.querySelectorAll('.group-divider'));\n"
913
+ " for (const d of dividers) {\n"
914
+ " let anyVisible = false;\n"
915
+ " let el = d.nextElementSibling;\n"
916
+ " while (el && !el.classList.contains('group-divider')) {\n"
917
+ " if (el.classList.contains('chat-turn')) {\n"
918
+ " const disp = getComputedStyle(el).display;\n"
919
+ " if (disp !== 'none') { anyVisible = true; break; }\n"
920
+ " } else if (el.classList.contains('split-wrapper')) {\n"
921
+ " // Search descendants for any visible chat-turn\n"
922
+ " const turns = Array.from(el.querySelectorAll('.chat-turn'));\n"
923
+ " for (const tEl of turns) {\n"
924
+ " const disp2 = getComputedStyle(tEl).display;\n"
925
+ " if (disp2 !== 'none') { anyVisible = true; break; }\n"
926
+ " }\n"
927
+ " if (anyVisible) break;\n"
928
+ " }\n"
929
+ " el = el.nextElementSibling;\n"
930
+ " }\n"
931
+ " d.style.display = anyVisible ? '' : 'none';\n"
932
+ " }\n"
933
+ " }\n"
934
+ " }\n"
935
+ " function applyGrouping(n) {\n"
936
+ " function groupContainer(container, n) {\n"
937
+ " Array.from(container.querySelectorAll(':scope > .group-divider')).forEach(el => el.remove());\n"
938
+ " if (!n || n <= 0) { return; }\n"
939
+ " const turns = Array.from(container.querySelectorAll(':scope > .chat-turn'));\n"
940
+ " if (turns.length === 0) return;\n"
941
+ " const items = Array.from(container.children).filter(el => !el.classList.contains('group-divider'));\n"
942
+ " const frag = document.createDocumentFragment();\n"
943
+ " let lastGroup = -1;\n"
944
+ " for (const el of items) {\n"
945
+ " if (!el.classList.contains('chat-turn')) { frag.appendChild(el); continue; }\n"
946
+ " const t = parseInt(el.getAttribute('data-time-step') || '0', 10);\n"
947
+ " const g = Math.floor(t / n);\n"
948
+ " if (g !== lastGroup) {\n"
949
+ " const div = document.createElement('div');\n"
950
+ " div.className = 'group-divider';\n"
951
+ " const label = document.createElement('span');\n"
952
+ " label.className = 'group-label';\n"
953
+ " const roundIndex = g + 1;\n"
954
+ " label.textContent = `Round ${roundIndex}`;\n"
955
+ " div.appendChild(label);\n"
956
+ " frag.appendChild(div);\n"
957
+ " lastGroup = g;\n"
958
+ " }\n"
959
+ " frag.appendChild(el);\n"
960
+ " }\n"
961
+ " container.innerHTML = '';\n"
962
+ " container.appendChild(frag);\n"
963
+ " container.classList.toggle('hide-ts-badges', n === 1);\n"
964
+ " container.classList.toggle('strong-hide', strongHideOn);\n"
965
+ " }\n"
966
+ " for (const flow of activeFlows()) {\n"
967
+ " if (flow.id === 'flow-split') {\n"
968
+ " // Snapshot original turns once to avoid drift on repeated grouping\n"
969
+ " const getOriginalTurns = () => {\n"
970
+ " if (!flow.dataset.origData) {\n"
971
+ " const data = [];\n"
972
+ " const cols0 = flow.querySelectorAll('.split-col');\n"
973
+ " cols0.forEach(col => {\n"
974
+ " const agent = col.getAttribute('data-agent') || '';\n"
975
+ " col.querySelectorAll(':scope > .chat-turn').forEach(el => {\n"
976
+ " const t = parseInt(el.getAttribute('data-time-step')||'0',10);\n"
977
+ " data.push({agent, time:t, html: el.outerHTML});\n"
978
+ " });\n"
979
+ " });\n"
980
+ " flow.dataset.origData = JSON.stringify(data);\n"
981
+ " }\n"
982
+ " return JSON.parse(flow.dataset.origData);\n"
983
+ " };\n"
984
+ " const original = getOriginalTurns();\n"
985
+ " const agents = Array.from(new Set(original.map(o => o.agent))).sort();\n"
986
+ " const groups = new Map();\n"
987
+ " original.forEach(o => {\n"
988
+ " const g = n && n > 0 ? Math.floor(o.time / n) : 0;\n"
989
+ " if (!groups.has(g)) groups.set(g, new Map());\n"
990
+ " const gm = groups.get(g);\n"
991
+ " if (!gm.has(o.agent)) gm.set(o.agent, []);\n"
992
+ " gm.get(o.agent).push(o);\n"
993
+ " });\n"
994
+ " flow.innerHTML = '';\n"
995
+ " const sorted = Array.from(groups.keys()).sort((a,b)=>a-b);\n"
996
+ " sorted.forEach(g => {\n"
997
+ " const div = document.createElement('div');\n"
998
+ " div.className = 'group-divider';\n"
999
+ " const label = document.createElement('span');\n"
1000
+ " label.className = 'group-label';\n"
1001
+ " label.textContent = `Round ${g+1}`;\n"
1002
+ " div.appendChild(label);\n"
1003
+ " flow.appendChild(div);\n"
1004
+ " const wrapper = document.createElement('div');\n"
1005
+ " wrapper.className = 'split-wrapper';\n"
1006
+ " agents.forEach(agent => {\n"
1007
+ " const colDiv = document.createElement('div');\n"
1008
+ " colDiv.className = 'split-col';\n"
1009
+ " colDiv.setAttribute('data-agent', agent);\n"
1010
+ " (groups.get(g).get(agent) || []).forEach(o => { colDiv.insertAdjacentHTML('beforeend', o.html); });\n"
1011
+ " wrapper.appendChild(colDiv);\n"
1012
+ " });\n"
1013
+ " if (wrapper.children.length === 2) { const res = document.createElement('div'); res.className='split-resizer'; wrapper.insertBefore(res, wrapper.children[1]); }\n"
1014
+ " flow.appendChild(wrapper);\n"
1015
+ " });\n"
1016
+ " flow.classList.toggle('hide-ts-badges', n === 1);\n"
1017
+ " flow.classList.toggle('strong-hide', strongHideOn);\n"
1018
+ " document.body.classList.add('split-mode');\n"
1019
+ " } else {\n"
1020
+ " groupContainer(flow, n);\n"
1021
+ " }\n"
1022
+ " }\n"
1023
+ " applyRangeFilter();\n"
1024
+ " initSplitResizers();\n"
1025
+ " }\n"
1026
+ " function initSplitResizers() {\n"
1027
+ " const wrappers = document.querySelectorAll('#flow-split .split-wrapper');\n"
1028
+ " wrappers.forEach(wrap => {\n"
1029
+ " const resizer = wrap.querySelector('.split-resizer');\n"
1030
+ " if (!resizer || resizer.dataset.bound) return; resizer.dataset.bound='1';\n"
1031
+ " const cols = wrap.querySelectorAll('.split-col'); if (cols.length !== 2) return; const c0=cols[0], c1=cols[1];\n"
1032
+ " c0.style.flex=c1.style.flex='1 1 0'; c0.style.width=c1.style.width='';\n"
1033
+ " requestAnimationFrame(()=>{ const w0=c0.scrollWidth,w1=c1.scrollWidth,total=w0+w1||1; let p0=w0/total,p1=w1/total; const minP=0.25,maxP=0.75; if(p0<minP){p0=minP;p1=1-p0;} else if(p0>maxP){p0=maxP;p1=1-p0;} c0.style.flex='0 0 '+(p0*100).toFixed(2)+'%'; c1.style.flex='0 0 '+(p1*100).toFixed(2)+'%'; });\n"
1034
+ " let dragging=false,startX=0,startP0=0;\n"
1035
+ " const onDown=e=>{ dragging=true; startX=e.clientX; wrap.classList.add('resizing'); resizer.classList.add('dragging'); const rect=wrap.getBoundingClientRect(); const w=rect.width; const c0Rect=c0.getBoundingClientRect(); startP0=c0Rect.width/w; document.body.style.cursor='col-resize'; e.preventDefault(); };\n"
1036
+ " const onMove=e=>{ if(!dragging)return; const rect=wrap.getBoundingClientRect(); const w=rect.width; let delta=(e.clientX-startX)/w; let newP0=startP0+delta; const minP=0.15,maxP=0.85; if(newP0<minP)newP0=minP; if(newP0>maxP)newP0=maxP; c0.style.flex='0 0 '+(newP0*100).toFixed(2)+'%'; c1.style.flex='0 0 '+((1-newP0)*100).toFixed(2)+'%'; };\n"
1037
+ " const onUp=()=>{ if(!dragging)return; dragging=false; wrap.classList.remove('resizing'); resizer.classList.remove('dragging'); document.body.style.cursor=''; };\n"
1038
+ " resizer.addEventListener('mousedown', onDown); window.addEventListener('mousemove', onMove); window.addEventListener('mouseup', onUp);\n"
1039
+ " resizer.addEventListener('dblclick', e=>{ if(e.shiftKey){ c0.style.flex=c1.style.flex='1 1 0'; requestAnimationFrame(()=>{ const w0=c0.scrollWidth,w1=c1.scrollWidth,total=w0+w1||1; let p0=w0/total,p1=w1/total; const minP=0.25,maxP=0.75; if(p0<minP){p0=minP;p1=1-p0;} else if(p0>maxP){p0=maxP;p1=1-p0;} c0.style.flex='0 0 '+(p0*100).toFixed(2)+'%'; c1.style.flex='0 0 '+(p1*100).toFixed(2)+'%'; }); } else { c0.style.flex='0 0 50%'; c1.style.flex='0 0 50%'; } });\n"
1040
+ " });\n"
1041
+ " }\n"
1042
+ " initSplitResizers();\n"
1043
+ " const input = document.getElementById('group-size');\n"
1044
+ " const btn = document.getElementById('apply-grouping');\n"
1045
+ " if (btn && input) {\n"
1046
+ " btn.addEventListener('click', () => { const n = parseInt(input.value || '0', 10); applyGrouping(n); });\n"
1047
+ " input.addEventListener('keydown', (e) => { if (e.key === 'Enter') { const n = parseInt(input.value || '0', 10); applyGrouping(n); } });\n"
1048
+ " }\n"
1049
+ " if (input) { input.value = '1'; applyGrouping(1); }\n"
1050
+ " const rangeStart = document.getElementById('range-start');\n"
1051
+ " const rangeEnd = document.getElementById('range-end');\n"
1052
+ " const rangeBtn = document.getElementById('apply-range');\n"
1053
+ " if (rangeBtn && rangeStart && rangeEnd) {\n"
1054
+ " const applyRange = () => {\n"
1055
+ " const sv = parseInt(rangeStart.value || '', 10);\n"
1056
+ " const ev = parseInt(rangeEnd.value || '', 10);\n"
1057
+ " currentRangeStart = Number.isFinite(sv) ? sv : null;\n"
1058
+ " currentRangeEnd = Number.isFinite(ev) ? ev : null;\n"
1059
+ " applyRangeFilter();\n"
1060
+ " };\n"
1061
+ " rangeBtn.addEventListener('click', applyRange);\n"
1062
+ " rangeStart.addEventListener('keydown', (e) => { if (e.key === 'Enter') applyRange(); });\n"
1063
+ " rangeEnd.addEventListener('keydown', (e) => { if (e.key === 'Enter') applyRange(); });\n"
1064
+ " }\n"
1065
+ " const strongHideBtn = document.getElementById('toggle-strong-hide');\n"
1066
+ " const strongHideStateEl = document.getElementById('strong-hide-state');\n"
1067
+ " if (strongHideBtn) {\n"
1068
+ " const setLabel = () => { if (strongHideStateEl) { strongHideStateEl.textContent = strongHideOn ? 'On' : 'Off'; } };\n"
1069
+ " strongHideBtn.addEventListener('click', () => { strongHideOn = !strongHideOn; for (const f of activeFlows()) { f.classList.toggle('strong-hide', strongHideOn); } setLabel(); });\n"
1070
+ " if (strongHideOn) { for (const f of activeFlows()) { f.classList.add('strong-hide'); } }\n"
1071
+ " setLabel();\n"
1072
+ " }\n"
1073
+ " const splitBtn = document.getElementById('toggle-split-view');\n"
1074
+ " const splitStateEl = document.getElementById('split-view-state');\n"
1075
+ " if (splitBtn && splitFlow && linearFlow) {\n"
1076
+ " const updateSplit = () => { if (splitStateEl) splitStateEl.textContent = splitViewOn ? 'On' : 'Off'; };\n"
1077
+ " splitBtn.addEventListener('click', () => { if (chatViewOn) return; splitViewOn = !splitViewOn; linearFlow.style.display = splitViewOn ? 'none' : ''; splitFlow.style.display = splitViewOn ? '' : 'none'; applyGrouping(parseInt(input.value||'1',10)); updateSplit(); });\n"
1078
+ " updateSplit();\n"
1079
+ " }\n"
1080
+ " const chatBtn = document.getElementById('toggle-chat-view');\n"
1081
+ " const chatStateEl = document.getElementById('chat-view-state');\n"
1082
+ " const hideUserBtn = document.getElementById('toggle-hide-user-messages');\n"
1083
+ " const hideUserStateEl = document.getElementById('hide-user-state');\n"
1084
+ " const widthControl = document.getElementById('chat-width-control');\n"
1085
+ " const widthSlider = document.getElementById('chat-width-slider');\n"
1086
+ " const widthValue = document.getElementById('chat-width-value');\n"
1087
+ " let hideUserMessages = false;\n"
1088
+ " if (chatBtn && chatFlow && linearFlow) {\n"
1089
+ " const updateChat = () => {\n"
1090
+ " if (chatStateEl) chatStateEl.textContent = chatViewOn ? 'On' : 'Off';\n"
1091
+ " if (hideUserBtn) hideUserBtn.style.display = chatViewOn ? '' : 'none';\n"
1092
+ " if (widthControl) widthControl.style.display = chatViewOn ? '' : 'none';\n"
1093
+ " };\n"
1094
+ " chatBtn.addEventListener('click', () => {\n"
1095
+ " chatViewOn = !chatViewOn;\n"
1096
+ " if (chatViewOn) {\n"
1097
+ " splitViewOn = false;\n"
1098
+ " linearFlow.style.display = 'none';\n"
1099
+ " if (splitFlow) splitFlow.style.display = 'none';\n"
1100
+ " chatFlow.style.display = '';\n"
1101
+ " if (splitStateEl) splitStateEl.textContent = 'Off';\n"
1102
+ " } else {\n"
1103
+ " chatFlow.style.display = 'none';\n"
1104
+ " linearFlow.style.display = '';\n"
1105
+ " }\n"
1106
+ " updateChat();\n"
1107
+ " });\n"
1108
+ " updateChat();\n"
1109
+ " }\n"
1110
+ " if (hideUserBtn && hideUserStateEl && chatFlow) {\n"
1111
+ " const updateHideUser = () => { hideUserStateEl.textContent = hideUserMessages ? 'On' : 'Off'; };\n"
1112
+ " hideUserBtn.addEventListener('click', () => {\n"
1113
+ " hideUserMessages = !hideUserMessages;\n"
1114
+ " chatFlow.classList.toggle('hide-user-messages', hideUserMessages);\n"
1115
+ " updateHideUser();\n"
1116
+ " });\n"
1117
+ " updateHideUser();\n"
1118
+ " }\n"
1119
+ " if (widthSlider && widthValue && chatFlow) {\n"
1120
+ " const savedWidth = localStorage.getItem('chat-view-width');\n"
1121
+ " if (savedWidth) {\n"
1122
+ " widthSlider.value = savedWidth;\n"
1123
+ " chatFlow.style.setProperty('--chat-width', savedWidth + 'px');\n"
1124
+ " widthValue.textContent = savedWidth + 'px';\n"
1125
+ " }\n"
1126
+ " widthSlider.addEventListener('input', (e) => {\n"
1127
+ " const width = e.target.value;\n"
1128
+ " chatFlow.style.setProperty('--chat-width', width + 'px');\n"
1129
+ " widthValue.textContent = width + 'px';\n"
1130
+ " localStorage.setItem('chat-view-width', width);\n"
1131
+ " });\n"
1132
+ " }\n"
1133
+ " const fontFamilySelect = document.getElementById('font-family-select');\n"
1134
+ " const fontSizeInput = document.getElementById('font-size-input');\n"
1135
+ " if (fontFamilySelect) {\n"
1136
+ " const savedFont = localStorage.getItem('render-font-family');\n"
1137
+ " if (savedFont) {\n"
1138
+ " fontFamilySelect.value = savedFont;\n"
1139
+ " document.body.style.setProperty('--font-family', savedFont);\n"
1140
+ " }\n"
1141
+ " fontFamilySelect.addEventListener('change', (e) => {\n"
1142
+ " const font = e.target.value;\n"
1143
+ " document.body.style.setProperty('--font-family', font);\n"
1144
+ " localStorage.setItem('render-font-family', font);\n"
1145
+ " });\n"
1146
+ " }\n"
1147
+ " if (fontSizeInput) {\n"
1148
+ " const savedSize = localStorage.getItem('render-font-size');\n"
1149
+ " if (savedSize) {\n"
1150
+ " fontSizeInput.value = savedSize;\n"
1151
+ " document.body.style.setProperty('--font-size', savedSize + 'px');\n"
1152
+ " }\n"
1153
+ " fontSizeInput.addEventListener('input', (e) => {\n"
1154
+ " const size = e.target.value;\n"
1155
+ " document.body.style.setProperty('--font-size', size + 'px');\n"
1156
+ " localStorage.setItem('render-font-size', size);\n"
1157
+ " });\n"
1158
+ " }\n"
1159
+ " const aliceEmojiInput = document.getElementById('alice-emoji-input');\n"
1160
+ " const aliceNameInput = document.getElementById('alice-name-input');\n"
1161
+ " const bobEmojiInput = document.getElementById('bob-emoji-input');\n"
1162
+ " const bobNameInput = document.getElementById('bob-name-input');\n"
1163
+ " const applyAgentNamesBtn = document.getElementById('apply-agent-names');\n"
1164
+ " function loadAgentNames() {\n"
1165
+ " if (aliceEmojiInput && aliceNameInput && bobEmojiInput && bobNameInput) {\n"
1166
+ " const savedAliceEmoji = localStorage.getItem('alice-emoji') || '🤖';\n"
1167
+ " const savedAliceName = localStorage.getItem('alice-name') || 'Alice';\n"
1168
+ " const savedBobEmoji = localStorage.getItem('bob-emoji') || '🤖';\n"
1169
+ " const savedBobName = localStorage.getItem('bob-name') || 'Bob';\n"
1170
+ " aliceEmojiInput.value = savedAliceEmoji;\n"
1171
+ " aliceNameInput.value = savedAliceName;\n"
1172
+ " bobEmojiInput.value = savedBobEmoji;\n"
1173
+ " bobNameInput.value = savedBobName;\n"
1174
+ " applyAgentNamesToDOM(savedAliceEmoji, savedAliceName, savedBobEmoji, savedBobName);\n"
1175
+ " }\n"
1176
+ " }\n"
1177
+ " function applyAgentNamesToDOM(aliceEmoji, aliceName, bobEmoji, bobName) {\n"
1178
+ " const agentMap = { 'alice': { name: aliceName, emoji: aliceEmoji }, 'bob': { name: bobName, emoji: bobEmoji } };\n"
1179
+ " document.querySelectorAll('[data-agent-id]').forEach(el => {\n"
1180
+ " const agentId = el.getAttribute('data-agent-id');\n"
1181
+ " if (!agentMap[agentId]) return;\n"
1182
+ " if (el.classList.contains('agent-name')) {\n"
1183
+ " el.textContent = agentMap[agentId].name;\n"
1184
+ " } else if (el.classList.contains('emoji-bw')) {\n"
1185
+ " const currentEmoji = el.textContent.trim();\n"
1186
+ " if (currentEmoji === '🤖' || currentEmoji === '👤') {\n"
1187
+ " el.textContent = agentMap[agentId].emoji;\n"
1188
+ " }\n"
1189
+ " }\n"
1190
+ " });\n"
1191
+ " const style = document.createElement('style');\n"
1192
+ " style.id = 'dynamic-agent-names-style';\n"
1193
+ " const existingStyle = document.getElementById('dynamic-agent-names-style');\n"
1194
+ " if (existingStyle) existingStyle.remove();\n"
1195
+ " style.textContent = `\n"
1196
+ " .agent-context-box.agent-alice .round-context-edit::before {\n"
1197
+ " content: '${aliceName} Prompt Summary:';\n"
1198
+ " }\n"
1199
+ " .agent-context-box.agent-bob .round-context-edit::before {\n"
1200
+ " content: '${bobName} Prompt Summary:';\n"
1201
+ " }\n"
1202
+ " `;\n"
1203
+ " document.head.appendChild(style);\n"
1204
+ " }\n"
1205
+ " if (applyAgentNamesBtn && aliceEmojiInput && aliceNameInput && bobEmojiInput && bobNameInput) {\n"
1206
+ " [aliceEmojiInput, aliceNameInput, bobEmojiInput, bobNameInput].forEach(input => {\n"
1207
+ " input.style.pointerEvents = 'auto';\n"
1208
+ " if (input.tagName === 'INPUT') {\n"
1209
+ " input.style.userSelect = 'text';\n"
1210
+ " input.style.webkitUserSelect = 'text';\n"
1211
+ " input.readOnly = false;\n"
1212
+ " }\n"
1213
+ " input.disabled = false;\n"
1214
+ " const stopAll = (e) => { e.stopPropagation(); e.stopImmediatePropagation(); };\n"
1215
+ " input.addEventListener('mousedown', stopAll, true);\n"
1216
+ " input.addEventListener('mouseup', stopAll, true);\n"
1217
+ " input.addEventListener('click', stopAll, true);\n"
1218
+ " input.addEventListener('dblclick', stopAll, true);\n"
1219
+ " input.addEventListener('focus', stopAll, true);\n"
1220
+ " input.addEventListener('blur', stopAll, true);\n"
1221
+ " input.addEventListener('paste', stopAll, true);\n"
1222
+ " input.addEventListener('cut', stopAll, true);\n"
1223
+ " input.addEventListener('copy', stopAll, true);\n"
1224
+ " input.addEventListener('select', stopAll, true);\n"
1225
+ " input.addEventListener('selectstart', stopAll, true);\n"
1226
+ " input.addEventListener('keydown', stopAll, true);\n"
1227
+ " input.addEventListener('keyup', stopAll, true);\n"
1228
+ " input.addEventListener('keypress', stopAll, true);\n"
1229
+ " input.addEventListener('input', stopAll, true);\n"
1230
+ " input.addEventListener('change', stopAll, true);\n"
1231
+ " input.addEventListener('contextmenu', stopAll, true);\n"
1232
+ " });\n"
1233
+ " const applyNames = () => {\n"
1234
+ " const aliceEmoji = aliceEmojiInput.value || '🤖';\n"
1235
+ " const aliceName = aliceNameInput.value.trim() || 'Alice';\n"
1236
+ " const bobEmoji = bobEmojiInput.value || '🤖';\n"
1237
+ " const bobName = bobNameInput.value.trim() || 'Bob';\n"
1238
+ " localStorage.setItem('alice-emoji', aliceEmoji);\n"
1239
+ " localStorage.setItem('alice-name', aliceName);\n"
1240
+ " localStorage.setItem('bob-emoji', bobEmoji);\n"
1241
+ " localStorage.setItem('bob-name', bobName);\n"
1242
+ " applyAgentNamesToDOM(aliceEmoji, aliceName, bobEmoji, bobName);\n"
1243
+ " };\n"
1244
+ " applyAgentNamesBtn.addEventListener('click', applyNames);\n"
1245
+ " [aliceNameInput, bobNameInput].forEach(input => {\n"
1246
+ " input.addEventListener('keydown', (e) => {\n"
1247
+ " if (e.key === 'Enter') {\n"
1248
+ " e.preventDefault();\n"
1249
+ " e.stopPropagation();\n"
1250
+ " e.stopImmediatePropagation();\n"
1251
+ " applyNames();\n"
1252
+ " }\n"
1253
+ " }, true);\n"
1254
+ " });\n"
1255
+ " [aliceEmojiInput, bobEmojiInput].forEach(select => {\n"
1256
+ " select.addEventListener('change', applyNames);\n"
1257
+ " });\n"
1258
+ " }\n"
1259
+ " loadAgentNames();\n"
1260
+ " function setupRoundCollapse() {\n"
1261
+ " document.addEventListener('click', function(e) {\n"
1262
+ " if (e.target.closest('input, textarea, select, button, .round-context-edit, .toolbar')) { return; }\n"
1263
+ " const divider = e.target.closest('.chat-group-divider, .group-divider');\n"
1264
+ " if (!divider) return;\n"
1265
+ " divider.classList.toggle('collapsed');\n"
1266
+ " const isCollapsed = divider.classList.contains('collapsed');\n"
1267
+ " let nextElement = divider.nextElementSibling;\n"
1268
+ " while (nextElement) {\n"
1269
+ " if (nextElement.classList.contains('chat-group-divider') || nextElement.classList.contains('group-divider')) {\n"
1270
+ " break;\n"
1271
+ " }\n"
1272
+ " if (isCollapsed) {\n"
1273
+ " if (!nextElement.dataset.originalDisplay) {\n"
1274
+ " nextElement.dataset.originalDisplay = nextElement.style.display || getComputedStyle(nextElement).display;\n"
1275
+ " }\n"
1276
+ " nextElement.style.display = 'none';\n"
1277
+ " } else {\n"
1278
+ " if (nextElement.dataset.originalDisplay) {\n"
1279
+ " const originalDisplay = nextElement.dataset.originalDisplay;\n"
1280
+ " nextElement.style.display = originalDisplay === 'none' ? '' : originalDisplay;\n"
1281
+ " if (nextElement.style.display === originalDisplay && originalDisplay !== 'none') {\n"
1282
+ " nextElement.style.display = '';\n"
1283
+ " }\n"
1284
+ " delete nextElement.dataset.originalDisplay;\n"
1285
+ " } else {\n"
1286
+ " nextElement.style.display = '';\n"
1287
+ " }\n"
1288
+ " }\n"
1289
+ " nextElement = nextElement.nextElementSibling;\n"
1290
+ " }\n"
1291
+ " e.stopPropagation();\n"
1292
+ " });\n"
1293
+ " }\n"
1294
+ " setupRoundCollapse();\n"
1295
+ " const strongHideBtnChat = document.getElementById('toggle-strong-hide');\n"
1296
+ " function applyStrongHideToChat() {\n"
1297
+ " if (!chatFlow) return;\n"
1298
+ " chatFlow.classList.toggle('strong-hide', strongHideOn);\n"
1299
+ " const contextEdits = chatFlow.querySelectorAll('.round-context-edit');\n"
1300
+ " contextEdits.forEach(edit => {\n"
1301
+ " const parent = edit.closest('.round-context, .agent-context-box, .split-agent-context');\n"
1302
+ " if (parent) {\n"
1303
+ " if (strongHideOn && edit.textContent.trim() === '') {\n"
1304
+ " parent.style.display = 'none';\n"
1305
+ " } else {\n"
1306
+ " parent.style.display = '';\n"
1307
+ " }\n"
1308
+ " }\n"
1309
+ " });\n"
1310
+ " const splitContexts = chatFlow.querySelectorAll('.split-agent-context');\n"
1311
+ " splitContexts.forEach(split => {\n"
1312
+ " if (strongHideOn) {\n"
1313
+ " const boxes = split.querySelectorAll('.agent-context-box');\n"
1314
+ " const allEmpty = Array.from(boxes).every(box => {\n"
1315
+ " const edit = box.querySelector('.round-context-edit');\n"
1316
+ " return edit && edit.textContent.trim() === '';\n"
1317
+ " });\n"
1318
+ " if (allEmpty) split.style.display = 'none';\n"
1319
+ " }\n"
1320
+ " });\n"
1321
+ " }\n"
1322
+ " if (strongHideBtnChat && chatFlow) {\n"
1323
+ " strongHideBtnChat.addEventListener('click', () => {\n"
1324
+ " setTimeout(() => applyStrongHideToChat(), 0);\n"
1325
+ " });\n"
1326
+ " }\n"
1327
+ " document.addEventListener('click', function(e) {\n"
1328
+ " if (e.target.closest('input, textarea, select, .round-context-edit, .toolbar')) { return; }\n"
1329
+ " const chatReasoning = e.target.closest('.chat-reasoning');\n"
1330
+ " if (chatReasoning) {\n"
1331
+ " chatReasoning.classList.toggle('collapsed');\n"
1332
+ " }\n"
1333
+ " });\n"
1334
+ " function applyColorToSelection(color, element) {\n"
1335
+ " const selection = window.getSelection();\n"
1336
+ " if (!selection.rangeCount) return false;\n"
1337
+ " const range = selection.getRangeAt(0);\n"
1338
+ " if (!element.contains(range.commonAncestorContainer)) return false;\n"
1339
+ " const selectedText = range.toString();\n"
1340
+ " if (!selectedText) return false;\n"
1341
+ " if (color === 'default') {\n"
1342
+ " // Remove styling - just extract the text content\n"
1343
+ " const textNode = document.createTextNode(selectedText);\n"
1344
+ " range.deleteContents();\n"
1345
+ " range.insertNode(textNode);\n"
1346
+ " } else {\n"
1347
+ " const span = document.createElement('span');\n"
1348
+ " span.style.color = color;\n"
1349
+ " span.style.fontWeight = '600';\n"
1350
+ " try {\n"
1351
+ " range.surroundContents(span);\n"
1352
+ " } catch (e) {\n"
1353
+ " const contents = range.extractContents();\n"
1354
+ " span.appendChild(contents);\n"
1355
+ " range.insertNode(span);\n"
1356
+ " }\n"
1357
+ " }\n"
1358
+ " return true;\n"
1359
+ " }\n"
1360
+ " let lastFocusedContextEdit = null;\n"
1361
+ " document.addEventListener('focusin', function(e) {\n"
1362
+ " if (e.target.classList.contains('round-context-edit')) {\n"
1363
+ " lastFocusedContextEdit = e.target;\n"
1364
+ " }\n"
1365
+ " });\n"
1366
+ " document.addEventListener('mousedown', function(e) {\n"
1367
+ " if (e.target.classList.contains('context-color-btn')) {\n"
1368
+ " e.preventDefault();\n"
1369
+ " }\n"
1370
+ " });\n"
1371
+ " document.addEventListener('click', function(e) {\n"
1372
+ " if (e.target.closest('input:not(.round-context-edit), textarea, select') && !e.target.classList.contains('context-color-btn')) { return; }\n"
1373
+ " if (e.target.classList.contains('context-color-btn')) {\n"
1374
+ " e.preventDefault();\n"
1375
+ " const color = e.target.dataset.color;\n"
1376
+ " const controls = e.target.closest('.round-context-controls');\n"
1377
+ " const contextEdit = controls ? controls.previousElementSibling : null;\n"
1378
+ " if (contextEdit && contextEdit.classList.contains('round-context-edit')) {\n"
1379
+ " contextEdit.focus();\n"
1380
+ " const selection = window.getSelection();\n"
1381
+ " if (selection.rangeCount > 0 && selection.toString().length > 0 && contextEdit.contains(selection.anchorNode)) {\n"
1382
+ " if (applyColorToSelection(color, contextEdit)) {\n"
1383
+ " const key = contextEdit.dataset.contextKey;\n"
1384
+ " localStorage.setItem(key, contextEdit.innerHTML);\n"
1385
+ " }\n"
1386
+ " } else {\n"
1387
+ " try {\n"
1388
+ " if (color !== 'default') {\n"
1389
+ " document.execCommand('styleWithCSS', false, true);\n"
1390
+ " document.execCommand('foreColor', false, color);\n"
1391
+ " }\n"
1392
+ " const key = contextEdit.dataset.contextKey;\n"
1393
+ " setTimeout(() => localStorage.setItem(key, contextEdit.innerHTML), 10);\n"
1394
+ " } catch (e) {\n"
1395
+ " console.log('Color command failed:', e);\n"
1396
+ " }\n"
1397
+ " }\n"
1398
+ " }\n"
1399
+ " }\n"
1400
+ " });\n"
1401
+ " const contextEdits = document.querySelectorAll('.round-context-edit');\n"
1402
+ " contextEdits.forEach(edit => {\n"
1403
+ " edit.addEventListener('input', function() {\n"
1404
+ " const key = this.dataset.contextKey;\n"
1405
+ " localStorage.setItem(key, this.innerHTML);\n"
1406
+ " });\n"
1407
+ " const key = edit.dataset.contextKey;\n"
1408
+ " const saved = localStorage.getItem(key);\n"
1409
+ " if (saved) {\n"
1410
+ " edit.innerHTML = saved;\n"
1411
+ " }\n"
1412
+ " });\n"
1413
+ " document.addEventListener('click', function(e) {\n"
1414
+ " if (e.target.closest('input, textarea, select, .round-context-edit') && !e.target.classList.contains('merge-btn') && !e.target.classList.contains('unmerge-btn')) { return; }\n"
1415
+ " if (e.target.classList.contains('merge-btn')) {\n"
1416
+ " e.preventDefault();\n"
1417
+ " e.stopPropagation();\n"
1418
+ " const msgId = e.target.dataset.msgId;\n"
1419
+ " const currentMsg = e.target.closest('.chat-message');\n"
1420
+ " if (!currentMsg) return;\n"
1421
+ " if (currentMsg.classList.contains('role-user')) {\n"
1422
+ " alert('Cannot merge user messages');\n"
1423
+ " return;\n"
1424
+ " }\n"
1425
+ " let nextMsg = currentMsg.nextElementSibling;\n"
1426
+ " while (nextMsg && !nextMsg.classList.contains('chat-message')) {\n"
1427
+ " nextMsg = nextMsg.nextElementSibling;\n"
1428
+ " }\n"
1429
+ " while (nextMsg && nextMsg.classList.contains('role-user')) {\n"
1430
+ " nextMsg = nextMsg.nextElementSibling;\n"
1431
+ " while (nextMsg && !nextMsg.classList.contains('chat-message')) {\n"
1432
+ " nextMsg = nextMsg.nextElementSibling;\n"
1433
+ " }\n"
1434
+ " }\n"
1435
+ " if (!nextMsg || nextMsg.classList.contains('chat-message') === false) {\n"
1436
+ " alert('No next assistant message to merge with');\n"
1437
+ " return;\n"
1438
+ " }\n"
1439
+ " if (nextMsg.classList.contains('role-user')) {\n"
1440
+ " alert('Cannot merge with user messages');\n"
1441
+ " return;\n"
1442
+ " }\n"
1443
+ " const parent = currentMsg.parentElement;\n"
1444
+ " if (parent.classList.contains('simultaneous-messages')) {\n"
1445
+ " const wrapper = parent;\n"
1446
+ " currentMsg.style.display = '';\n"
1447
+ " currentMsg.classList.remove('merged');\n"
1448
+ " const refNode = wrapper.nextElementSibling;\n"
1449
+ " parent.parentElement.insertBefore(currentMsg, refNode);\n"
1450
+ " if (nextMsg.parentElement === wrapper) {\n"
1451
+ " parent.parentElement.insertBefore(nextMsg, refNode);\n"
1452
+ " }\n"
1453
+ " if (wrapper.children.length === 0) {\n"
1454
+ " wrapper.remove();\n"
1455
+ " }\n"
1456
+ " } else {\n"
1457
+ " const wrapper = document.createElement('div');\n"
1458
+ " wrapper.className = 'simultaneous-messages';\n"
1459
+ " const unmergeBtn = document.createElement('button');\n"
1460
+ " unmergeBtn.className = 'unmerge-btn';\n"
1461
+ " unmergeBtn.innerHTML = '✕';\n"
1462
+ " unmergeBtn.title = 'Click to unmerge messages';\n"
1463
+ " wrapper.appendChild(unmergeBtn);\n"
1464
+ " wrapper.dataset.firstMsgId = currentMsg.dataset.msgId;\n"
1465
+ " wrapper.dataset.secondMsgId = nextMsg.dataset.msgId;\n"
1466
+ " parent.insertBefore(wrapper, currentMsg);\n"
1467
+ " wrapper.appendChild(currentMsg);\n"
1468
+ " wrapper.appendChild(nextMsg);\n"
1469
+ " currentMsg.classList.add('merged');\n"
1470
+ " nextMsg.classList.add('merged');\n"
1471
+ " }\n"
1472
+ " }\n"
1473
+ " if (e.target.classList.contains('unmerge-btn')) {\n"
1474
+ " const wrapper = e.target.closest('.simultaneous-messages');\n"
1475
+ " if (!wrapper) return;\n"
1476
+ " const parent = wrapper.parentElement;\n"
1477
+ " const firstMsgId = wrapper.dataset.firstMsgId;\n"
1478
+ " const secondMsgId = wrapper.dataset.secondMsgId;\n"
1479
+ " const messages = Array.from(wrapper.querySelectorAll('.chat-message'));\n"
1480
+ " const refNode = wrapper.nextElementSibling;\n"
1481
+ " const firstMsg = messages.find(m => m.dataset.msgId === firstMsgId);\n"
1482
+ " const secondMsg = messages.find(m => m.dataset.msgId === secondMsgId);\n"
1483
+ " if (firstMsg) {\n"
1484
+ " firstMsg.classList.remove('merged');\n"
1485
+ " firstMsg.style.display = '';\n"
1486
+ " parent.insertBefore(firstMsg, refNode);\n"
1487
+ " }\n"
1488
+ " if (secondMsg) {\n"
1489
+ " secondMsg.classList.remove('merged');\n"
1490
+ " secondMsg.style.display = '';\n"
1491
+ " parent.insertBefore(secondMsg, refNode);\n"
1492
+ " }\n"
1493
+ " wrapper.remove();\n"
1494
+ " }\n"
1495
+ " });\n"
1496
+ "});\n"
1497
+ "</script>",
1498
+ "</head>",
1499
+ "<body>",
1500
+ '<div class="toolbar-wrap">',
1501
+ '<div class="toolbar-hotzone"></div>',
1502
+ '<div class="toolbar">',
1503
+ '<label for="group-size">Group every</label>',
1504
+ '<input id="group-size" type="number" min="0" step="1" value="1" />',
1505
+ "<span>timesteps</span>",
1506
+ '<button id="apply-grouping">Apply</button>',
1507
+ '<span style="margin-left:8px"></span>',
1508
+ '<label for="range-start"><span class="emoji-bw">🔎</span> Range</label>',
1509
+ '<input id="range-start" type="number" step="1" />',
1510
+ "<span>to</span>",
1511
+ '<input id="range-end" type="number" step="1" />',
1512
+ '<button id="apply-range"><span class="emoji-bw">▶︎</span> Apply</button>',
1513
+ '<button id="toggle-strong-hide"><span class="emoji-bw">🗜️</span> Strong Hide: <span id="strong-hide-state">Off</span></button>',
1514
+ (
1515
+ '<button id="toggle-split-view"><span class="emoji-bw">🪟</span> Split View: <span id="split-view-state">Off</span></button>'
1516
+ if enable_split_view
1517
+ else ""
1518
+ ),
1519
+ '<button id="toggle-chat-view"><span class="emoji-bw">💬</span> Chat View: <span id="chat-view-state">On</span></button>',
1520
+ '<button id="toggle-hide-user-messages"><span class="emoji-bw">👁️</span> Hide Prompts: <span id="hide-user-state">Off</span></button>',
1521
+ '<span id="chat-width-control" style="margin-left:8px;">',
1522
+ '<label for="chat-width-slider"><span class="emoji-bw">↔️</span> Width:</label>',
1523
+ '<input id="chat-width-slider" type="range" min="600" max="1600" step="50" value="900" style="width:120px; vertical-align:middle;" />',
1524
+ '<span id="chat-width-value" style="margin-left:4px;">900px</span>',
1525
+ '</span>',
1526
+ '<span style="margin-left:12px;">',
1527
+ '<label for="font-family-select"><span class="emoji-bw">🔤</span> Font:</label>',
1528
+ '<select id="font-family-select" style="padding:2px 6px; border:1px solid var(--accent-muted); border-radius:var(--corner-radius); background:var(--bg);">',
1529
+ '<option value="\'Segoe UI\', Tahoma, Geneva, Verdana, sans-serif">Segoe UI</option>',
1530
+ '<option value="Arial, sans-serif">Arial</option>',
1531
+ '<option value="\'Helvetica Neue\', Helvetica, sans-serif">Helvetica</option>',
1532
+ '<option value="\'Times New Roman\', Times, serif">Times New Roman</option>',
1533
+ '<option value="Georgia, serif">Georgia</option>',
1534
+ '<option value="\'Courier New\', Courier, monospace">Courier New</option>',
1535
+ '<option value="\'Comic Sans MS\', cursive">Comic Sans</option>',
1536
+ '<option value="\'Trebuchet MS\', sans-serif">Trebuchet MS</option>',
1537
+ '<option value="Verdana, sans-serif">Verdana</option>',
1538
+ '<option value="\'Palatino Linotype\', \'Book Antiqua\', Palatino, serif">Palatino</option>',
1539
+ '<option value="\'Lucida Console\', Monaco, monospace">Lucida Console</option>',
1540
+ '</select>',
1541
+ '</span>',
1542
+ '<span style="margin-left:8px;">',
1543
+ '<label for="font-size-input"><span class="emoji-bw">📏</span> Size:</label>',
1544
+ '<input id="font-size-input" type="number" min="8" max="24" step="1" value="14" style="width:50px;" />',
1545
+ '<span>px</span>',
1546
+ '</span>',
1547
+ '<span style="margin-left:12px; display:flex; align-items:center; gap:8px;">',
1548
+ '<label style="font-weight:600;">Agent Names:</label>',
1549
+ '<select id="alice-emoji-input" style="width:65px; padding:2px 6px; border:1px solid var(--accent-muted); border-radius:var(--corner-radius); background:var(--bg);">',
1550
+ '<option value="🤖">🤖 Robot</option>',
1551
+ '<option value="👤">👤 Human</option>',
1552
+ '</select>',
1553
+ '<input id="alice-name-input" type="text" placeholder="Alice" style="width:80px; padding:2px 6px; border:1px solid var(--accent-muted); border-radius:var(--corner-radius); background:var(--bg);" />',
1554
+ '<span style="margin:0 4px;">|</span>',
1555
+ '<select id="bob-emoji-input" style="width:65px; padding:2px 6px; border:1px solid var(--accent-muted); border-radius:var(--corner-radius); background:var(--bg);">',
1556
+ '<option value="🤖">🤖 Robot</option>',
1557
+ '<option value="👤">👤 Human</option>',
1558
+ '</select>',
1559
+ '<input id="bob-name-input" type="text" placeholder="Bob" style="width:80px; padding:2px 6px; border:1px solid var(--accent-muted); border-radius:var(--corner-radius); background:var(--bg);" />',
1560
+ '<button id="apply-agent-names" style="padding:4px 8px; border:1px solid var(--accent-muted); background:var(--panel-bg); border-radius:var(--corner-radius); cursor:pointer;">Apply</button>',
1561
+ '</span>',
1562
+ "</div>",
1563
+ "</div>",
1564
+ '<div id="flow-linear" class="messages-flow" style="display:none">',
1565
+ ]
1566
+
1567
+ last_time_step = None
1568
+ for original_index, turn in indexed_turns:
1569
+ # Build classes
1570
+ agent_class = f"agent-{re.sub('[^a-z0-9_-]', '-', turn.agent_id.lower())}"
1571
+ role_class = f"role-{turn.role}"
1572
+ collapsed_class = " collapsed" if turn.role == "user" else ""
1573
+
1574
+ # Badge content
1575
+ agent_id_clean = html.escape(turn.agent_id).lower()
1576
+ if turn.role == "assistant":
1577
+ name = html.escape(turn.agent_id)
1578
+ emoji = '<span class="emoji-bw" data-agent-id="' + agent_id_clean + '"> 🤖</span>'
1579
+ raw_val = turn.reward
1580
+ if isinstance(raw_val, (int, float)):
1581
+ reward_val = f"{raw_val:.4f}".rstrip("0").rstrip(".")
1582
+ if len(reward_val) > 8:
1583
+ reward_val = reward_val[:8] + "…"
1584
+ else:
1585
+ reward_val = str(raw_val)
1586
+ # Format: "🤖 Alice • Reward: 5.5556 • 💬 :"
1587
+ badge_inner = (
1588
+ f'{emoji} <span class="agent-name" data-agent-id="{agent_id_clean}">{name}</span>'
1589
+ f' <span class="sep"> • </span><span class="reward">Reward ⚑ = {reward_val}</span>'
1590
+ )
1591
+ else:
1592
+ # For user messages, show "Prompt of {Agent ID}" in the badge
1593
+ name = html.escape(turn.agent_id)
1594
+ # Format (no reward): "Prompt of Alice • "
1595
+ badge_inner = f'Prompt of <span class="agent-name" data-agent-id="{agent_id_clean}">{name}</span> <span class="sep"> • </span>:'
1596
+
1597
+ badge = f'<span class="agent-badge">{badge_inner}</span>'
1598
+
1599
+ # Inline timestep distinction badge at step boundaries (render before first message)
1600
+ ts_badge_html = ""
1601
+ if last_time_step is None or turn.time_step != last_time_step:
1602
+ ts_badge_html = f'<span class="ts-badge">⏱ {turn.time_step}</span>'
1603
+ last_time_step = turn.time_step
1604
+
1605
+ escaped_content = html.escape(turn.content)
1606
+ reasoning_html = ""
1607
+ if turn.reasoning_content:
1608
+ # Normalize reasoning to avoid leading/newline whitespace that creates visual gaps
1609
+ _raw_reasoning = turn.reasoning_content.replace("\r\n", "\n")
1610
+ _raw_reasoning = _re.sub(
1611
+ r"^\s*\n+", "", _raw_reasoning
1612
+ ) # drop leading blank lines
1613
+ _raw_reasoning = _re.sub(
1614
+ r"\*\*(\s*\n\s*)", r"** ", _raw_reasoning
1615
+ ) # newline right after **
1616
+ _raw_reasoning = _re.sub(
1617
+ r"(\s*\n\s*)\*\*", r" **", _raw_reasoning
1618
+ ) # newline right before **
1619
+ escaped_reasoning = html.escape(_raw_reasoning)
1620
+ reasoning_html = f'<span class="reasoning-inline"><span class="reasoning-icon">💭</span><span class="reasoning-text">{escaped_reasoning}</span></span>'
1621
+ collapsed_text = re.sub(r"\s+", " ", escaped_content).strip()
1622
+
1623
+ html_parts.append(
1624
+ f'<div class="chat-turn {agent_class} {role_class}{collapsed_class}" data-time-step="{turn.time_step}">'
1625
+ f'<div class="turn-content {agent_class} {role_class}">{ts_badge_html}{badge}'
1626
+ f'<span class="message-box">{reasoning_html}<span class="main-content">💬 {collapsed_text}</span></span>'
1627
+ f'<span class="message-placeholder">(...)</span>'
1628
+ f"</div>"
1629
+ f"</div>"
1630
+ )
1631
+
1632
+ html_parts.append("</div>") # close linear flow
1633
+ if enable_split_view:
1634
+ import html as _html_mod
1635
+
1636
+ html_parts.append(
1637
+ '<div id="flow-split" class="messages-flow" style="display:none">'
1638
+ )
1639
+ html_parts.append('<div class="split-wrapper">')
1640
+ # Per-agent columns
1641
+ per_agent_turns = {
1642
+ aid: [t for t in chat_turns if t.agent_id == aid]
1643
+ for aid in assistant_agents
1644
+ }
1645
+ for idx, aid in enumerate(assistant_agents):
1646
+ turns_agent = per_agent_turns[aid]
1647
+ html_parts.append(
1648
+ f'<div class="split-col" data-agent="{_html_mod.escape(aid)}">'
1649
+ )
1650
+ last_ts_agent = None
1651
+ for turn in turns_agent:
1652
+ agent_class = (
1653
+ f"agent-{re.sub('[^a-z0-9_-]', '-', turn.agent_id.lower())}"
1654
+ )
1655
+ role_class = f"role-{turn.role}"
1656
+ collapsed_class = " collapsed" if turn.role == "user" else ""
1657
+ ts_badge_html = ""
1658
+ if last_ts_agent is None or turn.time_step != last_ts_agent:
1659
+ ts_badge_html = f'<span class="ts-badge">⏱ {turn.time_step}</span>'
1660
+ last_ts_agent = turn.time_step
1661
+ esc_content = _html_mod.escape(turn.content)
1662
+ reasoning_html = ""
1663
+ if turn.reasoning_content:
1664
+ _raw_reasoning = turn.reasoning_content.replace("\r\n", "\n")
1665
+ _raw_reasoning = _re.sub(r"^\s*\n+", "", _raw_reasoning)
1666
+ _raw_reasoning = _re.sub(r"\*\*(\s*\n\s*)", r"** ", _raw_reasoning)
1667
+ _raw_reasoning = _re.sub(r"(\s*\n\s*)\*\*", r" **", _raw_reasoning)
1668
+ esc_reasoning = _html_mod.escape(_raw_reasoning)
1669
+ reasoning_html = f'<span class="reasoning-inline"><span class="reasoning-icon">💭</span><span class="reasoning-text">{esc_reasoning}</span></span>'
1670
+ collapsed_text = re.sub(r"\s+", " ", esc_content).strip()
1671
+ agent_id_clean = _html_mod.escape(turn.agent_id).lower()
1672
+ if turn.role == "assistant":
1673
+ name = _html_mod.escape(turn.agent_id)
1674
+ emoji = '<span class="emoji-bw" data-agent-id="' + agent_id_clean + '"> 🤖</span>'
1675
+ raw_val = turn.reward
1676
+ if isinstance(raw_val, (int, float)):
1677
+ reward_val = f"{raw_val:.4f}".rstrip("0").rstrip(".")
1678
+ if len(reward_val) > 8:
1679
+ reward_val = reward_val[:8] + "…"
1680
+ else:
1681
+ reward_val = str(raw_val)
1682
+ badge_inner = (
1683
+ f'{emoji} <span class="agent-name" data-agent-id="{agent_id_clean}">{name}</span>'
1684
+ f' <span class="sep"> • </span><span class="reward">Reward ⚑ : {reward_val}</span>'
1685
+ )
1686
+ else:
1687
+ name = _html_mod.escape(turn.agent_id)
1688
+ badge_inner = f'Prompt of <span class="agent-name" data-agent-id="{agent_id_clean}">{name}</span> <span class="sep"> • </span>:'
1689
+ badge = f'<span class="agent-badge">{badge_inner}</span>'
1690
+ html_parts.append(
1691
+ f'<div class="chat-turn {agent_class} {role_class}{collapsed_class}" data-time-step="{turn.time_step}">'
1692
+ f'<div class="turn-content {agent_class} {role_class}">{ts_badge_html}{badge}'
1693
+ f'<span class="message-box">{reasoning_html}<span class="main-content">💬 {collapsed_text}</span></span>'
1694
+ f'<span class="message-placeholder">(...)</span>'
1695
+ f"</div></div>"
1696
+ )
1697
+ html_parts.append("</div>") # close split col
1698
+ html_parts.append("</div>") # split-wrapper
1699
+ html_parts.append("</div>") # flow-split
1700
+
1701
+ # Add Chat View
1702
+ import html as _html_mod
1703
+ html_parts.append('<div id="flow-chat" class="messages-flow">')
1704
+
1705
+ # Helper function to add context annotation areas
1706
+ def add_context_area(position: str, time_step: int):
1707
+ context_key = f"round-context-{position}-{time_step}"
1708
+ placeholder = f"Add context {position} round {time_step}..."
1709
+ color_buttons = ""
1710
+ # Add default/reset color button first
1711
+ color_buttons += (
1712
+ f'<div class="context-color-btn" data-color="default" '
1713
+ f'style="background: linear-gradient(135deg, #000 25%, transparent 25%, transparent 75%, #000 75%), '
1714
+ f'linear-gradient(135deg, #000 25%, transparent 25%, transparent 75%, #000 75%); '
1715
+ f'background-size: 4px 4px; background-position: 0 0, 2px 2px; '
1716
+ f'background-color: #fff;" title="Default color"></div>'
1717
+ )
1718
+ for color_name, color_value in [
1719
+ ('red', '#d32f2f'),
1720
+ ('orange', '#f57c00'),
1721
+ ('yellow', '#f9a825'),
1722
+ ('green', '#388e3c'),
1723
+ ('blue', '#1976d2'),
1724
+ ('purple', '#7b1fa2'),
1725
+ ('gray', '#666666'),
1726
+ ]:
1727
+ color_buttons += (
1728
+ f'<div class="context-color-btn" data-color="{color_value}" '
1729
+ f'style="background-color: {color_value};" title="{color_name}"></div>'
1730
+ )
1731
+
1732
+ html_parts.append(
1733
+ f'<div class="round-context">'
1734
+ f'<div class="round-context-edit" contenteditable="true" spellcheck="true" '
1735
+ f'data-context-key="{context_key}" '
1736
+ f'data-placeholder="{placeholder}"></div>'
1737
+ f'<div class="round-context-controls">{color_buttons}</div>'
1738
+ f'</div>'
1739
+ )
1740
+
1741
+ # Helper function to add split agent context boxes
1742
+ def add_split_agent_contexts(position: str, time_step: int):
1743
+ color_buttons = ""
1744
+ # Add default/reset color button first
1745
+ color_buttons += (
1746
+ f'<div class="context-color-btn" data-color="default" '
1747
+ f'style="background: linear-gradient(135deg, #000 25%, transparent 25%, transparent 75%, #000 75%), '
1748
+ f'linear-gradient(135deg, #000 25%, transparent 25%, transparent 75%, #000 75%); '
1749
+ f'background-size: 4px 4px; background-position: 0 0, 2px 2px; '
1750
+ f'background-color: #fff;" title="Default color"></div>'
1751
+ )
1752
+ for color_name, color_value in [
1753
+ ('red', '#d32f2f'),
1754
+ ('orange', '#f57c00'),
1755
+ ('yellow', '#f9a825'),
1756
+ ('green', '#388e3c'),
1757
+ ('blue', '#1976d2'),
1758
+ ('purple', '#7b1fa2'),
1759
+ ('gray', '#666666'),
1760
+ ]:
1761
+ color_buttons += (
1762
+ f'<div class="context-color-btn" data-color="{color_value}" '
1763
+ f'style="background-color: {color_value};" title="{color_name}"></div>'
1764
+ )
1765
+
1766
+ html_parts.append('<div class="split-agent-context">')
1767
+
1768
+ # Alice box
1769
+ alice_key = f"agent-context-alice-{position}-{time_step}"
1770
+ alice_placeholder = f"..."
1771
+ html_parts.append(
1772
+ f'<div class="agent-context-box agent-alice">'
1773
+ f'<div class="round-context-edit" contenteditable="true" spellcheck="true" '
1774
+ f'data-context-key="{alice_key}" '
1775
+ f'data-placeholder="{alice_placeholder}"></div>'
1776
+ f'<div class="round-context-controls">{color_buttons}</div>'
1777
+ f'</div>'
1778
+ )
1779
+
1780
+ # Bob box
1781
+ bob_key = f"agent-context-bob-{position}-{time_step}"
1782
+ bob_placeholder = f"..."
1783
+ html_parts.append(
1784
+ f'<div class="agent-context-box agent-bob">'
1785
+ f'<div class="round-context-edit" contenteditable="true" spellcheck="true" '
1786
+ f'data-context-key="{bob_key}" '
1787
+ f'data-placeholder="{bob_placeholder}"></div>'
1788
+ f'<div class="round-context-controls">{color_buttons}</div>'
1789
+ f'</div>'
1790
+ )
1791
+
1792
+ html_parts.append('</div>') # split-agent-context
1793
+
1794
+ last_time_step_chat = None
1795
+ for original_index, turn in indexed_turns:
1796
+ agent_class = f"agent-{re.sub('[^a-z0-9_-]', '-', turn.agent_id.lower())}"
1797
+ role_class = f"role-{turn.role}"
1798
+
1799
+ # Add time step divider and beginning context
1800
+ if last_time_step_chat is None or turn.time_step != last_time_step_chat:
1801
+ # Add end contexts for previous round (only regular context, not prompt summary)
1802
+ if last_time_step_chat is not None:
1803
+ add_context_area("end", last_time_step_chat)
1804
+
1805
+ html_parts.append(
1806
+ f'<div class="chat-group-divider">'
1807
+ f'<span class="chat-group-label">⏱ Round {turn.time_step + 1}</span>'
1808
+ f'</div>'
1809
+ )
1810
+
1811
+ # Add beginning contexts for new round (both context and prompt summary)
1812
+ add_context_area("beginning", turn.time_step)
1813
+ add_split_agent_contexts("beginning", turn.time_step)
1814
+
1815
+ last_time_step_chat = turn.time_step
1816
+
1817
+ # Build chat message with merge controls
1818
+ html_parts.append(f'<div class="chat-message {agent_class} {role_class}" data-msg-id="{original_index}">')
1819
+
1820
+ # Add merge control button
1821
+ html_parts.append(
1822
+ f'<button class="merge-btn" title="Merge with next message" data-msg-id="{original_index}">⇄</button>'
1823
+ )
1824
+
1825
+ html_parts.append('<div class="chat-message-content">')
1826
+
1827
+ # Header with agent name and reward (always show reward)
1828
+ agent_id_clean = _html_mod.escape(turn.agent_id).lower()
1829
+ if turn.role == "assistant":
1830
+ name = _html_mod.escape(turn.agent_id)
1831
+ raw_val = turn.reward
1832
+ if isinstance(raw_val, (int, float)):
1833
+ reward_val = f"{raw_val:.4f}".rstrip("0").rstrip(".")
1834
+ if len(reward_val) > 8:
1835
+ reward_val = reward_val[:8] + "…"
1836
+ else:
1837
+ reward_val = str(raw_val)
1838
+ header_html = (
1839
+ f'<div class="chat-header">'
1840
+ f'<span class="emoji-bw" data-agent-id="{agent_id_clean}">🤖</span> <span class="agent-name" data-agent-id="{agent_id_clean}">{name}</span>'
1841
+ f'<span class="chat-reward">⚑ {reward_val}</span>'
1842
+ f'</div>'
1843
+ )
1844
+ else:
1845
+ name = _html_mod.escape(turn.agent_id)
1846
+ header_html = f'<div class="chat-header">Prompt of <span class="agent-name" data-agent-id="{agent_id_clean}">{name}</span></div>'
1847
+
1848
+ html_parts.append(header_html)
1849
+
1850
+ # Reasoning content if present
1851
+ if turn.reasoning_content:
1852
+ _raw_reasoning = turn.reasoning_content.replace("\r\n", "\n")
1853
+ _raw_reasoning = _re.sub(r"^\s*\n+", "", _raw_reasoning)
1854
+ esc_reasoning = _html_mod.escape(_raw_reasoning)
1855
+ html_parts.append(
1856
+ f'<div class="chat-reasoning collapsed">'
1857
+ f'<span class="reasoning-icon">💭</span> '
1858
+ f'<span class="reasoning-text">{esc_reasoning}</span>'
1859
+ f'</div>'
1860
+ )
1861
+
1862
+ # Message bubble
1863
+ esc_content = _html_mod.escape(turn.content)
1864
+ html_parts.append(f'<div class="chat-bubble">{esc_content}</div>')
1865
+
1866
+ html_parts.append('</div>') # chat-message-content
1867
+ html_parts.append('</div>') # chat-message
1868
+
1869
+ # Add end contexts for the last round (only regular context, not prompt summary)
1870
+ if last_time_step_chat is not None:
1871
+ add_context_area("end", last_time_step_chat)
1872
+
1873
+ html_parts.append("</div>") # flow-chat
1874
+ html_parts.extend(["</body>", "</html>"])
1875
+
1876
+ return "\n".join(html_parts)
1877
+
1878
+
1879
def export_html_from_rollout_tree(path: Path, outdir: Path, main_only: bool = False):
    """Process a rollout tree file and generate HTML files for each path.

    Creates separate HTML files for the main path and each branch path.
    The main path is saved in the root output directory, while branch paths
    are saved in a 'branches' subdirectory.

    Args:
        path: Path to the rollout tree file (a pickled tree, loaded via
            ``load_rollout_tree``).
        outdir: Output directory for HTML files.
        main_only: If True, only export the main trajectory (default: False).
    """
    root = load_rollout_tree(path)
    mgid = root.id

    main_path, branch_paths = get_rollout_tree_paths(root)

    outdir.mkdir(parents=True, exist_ok=True)

    # Generate HTML for the main path.
    chat_turns = gather_all_chat_turns_for_path(main_path)
    html_content = html_from_chat_turns(chat_turns)
    output_file = outdir / f"mgid:{mgid}_main_html_render.render.html"
    with open(output_file, "w", encoding="utf-8") as f:
        f.write(html_content)

    # Bug fix: previously the branch loop below ran even when main_only=True,
    # raising a NameError because branches_dir was only created under
    # `not main_only` — now branch export is skipped entirely in that case.
    if main_only or not branch_paths:
        return

    branches_dir = outdir / f"mgid:{mgid}_branches_html_renders"
    branches_dir.mkdir(parents=True, exist_ok=True)

    # Generate HTML for each branch path.
    for path_obj in branch_paths:
        chat_turns = gather_all_chat_turns_for_path(path_obj)
        html_content = html_from_chat_turns(chat_turns)

        path_id: str = path_obj.id
        output_filename = f"{path_id}_html_render.render.html"
        output_file = branches_dir / output_filename

        with open(output_file, "w", encoding="utf-8") as f:
            f.write(html_content)
src_code_for_reproducibility/utils/rollout_tree_gather_utils.py ADDED
@@ -0,0 +1,314 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import csv
4
+ import os
5
+ import pickle
6
+ import re
7
+ from collections import defaultdict
8
+ from dataclasses import dataclass
9
+ from pathlib import Path
10
+ from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple
11
+
12
+ from mllm.markov_games.rollout_tree import *
13
+
14
+
15
+
16
+
17
+
18
def load_rollout_tree(path: Path) -> RolloutTreeRootNode:
    """Load a rollout tree from a PKL file containing a dict."""
    # NOTE: pickle.load can execute arbitrary code; only load trusted
    # rollout files produced by this project.
    with open(path, "rb") as handle:
        raw = pickle.load(handle)
    return RolloutTreeRootNode.model_validate(raw)
23
+
24
+
25
@dataclass
class RolloutNodeList:
    """A flat, ordered sequence of rollout-tree nodes forming one path."""

    # Path identifier, e.g. "mgid:<id>_type:main" or a branch id built by
    # get_rollout_tree_paths.
    id: str
    # Nodes along the path in traversal order.
    nodes: List[RolloutTreeNode]
29
+
30
+
31
def get_rollout_tree_paths(
    root: RolloutTreeRootNode, mgid: Optional[str] = None
) -> Tuple[RolloutNodeList, List[RolloutNodeList]]:
    """
    Returns:
        main_path: The main path from the root to the end of the tree.
        branch_paths: A list of all branch paths from the root to the end of the tree.
        Each branch path contains a list of nodes that are part of the branch, including the nodes from the main path before the branch was taken.
    """
    # Accumulator filled by traverse_for_branches (closure over this list).
    branch_paths = []

    def collect_path_nodes(current) -> List[RolloutTreeNode]:
        """Recursively collect all nodes in a path starting from current node."""
        if current is None:
            return []

        if isinstance(current, RolloutTreeNode):
            return [current] + collect_path_nodes(current.child)

        elif isinstance(current, RolloutTreeBranchNode):
            # For branch nodes, we only follow the main_child for path collection
            if current.main_child:
                return [current.main_child] + collect_path_nodes(
                    current.main_child.child
                )
            else:
                return []
        # NOTE(review): any other node type falls through and returns None,
        # which would break the list concatenation above — presumably the
        # tree only ever contains the two node types handled here; confirm.

    def traverse_for_branches(
        current,
        main_path_prefix: List[RolloutTreeNode],
        path_id: str,  # NOTE(review): currently always "" and unused below.
        current_time_step: Optional[int] = 0,
    ):
        """Traverse tree to collect all branch paths."""
        if current is None:
            return

        if isinstance(current, RolloutTreeNode):
            # Continue traversing with this node added to the main path prefix
            new_prefix = main_path_prefix + [current]
            traverse_for_branches(current.child, new_prefix, path_id, current.time_step)

        elif isinstance(current, RolloutTreeBranchNode):
            # Collect all branch paths
            if current.branches:
                for agent_id, branch_node_list in current.branches.items():
                    if branch_node_list:
                        # Start with the main path prefix, then recursively collect all nodes in this branch
                        branch_path_nodes = main_path_prefix.copy()
                        for branch_node in branch_node_list:
                            branch_path_nodes.extend(collect_path_nodes(branch_node))

                        # Create proper branch path ID with mgid, agent_id, and time_step
                        mgid_str = mgid or str(root.id)
                        branch_path_id = f"mgid:{mgid_str}_type:branch_agent:{agent_id}_time_step:{current_time_step}"
                        branch_paths.append(
                            RolloutNodeList(id=branch_path_id, nodes=branch_path_nodes)
                        )

            # Process the main child and add to prefix
            new_prefix = main_path_prefix
            if current.main_child:
                new_prefix = main_path_prefix + [current.main_child]

            # Continue traversing the main path
            if current.main_child:
                traverse_for_branches(
                    current.main_child.child,
                    new_prefix,
                    path_id,
                    current.main_child.time_step,
                )

    # Collect the main path nodes
    main_path_nodes = collect_path_nodes(root.child)

    # Traverse to collect all branch paths
    traverse_for_branches(root.child, [], "")

    # Create the main path with proper mgid format
    mgid_str = mgid or str(root.id)
    main_path = RolloutNodeList(id=f"mgid:{mgid_str}_type:main", nodes=main_path_nodes)

    return main_path, branch_paths
116
+
117
+
118
class ChatTurnLog(BaseModel):
    """One chat message (prompt or completion) extracted from a rollout node."""

    # Environment time step this turn belongs to.
    time_step: int
    # Agent whose action log produced the turn.
    agent_id: str
    # Chat role; downstream code distinguishes "user" vs "assistant".
    role: str
    content: str
    # Optional chain-of-thought text when the backend exposes it.
    reasoning_content: Optional[str] = None
    # Presumably marks the final turn of a state — TODO confirm semantics.
    is_state_end: bool
    # Reward recorded for the agent at this step (0 when absent).
    reward: float
126
+
127
+
128
def gather_agent_chat_turns_for_path(
    agent_id: str, path: RolloutNodeList
) -> List[ChatTurnLog]:
    """Iterate through all chat turns for a specific agent in a path sorted by time step."""
    collected: List[ChatTurnLog] = []
    for node in path.nodes:
        log = node.step_log.action_logs.get(agent_id, [])
        if not log:
            continue
        # Reward is constant for the step; look it up once per node.
        step_reward = node.step_log.simulation_step_log.rewards.get(agent_id, 0)
        for turn in log.chat_turns or []:
            collected.append(
                ChatTurnLog(
                    time_step=node.time_step,
                    agent_id=agent_id,
                    role=turn.role,
                    content=turn.content,
                    reasoning_content=getattr(turn, "reasoning_content", None),
                    is_state_end=turn.is_state_end,
                    reward=step_reward,
                )
            )
    return collected
151
+
152
+
153
def gather_all_chat_turns_for_path(path: RolloutNodeList) -> List[ChatTurnLog]:
    """Iterate through all chat turns for all agents in a path sorted by time step."""
    turns = []

    # Collect turns from all agents, but interleave them per timestep by (user, assistant) pairs
    for node in path.nodes:
        # Build (user[, assistant]) pairs for each agent at this timestep
        agent_ids = sorted(list(node.step_log.action_logs.keys()))
        per_agent_pairs: Dict[str, List[List[ChatTurnLog]]] = {}

        for agent_id in agent_ids:
            action_log = node.step_log.action_logs.get(agent_id)
            pairs: List[List[ChatTurnLog]] = []
            # Holds an open user message awaiting its assistant reply.
            current_pair: List[ChatTurnLog] = []

            if action_log and action_log.chat_turns:
                for chat_turn in action_log.chat_turns:
                    turn_log = ChatTurnLog(
                        time_step=node.time_step,
                        agent_id=agent_id,
                        role=chat_turn.role,
                        content=chat_turn.content,
                        reasoning_content=getattr(chat_turn, "reasoning_content", None),
                        is_state_end=chat_turn.is_state_end,
                        reward=node.step_log.simulation_step_log.rewards.get(
                            agent_id, 0
                        ),
                    )

                    if chat_turn.role == "user":
                        # If a previous pair is open, close it and start a new one
                        if current_pair:
                            pairs.append(current_pair)
                            current_pair = []
                        current_pair = [turn_log]
                    else:
                        # assistant: attach to an open user message if present; otherwise stand alone
                        if (
                            current_pair
                            and len(current_pair) == 1
                            and current_pair[0].role == "user"
                        ):
                            current_pair.append(turn_log)
                            pairs.append(current_pair)
                            current_pair = []
                        else:
                            # No preceding user or already paired; treat as its own unit
                            pairs.append([turn_log])

            if current_pair:
                # Unpaired trailing user message
                pairs.append(current_pair)

            per_agent_pairs[agent_id] = pairs

        # Interleave pairs across agents: A1, B1, A2, B2, ...
        # Round-robin over agents; stops when no agent has a pair at `index`.
        index = 0
        while True:
            added_any = False
            for agent_id in agent_ids:
                agent_pairs = per_agent_pairs.get(agent_id, [])
                if index < len(agent_pairs):
                    for tl in agent_pairs[index]:
                        turns.append(tl)
                    added_any = True
            if not added_any:
                break
            index += 1

    return turns
223
+
224
+
225
def chat_turns_to_dict(chat_turns: Iterator[ChatTurnLog]) -> Iterator[Dict[str, Any]]:
    """Render all chat turns for a path as structured data for JSON."""
    yield from (turn.model_dump() for turn in chat_turns)
229
+
230
+
231
def get_all_agents(root: RolloutTreeRootNode) -> List[str]:
    """list of all agent IDs that appear in the tree."""
    node = root.child
    if node is None:
        return []

    # A branch root delegates to its main child for agent discovery.
    if isinstance(node, RolloutTreeBranchNode):
        node = node.main_child
    if node is None:
        return []

    # All agents should be present in the first node, either as actors or
    # as reward recipients.
    agent_ids = set(node.step_log.action_logs.keys())
    agent_ids |= set(node.step_log.simulation_step_log.rewards.keys())
    return sorted(agent_ids)
249
+
250
+
251
def gather_agent_main_rewards(agent_id: str, path: RolloutNodeList) -> List[float]:
    """Gather main rewards for a specific agent in a path."""
    return [
        node.step_log.simulation_step_log.rewards[agent_id] for node in path.nodes
    ]
258
+
259
+
260
def gather_all_rewards(path: RolloutNodeList) -> List[Dict[AgentId, float]]:
    """Gather main rewards from main trajectory in a path.

    Each element is a shallow copy of that step's rewards mapping, so callers
    may mutate the result without touching the tree.
    """
    return [dict(node.step_log.simulation_step_log.rewards) for node in path.nodes]
266
+
267
+
268
def gather_simulation_stats(
    path: RolloutNodeList,
    filter: Callable[[SimulationStepLog], bool],
    stat_func: Callable[[SimulationStepLog], Any],
) -> List[Any]:
    """Gather stats from main trajectory in a path.

    Applies ``stat_func`` to every simulation step log accepted by ``filter``,
    preserving node order.  (The ``filter`` parameter keeps its original name —
    although it shadows the builtin — to stay keyword-call compatible.)
    """
    return [
        stat_func(step_log)
        for node in path.nodes
        if filter(step_log := node.step_log.simulation_step_log)
    ]
280
+
281
+
282
def gather_simulation_step_logs(path: RolloutNodeList) -> List[SimulationStepLog]:
    """Gather simulation information from main trajectory in a path."""
    return [node.step_log.simulation_step_log for node in path.nodes]
288
+
289
+
290
def export_chat_logs(path: Path, outdir: Path):
    """Process a rollout tree PKL file and generate a JSONL of chat turns as dicts.
    Each line contains an object with path_id and chat_turns for a single path.
    """
    import json

    tree = load_rollout_tree(path)
    mgid = tree.id

    main_path, branch_paths = get_rollout_tree_paths(tree)

    outdir.mkdir(parents=True, exist_ok=True)
    output_file = outdir / f"mgid:{mgid}_plucked_chats.render.jsonl"

    # One JSON object per line: the main path first, then every branch path.
    with open(output_file, "w", encoding="utf-8") as sink:
        for node_list in [main_path, *branch_paths]:
            turns = gather_all_chat_turns_for_path(node_list)
            record = {
                "path_id": str(node_list.id),
                "chat_turns": list(chat_turns_to_dict(iter(turns))),
            }
            sink.write(json.dumps(record, ensure_ascii=False) + "\n")
313
+
314
+
src_code_for_reproducibility/utils/rollout_tree_stats.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, Callable, List, Tuple
2
+
3
+ from mllm.markov_games.rollout_tree import RolloutTreeRootNode
4
+ from mllm.markov_games.simulation import SimulationStepLog
5
+ from mllm.utils.rollout_tree_gather_utils import (
6
+ gather_simulation_step_logs,
7
+ get_rollout_tree_paths,
8
+ )
9
+ from mllm.utils.stat_pack import StatPack
10
+
11
+
12
+ def get_rollout_tree_stat_tally(
13
+ rollout_tree: RolloutTreeRootNode,
14
+ metrics: List[Callable[[SimulationStepLog], List[Tuple[str, float]]]],
15
+ ) -> StatPack:
16
+ stat_tally = StatPack()
17
+ # get simulation step logs
18
+ node_list = get_rollout_tree_paths(rollout_tree)[0]
19
+ simulation_step_logs = gather_simulation_step_logs(node_list)
20
+ for simulation_step_log in simulation_step_logs:
21
+ for metric in metrics:
22
+ metric_result = metric(simulation_step_log)
23
+ if metric_result is not None:
24
+ for key, value in metric_result:
25
+ stat_tally.add_stat(key, value)
26
+ return stat_tally
27
+
28
+
29
+ def get_rollout_tree_mean_stats(
30
+ rollout_tree: RolloutTreeRootNode, metrics: List[Callable[[SimulationStepLog], Any]]
31
+ ) -> StatPack:
32
+ """Get the mean stats for a rollout tree."""
33
+ stat_tally = get_rollout_tree_stat_tally(rollout_tree, metrics)
34
+ return stat_tally.mean()
35
+
36
+
37
+ def get_mean_rollout_tree_stats(
38
+ rollout_trees: List[RolloutTreeRootNode],
39
+ metrics: List[Callable[[SimulationStepLog], Any]],
40
+ ) -> StatPack:
41
+ """Get the mean stats for a list of rollout trees."""
42
+ # TODO complete this
43
+ stat_tallies = [
44
+ get_rollout_tree_mean_stats(rollout_tree, metrics)
45
+ for rollout_tree in rollout_trees
46
+ ]
47
+ mean_stat_tally = StatPack()
48
+ for stat_tally in stat_tallies:
49
+ mean_stat_tally.add_stats(stat_tally)
50
+ return mean_stat_tally.mean()
src_code_for_reproducibility/utils/short_id_gen.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import uuid
2
+
3
+
4
def generate_short_id() -> int:
    """
    Generates a short unique ID for tracking adapter versions.

    Returns:
        int: An 8-digit integer ID (the first eight decimal digits of a
        random UUID4's integer value).
    """
    digits = str(uuid.uuid4().int)
    return int(digits[:8])
src_code_for_reproducibility/utils/stat_pack.py ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import csv
2
+ import json
3
+ import os
4
+ import pickle
5
+ from collections import Counter
6
+ from copy import deepcopy
7
+ from locale import strcoll
8
+ from statistics import mean
9
+ from typing import Any, Dict, Iterator, List, Optional, Tuple, TypedDict
10
+
11
+ import matplotlib.pyplot as plt
12
+ import numpy as np
13
+ from torch.utils.tensorboard import SummaryWriter
14
+
15
+ plt.style.use(
16
+ "https://raw.githubusercontent.com/dereckpiche/DedeStyle/refs/heads/main/dedestyle.mplstyle"
17
+ )
18
+
19
+ import wandb
20
+
21
+ from . import wandb_utils
22
+
23
+
24
+ class StatPack:
25
+ def __init__(self):
26
+ self.data = {}
27
+
28
+ def add_stat(self, key: str, value: float | int | None):
29
+ assert (
30
+ isinstance(value, float) or isinstance(value, int) or value is None
31
+ ), f"Value {value} is not a valid type"
32
+ if key not in self.data:
33
+ self.data[key] = []
34
+ self.data[key].append(value)
35
+
36
+ def add_stats(self, other: "StatPack"):
37
+ for key in other.keys():
38
+ self.add_stat(key, other[key])
39
+
40
+ def __getitem__(self, key: str):
41
+ return self.data[key]
42
+
43
+ def __setitem__(self, key: str, value: Any):
44
+ self.data[key] = value
45
+
46
+ def __contains__(self, key: str):
47
+ return key in self.data
48
+
49
+ def __len__(self):
50
+ return len(self.data)
51
+
52
+ def __iter__(self):
53
+ return iter(self.data)
54
+
55
+ def keys(self):
56
+ return self.data.keys()
57
+
58
+ def values(self):
59
+ return self.data.values()
60
+
61
+ def items(self):
62
+ return self.data.items()
63
+
64
+ def mean(self):
65
+ mean_st = StatPack()
66
+ for key in self.keys():
67
+ if isinstance(self[key], list):
68
+ # TODO: exclude None values
69
+ non_none_values = [v for v in self[key] if v is not None]
70
+ if non_none_values:
71
+ mean_st[key] = np.mean(np.array(non_none_values))
72
+ else:
73
+ mean_st[key] = None
74
+ return mean_st
75
+
76
+ def store_plots(self, folder: str):
77
+ os.makedirs(folder, exist_ok=True)
78
+ for key in self.keys():
79
+ plt.figure(figsize=(10, 5))
80
+ plt.plot(self[key])
81
+ plt.title(key)
82
+ plt.savefig(os.path.join(folder, f"{key}.pdf"))
83
+ plt.close()
84
+
85
+ def store_numpy(self, folder: str):
86
+ os.makedirs(folder, exist_ok=True)
87
+ for key in self.keys():
88
+ # Sanitize filename components (avoid slashes, spaces, etc.)
89
+ safe_key = str(key).replace(os.sep, "_").replace("/", "_").replace(" ", "_")
90
+ values = self[key]
91
+ # Convert None to NaN for numpy compatibility
92
+ arr = np.array(
93
+ [(np.nan if (v is None) else v) for v in values], dtype=float
94
+ )
95
+ np.save(os.path.join(folder, f"{safe_key}.npy"), arr)
96
+
97
+ def store_json(self, folder: str, filename: str = "stats.json"):
98
+ os.makedirs(folder, exist_ok=True)
99
+ with open(os.path.join(folder, filename), "w") as f:
100
+ json.dump(self.data, f, indent=4)
101
+
102
+ def store_csv(self, folder: str):
103
+ os.makedirs(folder, exist_ok=True)
104
+ for key in self.keys():
105
+ with open(os.path.join(folder, f"stats.csv"), "w") as f:
106
+ writer = csv.writer(f)
107
+ writer.writerow([key] + self[key])
108
+
109
+ def store_pickle(self, folder: str):
110
+ os.makedirs(folder, exist_ok=True)
111
+ for key in self.keys():
112
+ with open(os.path.join(folder, f"stats.pkl"), "wb") as f:
113
+ pickle.dump(self[key], f)
src_code_for_reproducibility/utils/update_start_epoch.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+ # During run, set hydra.run.dir=./outputs/{folder}
4
def update_start_epoch(cfg, output_directory):
    """Resume support: point ``cfg["experiment"]["start_epoch"]`` at the latest iteration.

    Scans ``output_directory`` for entries named ``iteration_<n>`` and, when
    ``cfg["experiment"]["resume_experiment"]`` is truthy, records the highest
    ``<n>`` found (0 when none exist) so the run restarts from there.

    Args:
        cfg: Mutable config mapping with an ``experiment`` section.
        output_directory: Hydra run directory to scan (e.g. ``./outputs/{folder}``).

    Returns:
        None. ``cfg`` is modified in place.
    """
    if not cfg["experiment"]["resume_experiment"]:
        return None
    # Robustness fix: a fresh run directory may not exist yet; treat it as
    # having no completed iterations instead of raising FileNotFoundError.
    entries = os.listdir(output_directory) if os.path.isdir(output_directory) else []
    iterations = [
        int(name.split("_")[1]) for name in entries if name.startswith("iteration_")
    ]
    cfg["experiment"]["start_epoch"] = max(iterations) if iterations else 0
    return None
src_code_for_reproducibility/utils/wandb_utils.py ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from typing import Any, Dict, Optional
3
+
4
+
5
# Module-level state: whether wandb imported successfully, and the active run.
_WANDB_AVAILABLE = False
_WANDB_RUN = None


def _try_import_wandb():
    """Attempt to import ``wandb`` once; cache success in ``_WANDB_AVAILABLE``."""
    global _WANDB_AVAILABLE
    if _WANDB_AVAILABLE:
        # A previous call already imported it successfully; skip the retry.
        return True
    try:
        import wandb  # type: ignore  # noqa: F401
    except Exception:
        _WANDB_AVAILABLE = False
        return False
    _WANDB_AVAILABLE = True
    return True
21
+
22
+
23
def _safe_get(cfg: Dict[str, Any], path: list[str], default: Any = None) -> Any:
    """Walk *path* through nested dicts in *cfg*; return *default* on any miss."""
    node: Any = cfg
    for segment in path:
        if isinstance(node, dict) and segment in node:
            node = node[segment]
        else:
            return default
    return node


def is_enabled(cfg: Dict[str, Any]) -> bool:
    """Return True iff the config switches W&B on (``logging.wandb.enabled``)."""
    flag = _safe_get(cfg, ["logging", "wandb", "enabled"], False)
    return bool(flag)
34
+
35
+
36
def init(cfg: Dict[str, Any], run_dir: str, run_name: Optional[str] = None) -> None:
    """
    Initialize Weights & Biases if enabled in config.

    No-op when logging is disabled or the ``wandb`` package is not
    installed.  The run handle is kept in the module-level ``_WANDB_RUN``.
    """
    global _WANDB_RUN
    if not is_enabled(cfg):
        return
    if not _try_import_wandb():
        return

    import wandb  # type: ignore

    def opt(key: str, default: Any = None) -> Any:
        # All W&B settings live under logging.wandb in the config tree.
        return _safe_get(cfg, ["logging", "wandb", key], default)

    # Keep W&B artifacts inside the hydra run directory.
    os.makedirs(run_dir, exist_ok=True)
    os.environ.setdefault("WANDB_DIR", run_dir)

    # Resolve OmegaConf configs to plain containers; fall back to the raw dict.
    try:
        from omegaconf import OmegaConf  # type: ignore

        plain_cfg = OmegaConf.to_container(cfg, resolve=True)  # type: ignore
    except Exception:
        plain_cfg = cfg

    _WANDB_RUN = wandb.init(
        project=opt("project", "llm-negotiation"),
        entity=opt("entity"),
        mode=opt("mode", "online"),
        name=opt("name", run_name),
        group=opt("group"),
        tags=opt("tags", []) or [],
        notes=opt("notes"),
        config=plain_cfg,
        dir=run_dir,
        reinit=True,
    )
80
+
81
+
82
def log(metrics: Dict[str, Any], step: Optional[int] = None) -> None:
    """Log a flat dictionary of metrics to W&B if active.

    *step* is forwarded to ``wandb.log`` as the run's step index.  The
    previous implementation merged it into the metrics dict as a fake
    metric named ``"step"``, which never advanced W&B's step axis and
    silently clobbered any real ``"step"`` metric.
    Best-effort: any wandb failure is swallowed so logging can never
    crash training.
    """
    if not _WANDB_AVAILABLE or _WANDB_RUN is None:
        return
    try:
        import wandb  # type: ignore

        wandb.log(metrics, step=step)
    except Exception:
        pass
92
+
93
+
94
def _flatten(prefix: str, data: Dict[str, Any], out: Dict[str, Any]) -> None:
    """Recursively flatten nested dicts into *out* using dot-joined keys."""
    for name, value in data.items():
        dotted = name if not prefix else f"{prefix}.{name}"
        if isinstance(value, dict):
            _flatten(dotted, value, out)
        else:
            out[dotted] = value
101
+
102
+
103
def _summarize_value(value: Any) -> Dict[str, Any]:
    """Reduce a logged value to a small dict of scalar summary statistics."""
    # Local import so the module stays usable without numpy when W&B is off.
    import numpy as np

    if value is None:
        return {"none": 1}
    if isinstance(value, (int, float)):
        # Plain scalar: no aggregation needed.
        return {"value": float(value)}
    # Lists or arrays: summarize with NaN-aware statistics.
    try:
        data = np.asarray(value)
        if data.size == 0:
            return {"size": 0}
        flat = data.reshape(-1)
        return {
            "mean": float(np.nanmean(data)),
            "min": float(np.nanmin(data)),
            "max": float(np.nanmax(data)),
            "last": float(flat[-1]),
            "size": int(data.size),
        }
    except Exception:
        # Non-numeric payloads fall back to their string form.
        return {"text": str(value)}
126
+
127
+
128
def log_tally(array_tally: Dict[str, Any], prefix: str = "", step: Optional[int] = None) -> None:
    """
    Flatten and summarize Tally.array_tally and log to WandB.
    Each leaf list/array is summarized with mean/min/max/last/size.
    """
    if not _WANDB_AVAILABLE or _WANDB_RUN is None:
        return

    collected: Dict[str, Any] = {}

    def visit(node: Any, trail: list[str]) -> None:
        if isinstance(node, dict):
            for child_key, child in node.items():
                visit(child, trail + [child_key])
            return
        # Leaf: a list of values accumulated over time.
        full_key = ".".join(([prefix] if prefix else []) + trail)
        try:
            for stat_name, stat_value in _summarize_value(node).items():
                collected[f"{full_key}.{stat_name}"] = stat_value
        except Exception:
            collected[f"{full_key}.error"] = 1

    visit(array_tally, [])
    if collected:
        log(collected, step=step)
154
+
155
+
156
def log_flat_stats(stats: Dict[str, Any], prefix: str = "", step: Optional[int] = None) -> None:
    """Flatten nested *stats* into dotted keys and forward them to W&B."""
    if not _WANDB_AVAILABLE or _WANDB_RUN is None:
        return
    flattened: Dict[str, Any] = {}
    _flatten(prefix, stats, flattened)
    if flattened:
        log(flattened, step=step)
163
+
164
+