Add files using upload-large-folder tool
Browse files- .hydra/config.yaml +178 -0
- .hydra/hydra.yaml +154 -0
- .hydra/overrides.yaml +1 -0
- run.log +0 -0
- seed_1/Qwen/Qwen2.5-7B-Instruct/adapters/README.md +207 -0
- seed_1/Qwen/Qwen2.5-7B-Instruct/adapters/agent_adapter/adapter_config.json +42 -0
- seed_1/Qwen/Qwen2.5-7B-Instruct/adapters/critic_adapter/adapter_config.json +42 -0
- src_code_for_reproducibility/__init__.py +0 -0
- src_code_for_reproducibility/markov_games/statistics_runner.py +405 -0
- src_code_for_reproducibility/models/__init__.py +0 -0
- src_code_for_reproducibility/models/adapter_training_wrapper.py +98 -0
- src_code_for_reproducibility/models/human_policy.py +255 -0
- src_code_for_reproducibility/models/inference_backend.py +39 -0
- src_code_for_reproducibility/models/inference_backend_dummy.py +54 -0
- src_code_for_reproducibility/models/inference_backend_sglang.py +86 -0
- src_code_for_reproducibility/models/inference_backend_sglang_local_server.py +127 -0
- src_code_for_reproducibility/models/inference_backend_vllm.py +117 -0
- src_code_for_reproducibility/models/inference_backend_vllm_local_server.py +160 -0
- src_code_for_reproducibility/models/large_language_model_api.py +171 -0
- src_code_for_reproducibility/models/large_language_model_local.py +384 -0
- src_code_for_reproducibility/models/scalar_critic.py +54 -0
- src_code_for_reproducibility/training/README.md +20 -0
- src_code_for_reproducibility/training/__init__.py +0 -0
- src_code_for_reproducibility/training/annealing_methods.py +6 -0
- src_code_for_reproducibility/training/credit_methods.py +295 -0
- src_code_for_reproducibility/training/tally_metrics.py +55 -0
- src_code_for_reproducibility/training/tally_rollout.py +137 -0
- src_code_for_reproducibility/training/tally_tokenwise.py +276 -0
- src_code_for_reproducibility/training/tokenize_chats.py +128 -0
- src_code_for_reproducibility/training/trainer_ad_align.py +492 -0
- src_code_for_reproducibility/training/trainer_common.py +1054 -0
- src_code_for_reproducibility/training/trainer_independent.py +155 -0
- src_code_for_reproducibility/training/trainer_sum_rewards.py +127 -0
- src_code_for_reproducibility/training/training_data_utils.py +394 -0
- src_code_for_reproducibility/utils/__init__.py +0 -0
- src_code_for_reproducibility/utils/dict_get_path.py +12 -0
- src_code_for_reproducibility/utils/format_time.py +7 -0
- src_code_for_reproducibility/utils/gather_training_stats.py +257 -0
- src_code_for_reproducibility/utils/get_coagent_id.py +4 -0
- src_code_for_reproducibility/utils/get_stochastic_game_lengths.py +30 -0
- src_code_for_reproducibility/utils/kill_sglang.py +17 -0
- src_code_for_reproducibility/utils/output_source_code.py +6 -0
- src_code_for_reproducibility/utils/resource_context.py +78 -0
- src_code_for_reproducibility/utils/rollout_tree_chat_htmls.py +664 -0
- src_code_for_reproducibility/utils/rollout_tree_gather_utils.py +314 -0
- src_code_for_reproducibility/utils/rollout_tree_stats.py +50 -0
- src_code_for_reproducibility/utils/short_id_gen.py +11 -0
- src_code_for_reproducibility/utils/stat_pack.py +113 -0
- src_code_for_reproducibility/utils/update_start_epoch.py +9 -0
- src_code_for_reproducibility/utils/wandb_utils.py +164 -0
.hydra/config.yaml
ADDED
|
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
experiment:
|
| 2 |
+
wandb_enabled: true
|
| 3 |
+
nb_epochs: 3000
|
| 4 |
+
nb_matches_per_iteration: 64
|
| 5 |
+
reinit_matches_each_it: true
|
| 6 |
+
checkpoint_every_n_iterations: 10
|
| 7 |
+
start_epoch: 0
|
| 8 |
+
resume_experiment: true
|
| 9 |
+
base_seed: 1
|
| 10 |
+
seed_group_size: 8
|
| 11 |
+
train: true
|
| 12 |
+
stat_methods_for_live_wandb: mllm.markov_games.negotiation.negotiation_statistics
|
| 13 |
+
name: no_press_10_1_ties_ad_align_nocurrtimestep_seed1
|
| 14 |
+
agent_buffer: true
|
| 15 |
+
keep_agent_buffer_count: ${lora_count}
|
| 16 |
+
agent_buffer_recent_k: -1
|
| 17 |
+
logging:
|
| 18 |
+
wandb:
|
| 19 |
+
enabled: false
|
| 20 |
+
project: llm-negotiation
|
| 21 |
+
entity: null
|
| 22 |
+
mode: online
|
| 23 |
+
name: null
|
| 24 |
+
group: null
|
| 25 |
+
tags: []
|
| 26 |
+
notes: null
|
| 27 |
+
temperature: 1.0
|
| 28 |
+
markov_games:
|
| 29 |
+
runner_method_name: LinearRunner
|
| 30 |
+
runner_kwargs: {}
|
| 31 |
+
group_by_round: true
|
| 32 |
+
simulation_class_name: NoPressSimulation
|
| 33 |
+
simulation_init_args:
|
| 34 |
+
nb_of_rounds: 10
|
| 35 |
+
quota_messages_per_agent_per_round: 0
|
| 36 |
+
game_type: 10-1-ties
|
| 37 |
+
atleast_one_conflict: true
|
| 38 |
+
item_types:
|
| 39 |
+
- hats
|
| 40 |
+
- books
|
| 41 |
+
- balls
|
| 42 |
+
agents:
|
| 43 |
+
0:
|
| 44 |
+
agent_id: ${agent_0_id}
|
| 45 |
+
agent_name: Alice
|
| 46 |
+
agent_class_name: NoPressAgent
|
| 47 |
+
policy_id: base_llm/agent_adapter
|
| 48 |
+
init_kwargs:
|
| 49 |
+
goal: Maximize your total points over the whole game.
|
| 50 |
+
1:
|
| 51 |
+
agent_id: ${agent_1_id}
|
| 52 |
+
agent_name: Bob
|
| 53 |
+
agent_class_name: NoPressAgent
|
| 54 |
+
policy_id: base_llm/agent_adapter
|
| 55 |
+
init_kwargs:
|
| 56 |
+
goal: Maximize your total points over the whole game.
|
| 57 |
+
models:
|
| 58 |
+
base_llm:
|
| 59 |
+
class: LeanLocalLLM
|
| 60 |
+
init_args:
|
| 61 |
+
llm_id: base_llm
|
| 62 |
+
model_name: Qwen/Qwen2.5-7B-Instruct
|
| 63 |
+
inference_backend: vllm
|
| 64 |
+
hf_kwargs:
|
| 65 |
+
device_map: auto
|
| 66 |
+
torch_dtype: bfloat16
|
| 67 |
+
max_memory:
|
| 68 |
+
0: 20GiB
|
| 69 |
+
attn_implementation: flash_attention_2
|
| 70 |
+
inference_backend_init_kwargs:
|
| 71 |
+
enable_lora: true
|
| 72 |
+
seed: ${experiment.base_seed}
|
| 73 |
+
enable_prefix_caching: true
|
| 74 |
+
max_model_len: 10000.0
|
| 75 |
+
gpu_memory_utilization: 0.5
|
| 76 |
+
dtype: bfloat16
|
| 77 |
+
trust_remote_code: true
|
| 78 |
+
max_lora_rank: 32
|
| 79 |
+
enforce_eager: false
|
| 80 |
+
max_loras: ${lora_count}
|
| 81 |
+
max_cpu_loras: ${lora_count}
|
| 82 |
+
enable_sleep_mode: true
|
| 83 |
+
inference_backend_sampling_params:
|
| 84 |
+
temperature: ${temperature}
|
| 85 |
+
top_p: 1.0
|
| 86 |
+
max_tokens: 400
|
| 87 |
+
top_k: -1
|
| 88 |
+
logprobs: 0
|
| 89 |
+
adapter_configs:
|
| 90 |
+
agent_adapter:
|
| 91 |
+
task_type: CAUSAL_LM
|
| 92 |
+
r: 32
|
| 93 |
+
lora_alpha: 64
|
| 94 |
+
lora_dropout: 0.0
|
| 95 |
+
target_modules: all-linear
|
| 96 |
+
critic_adapter:
|
| 97 |
+
task_type: CAUSAL_LM
|
| 98 |
+
r: 32
|
| 99 |
+
lora_alpha: 64
|
| 100 |
+
lora_dropout: 0.0
|
| 101 |
+
target_modules: all-linear
|
| 102 |
+
enable_thinking: null
|
| 103 |
+
regex_max_attempts: 3
|
| 104 |
+
critics:
|
| 105 |
+
agent_critic:
|
| 106 |
+
module_pointer:
|
| 107 |
+
- base_llm
|
| 108 |
+
- critic_adapter
|
| 109 |
+
optimizers:
|
| 110 |
+
agent_optimizer:
|
| 111 |
+
module_pointer:
|
| 112 |
+
- base_llm
|
| 113 |
+
- agent_adapter
|
| 114 |
+
optimizer_class_name: torch.optim.Adam
|
| 115 |
+
init_args:
|
| 116 |
+
lr: 3.0e-06
|
| 117 |
+
weight_decay: 0.0
|
| 118 |
+
critic_optimizer:
|
| 119 |
+
module_pointer: agent_critic
|
| 120 |
+
optimizer_class_name: torch.optim.Adam
|
| 121 |
+
init_args:
|
| 122 |
+
lr: 3.0e-06
|
| 123 |
+
weight_decay: 0.0
|
| 124 |
+
trainers:
|
| 125 |
+
agent_trainer:
|
| 126 |
+
class: TrainerAdAlign
|
| 127 |
+
module_pointers:
|
| 128 |
+
policy:
|
| 129 |
+
- base_llm
|
| 130 |
+
- agent_adapter
|
| 131 |
+
policy_optimizer: agent_optimizer
|
| 132 |
+
critic: agent_critic
|
| 133 |
+
critic_optimizer: critic_optimizer
|
| 134 |
+
kwargs:
|
| 135 |
+
entropy_coeff: 0.0
|
| 136 |
+
entropy_topk: null
|
| 137 |
+
entropy_mask_regex: null
|
| 138 |
+
kl_coeff: 0.001
|
| 139 |
+
gradient_clipping: 1.0
|
| 140 |
+
restrict_tokens: null
|
| 141 |
+
mini_batch_size: 1
|
| 142 |
+
use_gradient_checkpointing: false
|
| 143 |
+
temperature: ${temperature}
|
| 144 |
+
device: cuda:0
|
| 145 |
+
use_gae: false
|
| 146 |
+
whiten_advantages: false
|
| 147 |
+
whiten_advantages_time_step_wise: false
|
| 148 |
+
skip_discounted_state_visitation: true
|
| 149 |
+
use_gae_lambda_annealing: false
|
| 150 |
+
gae_lambda_annealing_method: None
|
| 151 |
+
gae_lambda_annealing_method_params: None
|
| 152 |
+
gae_lambda_annealing_limit: 0.95
|
| 153 |
+
discount_factor: 0.9
|
| 154 |
+
use_rloo: true
|
| 155 |
+
enable_tokenwise_logging: false
|
| 156 |
+
pg_loss_normalization: nb_tokens
|
| 157 |
+
truncated_importance_sampling_ratio_cap: 2.0
|
| 158 |
+
reward_normalizing_constant: 100.0
|
| 159 |
+
ad_align_force_coop_first_step: false
|
| 160 |
+
ad_align_clipping: null
|
| 161 |
+
ad_align_gamma: 0.9
|
| 162 |
+
ad_align_exclude_k_equals_t: true
|
| 163 |
+
ad_align_use_sign: false
|
| 164 |
+
ad_align_beta: 1.0
|
| 165 |
+
use_old_ad_align: true
|
| 166 |
+
use_time_regularization: false
|
| 167 |
+
rloo_branch: false
|
| 168 |
+
reuse_baseline: false
|
| 169 |
+
train_on_which_data:
|
| 170 |
+
agent_trainer: ${agent_ids}
|
| 171 |
+
lora_count: 30
|
| 172 |
+
common_agent_kwargs:
|
| 173 |
+
goal: Maximize your total points over the whole game.
|
| 174 |
+
agent_0_id: Alice
|
| 175 |
+
agent_1_id: Bob
|
| 176 |
+
agent_ids:
|
| 177 |
+
- Alice
|
| 178 |
+
- Bob
|
.hydra/hydra.yaml
ADDED
|
@@ -0,0 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
hydra:
|
| 2 |
+
run:
|
| 3 |
+
dir: ${oc.env:SCRATCH}/llm_negotiation/${now:%Y_%m}/${experiment.name}
|
| 4 |
+
sweep:
|
| 5 |
+
dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
|
| 6 |
+
subdir: ${hydra.job.num}
|
| 7 |
+
launcher:
|
| 8 |
+
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
|
| 9 |
+
sweeper:
|
| 10 |
+
_target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
|
| 11 |
+
max_batch_size: null
|
| 12 |
+
params: null
|
| 13 |
+
help:
|
| 14 |
+
app_name: ${hydra.job.name}
|
| 15 |
+
header: '${hydra.help.app_name} is powered by Hydra.
|
| 16 |
+
|
| 17 |
+
'
|
| 18 |
+
footer: 'Powered by Hydra (https://hydra.cc)
|
| 19 |
+
|
| 20 |
+
Use --hydra-help to view Hydra specific help
|
| 21 |
+
|
| 22 |
+
'
|
| 23 |
+
template: '${hydra.help.header}
|
| 24 |
+
|
| 25 |
+
== Configuration groups ==
|
| 26 |
+
|
| 27 |
+
Compose your configuration from those groups (group=option)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
$APP_CONFIG_GROUPS
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
== Config ==
|
| 34 |
+
|
| 35 |
+
Override anything in the config (foo.bar=value)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
$CONFIG
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
${hydra.help.footer}
|
| 42 |
+
|
| 43 |
+
'
|
| 44 |
+
hydra_help:
|
| 45 |
+
template: 'Hydra (${hydra.runtime.version})
|
| 46 |
+
|
| 47 |
+
See https://hydra.cc for more info.
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
== Flags ==
|
| 51 |
+
|
| 52 |
+
$FLAGS_HELP
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
== Configuration groups ==
|
| 56 |
+
|
| 57 |
+
Compose your configuration from those groups (For example, append hydra/job_logging=disabled
|
| 58 |
+
to command line)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
$HYDRA_CONFIG_GROUPS
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
Use ''--cfg hydra'' to Show the Hydra config.
|
| 65 |
+
|
| 66 |
+
'
|
| 67 |
+
hydra_help: ???
|
| 68 |
+
hydra_logging:
|
| 69 |
+
version: 1
|
| 70 |
+
formatters:
|
| 71 |
+
simple:
|
| 72 |
+
format: '[%(asctime)s][HYDRA] %(message)s'
|
| 73 |
+
handlers:
|
| 74 |
+
console:
|
| 75 |
+
class: logging.StreamHandler
|
| 76 |
+
formatter: simple
|
| 77 |
+
stream: ext://sys.stdout
|
| 78 |
+
root:
|
| 79 |
+
level: INFO
|
| 80 |
+
handlers:
|
| 81 |
+
- console
|
| 82 |
+
loggers:
|
| 83 |
+
logging_example:
|
| 84 |
+
level: DEBUG
|
| 85 |
+
disable_existing_loggers: false
|
| 86 |
+
job_logging:
|
| 87 |
+
version: 1
|
| 88 |
+
formatters:
|
| 89 |
+
simple:
|
| 90 |
+
format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
|
| 91 |
+
handlers:
|
| 92 |
+
console:
|
| 93 |
+
class: logging.StreamHandler
|
| 94 |
+
formatter: simple
|
| 95 |
+
stream: ext://sys.stdout
|
| 96 |
+
file:
|
| 97 |
+
class: logging.FileHandler
|
| 98 |
+
formatter: simple
|
| 99 |
+
filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
|
| 100 |
+
root:
|
| 101 |
+
level: INFO
|
| 102 |
+
handlers:
|
| 103 |
+
- console
|
| 104 |
+
- file
|
| 105 |
+
disable_existing_loggers: false
|
| 106 |
+
env: {}
|
| 107 |
+
mode: RUN
|
| 108 |
+
searchpath: []
|
| 109 |
+
callbacks: {}
|
| 110 |
+
output_subdir: .hydra
|
| 111 |
+
overrides:
|
| 112 |
+
hydra:
|
| 113 |
+
- hydra.mode=RUN
|
| 114 |
+
task: []
|
| 115 |
+
job:
|
| 116 |
+
name: run
|
| 117 |
+
chdir: false
|
| 118 |
+
override_dirname: ''
|
| 119 |
+
id: ???
|
| 120 |
+
num: ???
|
| 121 |
+
config_name: no_press_10_1_ties_ad_align_nocurrtimestep_seed1.yaml
|
| 122 |
+
env_set: {}
|
| 123 |
+
env_copy: []
|
| 124 |
+
config:
|
| 125 |
+
override_dirname:
|
| 126 |
+
kv_sep: '='
|
| 127 |
+
item_sep: ','
|
| 128 |
+
exclude_keys: []
|
| 129 |
+
runtime:
|
| 130 |
+
version: 1.3.2
|
| 131 |
+
version_base: '1.1'
|
| 132 |
+
cwd: /scratch/m/muqeeth/llm_negotiation
|
| 133 |
+
config_sources:
|
| 134 |
+
- path: hydra.conf
|
| 135 |
+
schema: pkg
|
| 136 |
+
provider: hydra
|
| 137 |
+
- path: /scratch/m/muqeeth/llm_negotiation/configs
|
| 138 |
+
schema: file
|
| 139 |
+
provider: main
|
| 140 |
+
- path: ''
|
| 141 |
+
schema: structured
|
| 142 |
+
provider: schema
|
| 143 |
+
output_dir: /scratch/m/muqeeth/llm_negotiation/2025_11/no_press_10_1_ties_ad_align_nocurrtimestep_seed1
|
| 144 |
+
choices:
|
| 145 |
+
hydra/env: default
|
| 146 |
+
hydra/callbacks: null
|
| 147 |
+
hydra/job_logging: default
|
| 148 |
+
hydra/hydra_logging: default
|
| 149 |
+
hydra/hydra_help: default
|
| 150 |
+
hydra/help: default
|
| 151 |
+
hydra/sweeper: basic
|
| 152 |
+
hydra/launcher: basic
|
| 153 |
+
hydra/output: default
|
| 154 |
+
verbose: false
|
.hydra/overrides.yaml
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
[]
|
run.log
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
seed_1/Qwen/Qwen2.5-7B-Instruct/adapters/README.md
ADDED
|
@@ -0,0 +1,207 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
base_model: Qwen/Qwen2.5-7B-Instruct
|
| 3 |
+
library_name: peft
|
| 4 |
+
pipeline_tag: text-generation
|
| 5 |
+
tags:
|
| 6 |
+
- base_model:adapter:Qwen/Qwen2.5-7B-Instruct
|
| 7 |
+
- lora
|
| 8 |
+
- transformers
|
| 9 |
+
---
|
| 10 |
+
|
| 11 |
+
# Model Card for Model ID
|
| 12 |
+
|
| 13 |
+
<!-- Provide a quick summary of what the model is/does. -->
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
## Model Details
|
| 18 |
+
|
| 19 |
+
### Model Description
|
| 20 |
+
|
| 21 |
+
<!-- Provide a longer summary of what this model is. -->
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
- **Developed by:** [More Information Needed]
|
| 26 |
+
- **Funded by [optional]:** [More Information Needed]
|
| 27 |
+
- **Shared by [optional]:** [More Information Needed]
|
| 28 |
+
- **Model type:** [More Information Needed]
|
| 29 |
+
- **Language(s) (NLP):** [More Information Needed]
|
| 30 |
+
- **License:** [More Information Needed]
|
| 31 |
+
- **Finetuned from model [optional]:** [More Information Needed]
|
| 32 |
+
|
| 33 |
+
### Model Sources [optional]
|
| 34 |
+
|
| 35 |
+
<!-- Provide the basic links for the model. -->
|
| 36 |
+
|
| 37 |
+
- **Repository:** [More Information Needed]
|
| 38 |
+
- **Paper [optional]:** [More Information Needed]
|
| 39 |
+
- **Demo [optional]:** [More Information Needed]
|
| 40 |
+
|
| 41 |
+
## Uses
|
| 42 |
+
|
| 43 |
+
<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
|
| 44 |
+
|
| 45 |
+
### Direct Use
|
| 46 |
+
|
| 47 |
+
<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
|
| 48 |
+
|
| 49 |
+
[More Information Needed]
|
| 50 |
+
|
| 51 |
+
### Downstream Use [optional]
|
| 52 |
+
|
| 53 |
+
<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
|
| 54 |
+
|
| 55 |
+
[More Information Needed]
|
| 56 |
+
|
| 57 |
+
### Out-of-Scope Use
|
| 58 |
+
|
| 59 |
+
<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
|
| 60 |
+
|
| 61 |
+
[More Information Needed]
|
| 62 |
+
|
| 63 |
+
## Bias, Risks, and Limitations
|
| 64 |
+
|
| 65 |
+
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
|
| 66 |
+
|
| 67 |
+
[More Information Needed]
|
| 68 |
+
|
| 69 |
+
### Recommendations
|
| 70 |
+
|
| 71 |
+
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
|
| 72 |
+
|
| 73 |
+
Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
|
| 74 |
+
|
| 75 |
+
## How to Get Started with the Model
|
| 76 |
+
|
| 77 |
+
Use the code below to get started with the model.
|
| 78 |
+
|
| 79 |
+
[More Information Needed]
|
| 80 |
+
|
| 81 |
+
## Training Details
|
| 82 |
+
|
| 83 |
+
### Training Data
|
| 84 |
+
|
| 85 |
+
<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
|
| 86 |
+
|
| 87 |
+
[More Information Needed]
|
| 88 |
+
|
| 89 |
+
### Training Procedure
|
| 90 |
+
|
| 91 |
+
<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
|
| 92 |
+
|
| 93 |
+
#### Preprocessing [optional]
|
| 94 |
+
|
| 95 |
+
[More Information Needed]
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
#### Training Hyperparameters
|
| 99 |
+
|
| 100 |
+
- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
|
| 101 |
+
|
| 102 |
+
#### Speeds, Sizes, Times [optional]
|
| 103 |
+
|
| 104 |
+
<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
|
| 105 |
+
|
| 106 |
+
[More Information Needed]
|
| 107 |
+
|
| 108 |
+
## Evaluation
|
| 109 |
+
|
| 110 |
+
<!-- This section describes the evaluation protocols and provides the results. -->
|
| 111 |
+
|
| 112 |
+
### Testing Data, Factors & Metrics
|
| 113 |
+
|
| 114 |
+
#### Testing Data
|
| 115 |
+
|
| 116 |
+
<!-- This should link to a Dataset Card if possible. -->
|
| 117 |
+
|
| 118 |
+
[More Information Needed]
|
| 119 |
+
|
| 120 |
+
#### Factors
|
| 121 |
+
|
| 122 |
+
<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
|
| 123 |
+
|
| 124 |
+
[More Information Needed]
|
| 125 |
+
|
| 126 |
+
#### Metrics
|
| 127 |
+
|
| 128 |
+
<!-- These are the evaluation metrics being used, ideally with a description of why. -->
|
| 129 |
+
|
| 130 |
+
[More Information Needed]
|
| 131 |
+
|
| 132 |
+
### Results
|
| 133 |
+
|
| 134 |
+
[More Information Needed]
|
| 135 |
+
|
| 136 |
+
#### Summary
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
## Model Examination [optional]
|
| 141 |
+
|
| 142 |
+
<!-- Relevant interpretability work for the model goes here -->
|
| 143 |
+
|
| 144 |
+
[More Information Needed]
|
| 145 |
+
|
| 146 |
+
## Environmental Impact
|
| 147 |
+
|
| 148 |
+
<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
|
| 149 |
+
|
| 150 |
+
Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
|
| 151 |
+
|
| 152 |
+
- **Hardware Type:** [More Information Needed]
|
| 153 |
+
- **Hours used:** [More Information Needed]
|
| 154 |
+
- **Cloud Provider:** [More Information Needed]
|
| 155 |
+
- **Compute Region:** [More Information Needed]
|
| 156 |
+
- **Carbon Emitted:** [More Information Needed]
|
| 157 |
+
|
| 158 |
+
## Technical Specifications [optional]
|
| 159 |
+
|
| 160 |
+
### Model Architecture and Objective
|
| 161 |
+
|
| 162 |
+
[More Information Needed]
|
| 163 |
+
|
| 164 |
+
### Compute Infrastructure
|
| 165 |
+
|
| 166 |
+
[More Information Needed]
|
| 167 |
+
|
| 168 |
+
#### Hardware
|
| 169 |
+
|
| 170 |
+
[More Information Needed]
|
| 171 |
+
|
| 172 |
+
#### Software
|
| 173 |
+
|
| 174 |
+
[More Information Needed]
|
| 175 |
+
|
| 176 |
+
## Citation [optional]
|
| 177 |
+
|
| 178 |
+
<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
|
| 179 |
+
|
| 180 |
+
**BibTeX:**
|
| 181 |
+
|
| 182 |
+
[More Information Needed]
|
| 183 |
+
|
| 184 |
+
**APA:**
|
| 185 |
+
|
| 186 |
+
[More Information Needed]
|
| 187 |
+
|
| 188 |
+
## Glossary [optional]
|
| 189 |
+
|
| 190 |
+
<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
|
| 191 |
+
|
| 192 |
+
[More Information Needed]
|
| 193 |
+
|
| 194 |
+
## More Information [optional]
|
| 195 |
+
|
| 196 |
+
[More Information Needed]
|
| 197 |
+
|
| 198 |
+
## Model Card Authors [optional]
|
| 199 |
+
|
| 200 |
+
[More Information Needed]
|
| 201 |
+
|
| 202 |
+
## Model Card Contact
|
| 203 |
+
|
| 204 |
+
[More Information Needed]
|
| 205 |
+
### Framework versions
|
| 206 |
+
|
| 207 |
+
- PEFT 0.17.1
|
seed_1/Qwen/Qwen2.5-7B-Instruct/adapters/agent_adapter/adapter_config.json
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"alpha_pattern": {},
|
| 3 |
+
"auto_mapping": null,
|
| 4 |
+
"base_model_name_or_path": "Qwen/Qwen2.5-7B-Instruct",
|
| 5 |
+
"bias": "none",
|
| 6 |
+
"corda_config": null,
|
| 7 |
+
"eva_config": null,
|
| 8 |
+
"exclude_modules": null,
|
| 9 |
+
"fan_in_fan_out": false,
|
| 10 |
+
"inference_mode": true,
|
| 11 |
+
"init_lora_weights": true,
|
| 12 |
+
"layer_replication": null,
|
| 13 |
+
"layers_pattern": null,
|
| 14 |
+
"layers_to_transform": null,
|
| 15 |
+
"loftq_config": {},
|
| 16 |
+
"lora_alpha": 64,
|
| 17 |
+
"lora_bias": false,
|
| 18 |
+
"lora_dropout": 0.0,
|
| 19 |
+
"megatron_config": null,
|
| 20 |
+
"megatron_core": "megatron.core",
|
| 21 |
+
"modules_to_save": null,
|
| 22 |
+
"peft_type": "LORA",
|
| 23 |
+
"qalora_group_size": 16,
|
| 24 |
+
"r": 32,
|
| 25 |
+
"rank_pattern": {},
|
| 26 |
+
"revision": null,
|
| 27 |
+
"target_modules": [
|
| 28 |
+
"up_proj",
|
| 29 |
+
"k_proj",
|
| 30 |
+
"down_proj",
|
| 31 |
+
"gate_proj",
|
| 32 |
+
"q_proj",
|
| 33 |
+
"o_proj",
|
| 34 |
+
"v_proj"
|
| 35 |
+
],
|
| 36 |
+
"target_parameters": null,
|
| 37 |
+
"task_type": "CAUSAL_LM",
|
| 38 |
+
"trainable_token_indices": null,
|
| 39 |
+
"use_dora": false,
|
| 40 |
+
"use_qalora": false,
|
| 41 |
+
"use_rslora": false
|
| 42 |
+
}
|
seed_1/Qwen/Qwen2.5-7B-Instruct/adapters/critic_adapter/adapter_config.json
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"alpha_pattern": {},
|
| 3 |
+
"auto_mapping": null,
|
| 4 |
+
"base_model_name_or_path": "Qwen/Qwen2.5-7B-Instruct",
|
| 5 |
+
"bias": "none",
|
| 6 |
+
"corda_config": null,
|
| 7 |
+
"eva_config": null,
|
| 8 |
+
"exclude_modules": null,
|
| 9 |
+
"fan_in_fan_out": false,
|
| 10 |
+
"inference_mode": true,
|
| 11 |
+
"init_lora_weights": true,
|
| 12 |
+
"layer_replication": null,
|
| 13 |
+
"layers_pattern": null,
|
| 14 |
+
"layers_to_transform": null,
|
| 15 |
+
"loftq_config": {},
|
| 16 |
+
"lora_alpha": 64,
|
| 17 |
+
"lora_bias": false,
|
| 18 |
+
"lora_dropout": 0.0,
|
| 19 |
+
"megatron_config": null,
|
| 20 |
+
"megatron_core": "megatron.core",
|
| 21 |
+
"modules_to_save": null,
|
| 22 |
+
"peft_type": "LORA",
|
| 23 |
+
"qalora_group_size": 16,
|
| 24 |
+
"r": 32,
|
| 25 |
+
"rank_pattern": {},
|
| 26 |
+
"revision": null,
|
| 27 |
+
"target_modules": [
|
| 28 |
+
"up_proj",
|
| 29 |
+
"k_proj",
|
| 30 |
+
"down_proj",
|
| 31 |
+
"gate_proj",
|
| 32 |
+
"q_proj",
|
| 33 |
+
"o_proj",
|
| 34 |
+
"v_proj"
|
| 35 |
+
],
|
| 36 |
+
"target_parameters": null,
|
| 37 |
+
"task_type": "CAUSAL_LM",
|
| 38 |
+
"trainable_token_indices": null,
|
| 39 |
+
"use_dora": false,
|
| 40 |
+
"use_qalora": false,
|
| 41 |
+
"use_rslora": false
|
| 42 |
+
}
|
src_code_for_reproducibility/__init__.py
ADDED
|
File without changes
|
src_code_for_reproducibility/markov_games/statistics_runner.py
ADDED
|
@@ -0,0 +1,405 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import gc
|
| 4 |
+
import json
|
| 5 |
+
import pickle
|
| 6 |
+
from dataclasses import dataclass
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional
|
| 9 |
+
|
| 10 |
+
from basic_render import find_iteration_folders
|
| 11 |
+
|
| 12 |
+
from mllm.markov_games.rollout_tree import (
|
| 13 |
+
RolloutTreeBranchNode,
|
| 14 |
+
RolloutTreeNode,
|
| 15 |
+
RolloutTreeRootNode,
|
| 16 |
+
SimulationStepLog,
|
| 17 |
+
)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def _iterate_main_nodes(root: RolloutTreeRootNode) -> Iterator[RolloutTreeNode]:
|
| 21 |
+
"""
|
| 22 |
+
Iterate the main path nodes without materializing full path lists.
|
| 23 |
+
"""
|
| 24 |
+
current = root.child
|
| 25 |
+
while current is not None:
|
| 26 |
+
if isinstance(current, RolloutTreeNode):
|
| 27 |
+
yield current
|
| 28 |
+
current = current.child
|
| 29 |
+
elif isinstance(current, RolloutTreeBranchNode):
|
| 30 |
+
# Follow only the main child on the main trajectory
|
| 31 |
+
current = current.main_child
|
| 32 |
+
else:
|
| 33 |
+
break
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def iterate_main_simulation_logs(
|
| 37 |
+
root: RolloutTreeRootNode,
|
| 38 |
+
) -> Iterator[SimulationStepLog]:
|
| 39 |
+
for node in _iterate_main_nodes(root):
|
| 40 |
+
yield node.step_log.simulation_step_log
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def stream_rollout_files(iteration_folder: Path) -> Iterator[Path]:
    """Lazily yield every rollout-tree pickle (``*.rt.pkl``) under the folder."""
    yield from (
        candidate
        for candidate in iteration_folder.rglob("*.rt.pkl")
        if candidate.is_file()
    )
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def load_root(path: Path) -> RolloutTreeRootNode:
    """Unpickle a rollout file and validate it into a RolloutTreeRootNode."""
    # NOTE: pickle is only safe on trusted, locally produced rollout files.
    payload = pickle.loads(path.read_bytes())
    return RolloutTreeRootNode.model_validate(payload)
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
@dataclass
class StatRecord:
    """One aggregated statistics record for a single rollout."""

    # Markov-game id of the rollout this record belongs to.
    mgid: int
    # Common-random-numbers id of the rollout, when available.
    crn_id: Optional[int]
    # Name of the iteration_* folder the rollout was found in.
    iteration: str
    # Final stat-name -> aggregated value mapping produced by the computers.
    values: Dict[str, Any]
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
class StatComputer:
    """
    Stateful stat computer that consumes SimulationStepLog instances
    and produces final aggregated values for one rollout (mgid).

    Subclasses implement update() (called once per main-path step) and
    finalize() (called once per rollout, after all steps were consumed).
    """

    def update(self, sl: SimulationStepLog) -> None:  # pragma: no cover - interface
        """Fold one simulation step into the running aggregate."""
        raise NotImplementedError

    def finalize(self) -> Dict[str, Any]:  # pragma: no cover - interface
        """Return the final stat-name -> value mapping for the rollout."""
        raise NotImplementedError
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def run_stats(
    data_root: Path,
    game_name: str,
    make_computers: Callable[[], List[StatComputer]],
    output_filename: Optional[str] = None,
    output_format: str = "json",  # "json" (dict of lists) or "jsonl"
) -> Path:
    """
    Compute stats across all iteration_* folders under data_root.

    For every rollout file a fresh set of StatComputer instances (from
    make_computers) consumes the main-path simulation logs and is then
    finalized into one record. The result is written to
    data_root/statistics/<output_filename or f"{game_name}.stats.<ext>">:
    a single dict-of-lists JSON document when output_format == "json",
    or one JSON object per rollout (JSONL) when output_format == "jsonl".

    Returns:
        Path of the written statistics file.
    """
    data_root = Path(data_root)
    outdir = data_root / "statistics"
    outdir.mkdir(parents=True, exist_ok=True)
    # Choose extension by format
    default_name = (
        f"{game_name}.stats.json"
        if output_format == "json"
        else f"{game_name}.stats.jsonl"
    )
    outfile = outdir / (
        output_filename if output_filename is not None else default_name
    )

    # Rewrite file each run to keep it clean and small
    if outfile.exists():
        outfile.unlink()

    iteration_folders = find_iteration_folders(str(data_root))

    # If writing JSONL, stream directly; otherwise accumulate minimal records
    if output_format == "jsonl":
        with open(outfile, "w", encoding="utf-8") as w:
            for iteration_folder in iteration_folders:
                iteration_name = Path(iteration_folder).name
                for pkl_path in stream_rollout_files(Path(iteration_folder)):
                    root = load_root(pkl_path)

                    # Fresh computers per rollout so state never leaks between files.
                    computers = make_computers()
                    for sl in iterate_main_simulation_logs(root):
                        for comp in computers:
                            # NOTE(review): a failing computer is silently
                            # skipped so one bad metric cannot abort the run;
                            # consider logging here.
                            try:
                                comp.update(sl)
                            except Exception:
                                continue

                    values: Dict[str, Any] = {}
                    for comp in computers:
                        try:
                            values.update(comp.finalize())
                        except Exception:
                            continue

                    rec = {
                        "mgid": getattr(root, "id", None),
                        "crn_id": getattr(root, "crn_id", None),
                        "iteration": iteration_name,
                        "stats": values,
                    }
                    w.write(json.dumps(rec, ensure_ascii=False) + "\n")

                    # Rollout trees can be large; free them eagerly.
                    del root
                    del computers
                    gc.collect()
    else:
        # Aggregate to dict-of-lists for easier plotting
        records: List[Dict[str, Any]] = []
        # Process in deterministic order
        for iteration_folder in iteration_folders:
            iteration_name = Path(iteration_folder).name
            for pkl_path in stream_rollout_files(Path(iteration_folder)):
                root = load_root(pkl_path)

                computers = make_computers()
                for sl in iterate_main_simulation_logs(root):
                    for comp in computers:
                        try:
                            comp.update(sl)
                        except Exception:
                            continue

                values: Dict[str, Any] = {}
                for comp in computers:
                    try:
                        values.update(comp.finalize())
                    except Exception:
                        continue

                records.append(
                    {
                        "mgid": getattr(root, "id", None),
                        "crn_id": getattr(root, "crn_id", None),
                        "iteration": iteration_name,
                        "stats": values,
                    }
                )

                del root
                del computers
                gc.collect()

        # Build dict-of-lists with nested stats preserved
        # Collect all stat keys and nested agent keys where needed
        mgids: List[Any] = []
        crn_ids: List[Any] = []
        iterations_out: List[str] = []
        # stats_out is a nested structure mirroring keys but with lists
        stats_out: Dict[str, Any] = {}

        # First pass to collect union of keys
        stat_keys: set[str] = set()
        nested_agent_keys: Dict[str, set[str]] = {}
        for r in records:
            stats = r.get("stats", {}) or {}
            for k, v in stats.items():
                stat_keys.add(k)
                if isinstance(v, dict):
                    # Dict-valued stats are treated as per-agent mappings.
                    nested = nested_agent_keys.setdefault(k, set())
                    for ak in v.keys():
                        nested.add(str(ak))

        # Initialize structure
        for k in stat_keys:
            if k in nested_agent_keys:
                stats_out[k] = {ak: [] for ak in sorted(nested_agent_keys[k])}
            else:
                stats_out[k] = []

        # Fill lists; missing values become None so columns stay aligned.
        for r in records:
            mgids.append(r.get("mgid"))
            crn_ids.append(r.get("crn_id"))
            iterations_out.append(r.get("iteration"))
            stats = r.get("stats", {}) or {}
            for k in stat_keys:
                val = stats.get(k)
                if isinstance(stats_out[k], dict):
                    # per-agent dict
                    agent_dict = val if isinstance(val, dict) else {}
                    for ak in stats_out[k].keys():
                        stats_out[k][ak].append(agent_dict.get(ak))
                else:
                    stats_out[k].append(val)

        with open(outfile, "w", encoding="utf-8") as w:
            json.dump(
                {
                    "mgid": mgids,
                    "crn_id": crn_ids,
                    "iteration": iterations_out,
                    "stats": stats_out,
                },
                w,
                ensure_ascii=False,
            )

    return outfile
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
def run_stats_functional(
    data_root: Path,
    game_name: str,
    metrics: Dict[str, Callable[[SimulationStepLog], Optional[Dict[str, float]]]],
    output_filename: Optional[str] = None,
    output_format: str = "json",
) -> Path:
    """
    Functional variant where metrics is a dict of name -> f(SimulationStepLog) -> {agent_id: value}.
    Aggregates per rollout by averaging over steps where a metric produced a value.
    Writes a single consolidated file in data_root/statistics/ (dict-of-lists
    JSON when output_format == "json", one record per line when "jsonl").

    Returns:
        Path of the written statistics file.
    """
    data_root = Path(data_root)
    outdir = data_root / "statistics"
    outdir.mkdir(parents=True, exist_ok=True)
    default_name = (
        f"{game_name}.stats.json"
        if output_format == "json"
        else f"{game_name}.stats.jsonl"
    )
    outfile = outdir / (
        output_filename if output_filename is not None else default_name
    )

    # Rewrite the output file on every run.
    if outfile.exists():
        outfile.unlink()

    iteration_folders = find_iteration_folders(str(data_root))

    def finalize_rollout(
        agg: Dict[str, Dict[str, List[float]]]
    ) -> Dict[str, Dict[str, Optional[float]]]:
        # avg per metric per agent
        result: Dict[str, Dict[str, Optional[float]]] = {}
        for mname, agent_values in agg.items():
            result[mname] = {}
            for aid, vals in agent_values.items():
                if not vals:
                    result[mname][aid] = None  # keep alignment; could be None
                else:
                    result[mname][aid] = sum(vals) / len(vals)
        return result

    if output_format == "jsonl":
        with open(outfile, "w", encoding="utf-8") as w:
            for iteration_folder in iteration_folders:
                iteration_name = Path(iteration_folder).name
                for pkl_path in stream_rollout_files(Path(iteration_folder)):
                    root = load_root(pkl_path)

                    # aggregator structure: metric -> agent_id -> list of values
                    agg: Dict[str, Dict[str, List[float]]] = {
                        m: {} for m in metrics.keys()
                    }

                    for sl in iterate_main_simulation_logs(root):
                        for mname, fn in metrics.items():
                            # A metric that raises or returns a falsy value
                            # simply contributes nothing for this step.
                            try:
                                vals = fn(sl)
                            except Exception:
                                vals = None
                            if not vals:
                                continue
                            for aid, v in vals.items():
                                if v is None:
                                    continue
                                lst = agg[mname].setdefault(str(aid), [])
                                try:
                                    lst.append(float(v))
                                except Exception:
                                    continue

                    values = finalize_rollout(agg)
                    rec = {
                        "mgid": getattr(root, "id", None),
                        "crn_id": getattr(root, "crn_id", None),
                        "iteration": iteration_name,
                        "stats": values,
                    }
                    w.write(json.dumps(rec, ensure_ascii=False) + "\n")

                    # Free the (potentially large) rollout tree eagerly.
                    del root
                    gc.collect()
    else:
        records: List[Dict[str, Any]] = []
        for iteration_folder in iteration_folders:
            iteration_name = Path(iteration_folder).name
            for pkl_path in stream_rollout_files(Path(iteration_folder)):
                root = load_root(pkl_path)

                agg: Dict[str, Dict[str, List[float]]] = {m: {} for m in metrics.keys()}
                for sl in iterate_main_simulation_logs(root):
                    for mname, fn in metrics.items():
                        try:
                            vals = fn(sl)
                        except Exception:
                            vals = None
                        if not vals:
                            continue
                        for aid, v in vals.items():
                            if v is None:
                                continue
                            lst = agg[mname].setdefault(str(aid), [])
                            try:
                                lst.append(float(v))
                            except Exception:
                                continue

                values = finalize_rollout(agg)
                records.append(
                    {
                        "mgid": getattr(root, "id", None),
                        "crn_id": getattr(root, "crn_id", None),
                        "iteration": iteration_name,
                        "stats": values,
                    }
                )

                del root
                gc.collect()

        # Build dict-of-lists output
        mgids: List[Any] = []
        crn_ids: List[Any] = []
        iterations_out: List[str] = []
        stats_out: Dict[str, Any] = {}

        # First pass: union of metric names and of per-agent keys.
        stat_keys: set[str] = set()
        nested_agent_keys: Dict[str, set[str]] = {}
        for r in records:
            stats = r.get("stats", {}) or {}
            for k, v in stats.items():
                stat_keys.add(k)
                if isinstance(v, dict):
                    nested = nested_agent_keys.setdefault(k, set())
                    for ak in v.keys():
                        nested.add(str(ak))

        for k in stat_keys:
            if k in nested_agent_keys:
                stats_out[k] = {ak: [] for ak in sorted(nested_agent_keys[k])}
            else:
                stats_out[k] = []

        # Second pass: fill aligned columns; missing entries become None.
        for r in records:
            mgids.append(r.get("mgid"))
            crn_ids.append(r.get("crn_id"))
            iterations_out.append(r.get("iteration"))
            stats = r.get("stats", {}) or {}
            for k in stat_keys:
                val = stats.get(k)
                if isinstance(stats_out[k], dict):
                    agent_dict = val if isinstance(val, dict) else {}
                    for ak in stats_out[k].keys():
                        stats_out[k][ak].append(agent_dict.get(ak))
                else:
                    stats_out[k].append(val)

        with open(outfile, "w", encoding="utf-8") as w:
            json.dump(
                {
                    "mgid": mgids,
                    "crn_id": crn_ids,
                    "iteration": iterations_out,
                    "stats": stats_out,
                },
                w,
                ensure_ascii=False,
            )

    return outfile
|
src_code_for_reproducibility/models/__init__.py
ADDED
|
File without changes
|
src_code_for_reproducibility/models/adapter_training_wrapper.py
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import logging
|
| 4 |
+
from typing import Union
|
| 5 |
+
from peft import (
|
| 6 |
+
LoraConfig,
|
| 7 |
+
get_peft_model,
|
| 8 |
+
)
|
| 9 |
+
|
| 10 |
+
logger = logging.getLogger(__name__)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class AdapterWrapper(nn.Module):
    """
    A thin façade that
    • keeps a reference to a *shared* PEFT-wrapped model,
    • ensures `set_adapter(adapter)` is called on every forward,
    • exposes only the parameters that should be trained for that adapter
      (plus whatever extra modules you name).
    """

    def __init__(
        self,
        shared_llm: nn.Module,
        adapter_id: str,
        lora_config: dict,
        path: Union[str, None] = None,
    ):
        """
        Args:
            shared_llm: Base model shared by all adapters.
            adapter_id: Name under which the LoRA adapter is registered.
            lora_config: Keyword arguments for peft.LoraConfig.
            path: Optional local path or HF Hub repo id with initial
                adapter weights; load failures are logged, not raised.
        """
        super().__init__()
        self.shared_llm = shared_llm
        self.adapter_id = adapter_id
        lora_config = LoraConfig(**lora_config)
        # this modifies the shared llm in place, adding a lora adapter inside
        # NOTE(review): get_peft_model also returns a PeftModel wrapper, which
        # is what self.shared_llm is rebound to below — confirm that callers
        # sharing the original module object rely only on the shared weights.
        self.shared_llm = get_peft_model(
            model=shared_llm,
            peft_config=lora_config,
            adapter_name=adapter_id,
        )
        self.shared_llm.train()
        # Load external adapter weights if provided
        loaded_from: str | None = None
        if path:
            try:
                # Supports both local filesystem paths and HF Hub repo IDs
                self.shared_llm.load_adapter(
                    is_trainable=True,
                    model_id=path,
                    adapter_name=adapter_id,
                )
                loaded_from = path
            except Exception as exc:  # noqa: BLE001 - want to log any load failure context
                logger.warning(
                    f"Adapter '{adapter_id}': failed to load from '{path}': {exc}"
                )

        if loaded_from:
            logger.info(
                f"Adapter '{adapter_id}': loaded initial weights from '{loaded_from}'."
            )
        else:
            logger.info(
                f"Adapter '{adapter_id}': initialized with fresh weights (no initial weights found)."
            )

    def parameters(self, recurse: bool = True):
        """
        Return the trainable parameters of this adapter as a list.

        "recurse" is just for pytorch compatibility
        """
        # Activating the adapter first so requires_grad flags reflect it.
        self.shared_llm.set_adapter(self.adapter_id)
        params = [p for p in self.shared_llm.parameters() if p.requires_grad]

        return params

    def get_base_model_logits(self, contexts):
        """
        Run the base model (without adapter) in inference mode, without tracking gradients.
        This is useful to get reference logits for KL-divergence computation.
        """
        with torch.no_grad():
            with self.shared_llm.disable_adapter():
                return self.shared_llm(input_ids=contexts)[0]

    def forward(self, *args, **kwargs):
        """Activate this wrapper's adapter, then delegate to the shared model."""
        self.shared_llm.set_adapter(self.adapter_id)
        return self.shared_llm(*args, **kwargs)

    def save_pretrained(self, save_path):
        """Save adapter weights via the underlying PEFT model."""
        self.shared_llm.save_pretrained(save_path)

    def gradient_checkpointing_enable(self, *args, **kwargs):
        """Forward gradient-checkpointing activation to the shared model."""
        self.shared_llm.gradient_checkpointing_enable(*args, **kwargs)

    @property
    def dtype(self):
        # Mirror the shared model's dtype for drop-in nn.Module compatibility.
        return self.shared_llm.dtype

    @property
    def device(self):
        # Mirror the shared model's device for drop-in nn.Module compatibility.
        return self.shared_llm.device
|
src_code_for_reproducibility/models/human_policy.py
ADDED
|
@@ -0,0 +1,255 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
import os
|
| 3 |
+
import re
|
| 4 |
+
import shutil
|
| 5 |
+
import sys
|
| 6 |
+
from typing import Callable, Dict, List, Optional
|
| 7 |
+
|
| 8 |
+
from mllm.markov_games.rollout_tree import ChatTurn
|
| 9 |
+
|
| 10 |
+
try:
|
| 11 |
+
import rstr # For generating example strings from regex
|
| 12 |
+
except Exception: # pragma: no cover
|
| 13 |
+
rstr = None
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def _clear_terminal() -> None:
|
| 17 |
+
"""
|
| 18 |
+
Clear the terminal screen in a cross-platform manner.
|
| 19 |
+
"""
|
| 20 |
+
if sys.stdout.isatty():
|
| 21 |
+
os.system("cls" if os.name == "nt" else "clear")
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def _terminal_width(default: int = 100) -> int:
|
| 25 |
+
try:
|
| 26 |
+
return shutil.get_terminal_size().columns
|
| 27 |
+
except Exception:
|
| 28 |
+
return default
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def _horizontal_rule(char: str = "─") -> str:
    """Build a full-width separator line, never narrower than 20 characters."""
    return char * max(20, _terminal_width() - 2)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class _Style:
    """ANSI escape sequences used to colorize terminal output."""

    # ANSI colors (bright, readable)
    RESET = "\033[0m"
    BOLD = "\033[1m"
    DIM = "\033[2m"
    # Foreground colors
    FG_BLUE = "\033[94m"  # user/system headers
    FG_GREEN = "\033[92m"  # human response header
    FG_YELLOW = "\033[93m"  # notices
    FG_RED = "\033[91m"  # errors
    FG_MAGENTA = "\033[95m"  # regex
    FG_CYAN = "\033[96m"  # tips
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def _render_chat(state) -> str:
    """
    Render prior messages in a compact, readable terminal format.

    Each element of *state* is expected to expose .role and .content
    (ChatTurn-like objects).
    """
    out: List[str] = [
        _horizontal_rule(),
        f"{_Style.FG_BLUE}{_Style.BOLD} Conversation so far {_Style.RESET}",
        _horizontal_rule(),
    ]
    for turn in state:
        role = turn.role
        body = str(turn.content).strip()
        # Map roles to display names and colors/emojis.
        if role == "assistant":
            out.append(f"{_Style.FG_GREEN}{_Style.BOLD}HUMAN--🧑💻{_Style.RESET}")
        elif role == "user":
            out.append(f"{_Style.FG_BLUE}{_Style.BOLD}USER--⚙️{_Style.RESET}")
        else:
            out.append(f"[{_Style.DIM}{role.upper()}{_Style.RESET}]")
        # Indent content lines; an empty body still renders one blank line.
        out.extend(f" {line}" for line in (body.splitlines() or [""]))
        out.append("")
    out.append(_horizontal_rule())
    return "\n".join(out)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
async def _async_input(prompt_text: str) -> str:
    """Run blocking input() in a worker thread so the event loop stays free."""
    result = await asyncio.to_thread(input, prompt_text)
    return result
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def _short_regex_example(regex: str, max_len: int = 30) -> Optional[str]:
    """
    Try to produce a short example string matching *regex*.

    Up to 20 random samples are drawn; the first one no longer than
    *max_len* is returned. Returns None when rstr is unavailable, when no
    short-enough sample is found, or on any generation error.
    """
    if rstr is None:
        return None
    try:
        for _attempt in range(20):
            sample = rstr.xeger(regex)
            if len(sample) <= max_len:
                return sample
    except Exception:
        return None
    # All samples were too long; truncating could break the match, so give up.
    return None
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
def _detect_input_type(regex: str | None) -> tuple[str, str, str]:
|
| 103 |
+
"""
|
| 104 |
+
Detect what type of input is expected based on the regex pattern.
|
| 105 |
+
Returns (input_type, start_tag, end_tag)
|
| 106 |
+
"""
|
| 107 |
+
if regex is None:
|
| 108 |
+
return "text", "", ""
|
| 109 |
+
|
| 110 |
+
if "message_start" in regex and "message_end" in regex:
|
| 111 |
+
return "message", "<<message_start>>", "<<message_end>>"
|
| 112 |
+
elif "proposal_start" in regex and "proposal_end" in regex:
|
| 113 |
+
return "proposal", "<<proposal_start>>", "<<proposal_end>>"
|
| 114 |
+
else:
|
| 115 |
+
return "text", "", ""
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
async def human_policy(state, agent_id, regex: str | None = None) -> ChatTurn:
    """
    Async human-in-the-loop policy.

    - Displays prior conversation context in the terminal.
    - Prompts the user for a response.
    - If a regex is provided, validates and re-prompts until it matches.
    - Automatically adds formatting tags based on expected input type.

    Args:
        state: Chat history (ChatTurn-like objects with .role/.content).
        agent_id: Agent id recorded on the returned turn.
        regex: Optional fullmatch validation pattern.

    Returns:
        A ChatTurn with role="assistant" wrapping the validated response.

    Raises:
        KeyboardInterrupt: when the user enters /quit or /q.
    """
    # Detect input type and formatting
    input_type, start_tag, end_tag = _detect_input_type(regex)

    while True:
        _clear_terminal()
        print(_render_chat(state))

        if regex:
            example = _short_regex_example(regex, max_len=30)
            print(
                f"{_Style.FG_MAGENTA}{_Style.BOLD}Expected format (regex fullmatch):{_Style.RESET}"
            )
            print(f" {_Style.FG_MAGENTA}{regex}{_Style.RESET}")
            if example:
                print(
                    f"{_Style.FG_CYAN}Example (random, <=30 chars):{_Style.RESET} {example}"
                )
            print(_horizontal_rule("."))

            # Custom prompt based on input type
            if input_type == "message":
                print(
                    f"{_Style.FG_YELLOW}Type your message content (formatting will be added automatically):{_Style.RESET}"
                )
            elif input_type == "proposal":
                print(
                    f"{_Style.FG_YELLOW}Type your proposal (number only, formatting will be added automatically):{_Style.RESET}"
                )
            else:
                print(
                    f"{_Style.FG_YELLOW}Type your response and press Enter.{_Style.RESET}"
                )

            print(
                f"{_Style.DIM}Commands: /help to view commands, /refresh to re-render, /quit to abort{_Style.RESET}"
            )
        else:
            print(
                f"{_Style.FG_YELLOW}Type your response and press Enter.{_Style.RESET} {_Style.DIM}(/help for commands){_Style.RESET}"
            )

        user_in = (await _async_input("> ")).rstrip("\n")

        # Commands
        if user_in.strip().lower() in {"/help", "/h"}:
            print(f"\n{_Style.FG_CYAN}{_Style.BOLD}Available commands:{_Style.RESET}")
            print(
                f" {_Style.FG_CYAN}/help{_Style.RESET} or {_Style.FG_CYAN}/h{_Style.RESET} Show this help"
            )
            print(
                f" {_Style.FG_CYAN}/refresh{_Style.RESET} or {_Style.FG_CYAN}/r{_Style.RESET} Re-render the conversation and prompt"
            )
            print(
                f" {_Style.FG_CYAN}/quit{_Style.RESET} or {_Style.FG_CYAN}/q{_Style.RESET} Abort the run (raises KeyboardInterrupt)"
            )
            # Short pause so the help text is visible before re-rendering.
            await asyncio.sleep(1.0)
            continue
        if user_in.strip().lower() in {"/refresh", "/r"}:
            continue
        if user_in.strip().lower() in {"/quit", "/q"}:
            raise KeyboardInterrupt("Human aborted run from human_policy")

        # Add formatting tags if needed
        if start_tag and end_tag:
            formatted_input = f"{start_tag}{user_in}{end_tag}"
        else:
            formatted_input = user_in

        # Without a validation pattern any input is accepted as-is.
        if regex is None:
            return ChatTurn(
                role="assistant", agent_id=agent_id, content=formatted_input
            )

        # Validate against regex (fullmatch)
        try:
            pattern = re.compile(regex)
        except re.error as e:
            # If regex is invalid, fall back to accepting any input
            print(
                f"{_Style.FG_RED}Warning:{_Style.RESET} Provided regex is invalid: {e}. Accepting input without validation."
            )
            await asyncio.sleep(0.5)
            return ChatTurn(
                role="assistant", agent_id=agent_id, content=formatted_input
            )

        if pattern.fullmatch(formatted_input):
            return ChatTurn(
                role="assistant", agent_id=agent_id, content=formatted_input
            )

        # Show validation error and re-prompt
        print("")
        print(
            f"{_Style.FG_RED}{_Style.BOLD}Input did not match the required format.{_Style.RESET} Please try again."
        )

        if input_type == "message":
            print(
                f"You entered: {_Style.FG_CYAN}{start_tag}{user_in}{end_tag}{_Style.RESET}"
            )
            print(f"Just type the message content without tags.")
        elif input_type == "proposal":
            print(
                f"You entered: {_Style.FG_CYAN}{start_tag}{user_in}{end_tag}{_Style.RESET}"
            )
            print(f"Just type the number without tags.")
        else:
            print(f"Expected (regex):")
            print(f" {_Style.FG_MAGENTA}{regex}{_Style.RESET}")

        print(_horizontal_rule("."))
        print(f"{_Style.FG_YELLOW}Press Enter to retry...{_Style.RESET}")
        await _async_input("")
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
def get_human_policies() -> Dict[str, Callable[[List[Dict]], str]]:
    """
    Expose the human policy in the same name -> callable map shape used
    elsewhere for model-backed policies.
    """
    # The declared value type is kept for interface parity even though the
    # stored callable is the async human_policy coroutine function.
    policies = {"human_policy": human_policy}
    return policies  # type: ignore[return-value]
|
src_code_for_reproducibility/models/inference_backend.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from abc import ABC, abstractmethod
|
| 2 |
+
from dataclasses import dataclass
|
| 3 |
+
from typing import Any, Optional
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
@dataclass
class LLMInferenceOutput:
    """Container for one generation result from an inference backend."""

    # Final generated text.
    content: str
    # Optional reasoning/thinking text, when the backend separates it.
    reasoning_content: str | None = None
    # Per-token log probabilities of the generated output, when requested.
    log_probs: list[float] | None = None
    # Token ids of the generated output, when requested.
    out_token_ids: list[int] | None = None
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class LLMInferenceBackend(ABC):
    """
    Abstract interface for text-generation backends so rollout code can
    swap implementations (vLLM, SGLang, dummy, ...) freely.
    """

    @abstractmethod
    def __init__(self, **kwargs):
        ...

    @abstractmethod
    def prepare_adapter(
        self, adapter_id: str, weights_got_updated: bool = False
    ) -> None:
        """Ensure adapter is ready/loaded for next generation call."""

    @abstractmethod
    async def generate(self, prompt: list[dict], regex: Optional[str] = None) -> str:
        # NOTE(review): concrete backends in this package appear to return
        # an LLMInferenceOutput rather than a plain str — confirm and tighten
        # this return annotation.
        ...

    @abstractmethod
    def toggle_training_mode(self) -> None:
        """Hook called when the system enters training."""
        ...

    @abstractmethod
    def toggle_eval_mode(self) -> None:
        """Hook called when the system enters evaluation/inference."""
        ...

    @abstractmethod
    def shutdown(self) -> None:
        """Release any resources held by the backend."""
        ...
|
src_code_for_reproducibility/models/inference_backend_dummy.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
from typing import Optional
|
| 3 |
+
|
| 4 |
+
import rstr
|
| 5 |
+
from transformers import AutoTokenizer
|
| 6 |
+
|
| 7 |
+
from mllm.models.inference_backend import LLMInferenceBackend, LLMInferenceOutput
|
| 8 |
+
from mllm.utils.short_id_gen import generate_short_id
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class DummyInferenceBackend(LLMInferenceBackend):
    """
    Stub backend for tests and dry runs: every lifecycle hook is a no-op
    and generate() fabricates output without touching any model
    (regex-conforming text via rstr when a pattern is supplied).
    """

    def __init__(
        self,
        *args,
        **kwargs,
    ):
        # Accept and ignore whatever a real backend would be configured with.
        pass

    def prepare_adapter(
        self,
        adapter_id: Optional[str],
        weights_got_updated: bool,
        adapter_path: Optional[str] = None,
    ) -> None:
        # Nothing to load for a dummy backend.
        pass

    async def toggle_training_mode(self) -> None:
        # Yield control once so the coroutine shape matches real backends.
        await asyncio.sleep(0)

    async def toggle_eval_mode(self) -> None:
        await asyncio.sleep(0)

    def shutdown(self) -> None:
        pass

    async def generate(
        self,
        prompt_text: str,
        regex: Optional[str] = None,
        extract_thinking: bool = False,
    ) -> LLMInferenceOutput:
        """Return fabricated output; when a regex is given, the content matches it."""
        placeholder_reasoning = "I don't think, I am a dummy backend."
        if regex:
            # Create random string that respects the regex
            generated = rstr.xeger(regex)
        else:
            generated = "I am a dummy backend without a regex."
        return LLMInferenceOutput(
            content=generated,
            reasoning_content=placeholder_reasoning,
        )
|
src_code_for_reproducibility/models/inference_backend_sglang.py
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# new_backend_sglang_offline.py
|
| 2 |
+
from __future__ import annotations
|
| 3 |
+
|
| 4 |
+
import asyncio
|
| 5 |
+
from typing import Any, Optional
|
| 6 |
+
|
| 7 |
+
# import sglang as sgl
|
| 8 |
+
|
| 9 |
+
from mllm.models.inference_backend import LLMInferenceBackend
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class SGLangOfflineBackend(LLMInferenceBackend):
    """Inference backend running an in-process (offline) SGLang engine.

    LoRA adapters are registered at engine construction (``lora_paths``) and
    selected per request; no HTTP server is involved.
    """

    def __init__(
        self,
        model_name: str,
        tokenizer,  # unused but kept for parity with the other backends
        adapter_paths: dict[str, str],
        device: str = "cuda",
        max_model_len: Optional[int] = None,
        enable_lora: bool = True,
        lora_target_modules: Optional[list[str] | str] = None,
        max_loras_per_batch: int = 8,
        engine_kwargs: Optional[dict[str, Any]] = None,
    ):
        # Imported lazily so this module can be imported without sglang
        # installed (the file-level `import sglang as sgl` was commented out,
        # which made `sgl` an undefined name at runtime).
        import sglang as sgl

        self.model_name = model_name
        self.adapter_paths = adapter_paths
        self.current_adapter: Optional[str] = None
        # Copy so we never mutate the caller's dict.
        engine_kwargs = dict(engine_kwargs or {})
        # Map server-style LoRA flags to the offline engine constructor.
        if enable_lora and adapter_paths:
            engine_kwargs.setdefault("enable_lora", True)
            # The offline Engine mirrors server args; pass a mapping name->path.
            engine_kwargs.setdefault("lora_paths", adapter_paths)
            if lora_target_modules is not None:
                engine_kwargs.setdefault("lora_target_modules", lora_target_modules)
            engine_kwargs.setdefault("max_loras_per_batch", max_loras_per_batch)

        if max_model_len is not None:
            engine_kwargs.setdefault("context_length", max_model_len)

        # Launch in-process engine (no HTTP server). SGLang's Engine exposes
        # generate(), async_generate(), and async streaming helpers.
        self.llm = sgl.Engine(model_path=model_name, **engine_kwargs)

    def is_ready(self) -> bool:
        """The in-process engine is usable as soon as the ctor returns."""
        return True

    def toggle_training_mode(self) -> None:
        # No explicit KV release API offline; typically you pause usage here.
        pass

    def toggle_eval_mode(self) -> None:
        pass

    def shutdown(self) -> None:
        # Engine cleans up on GC; explicit close not required.
        pass

    def prepare_adapter(self, adapter_id: Optional[str]) -> None:
        """Record which adapter subsequent generate() calls should use.

        With the offline Engine, LoRA selection happens per request via the
        input batch mapping, so this only updates local state.
        """
        self.current_adapter = adapter_id

    async def generate(
        self, prompt_text: str, sampling_params: dict, adapter_id: Optional[str]
    ) -> str:
        """Generate a single completion (batch of 1) and return its text."""
        # Translate our generic sampling dict into SGLang's parameter names.
        params = {
            "temperature": sampling_params.get("temperature", 1.0),
            "top_p": sampling_params.get("top_p", 1.0),
            "max_new_tokens": sampling_params.get("max_new_tokens", 128),
        }
        if (tk := sampling_params.get("top_k", -1)) and tk > 0:
            params["top_k"] = tk
        if (mn := sampling_params.get("min_new_tokens")) is not None:
            params["min_new_tokens"] = mn
        if (fp := sampling_params.get("frequency_penalty")) is not None:
            params["frequency_penalty"] = fp

        # With multi-LoRA, SGLang accepts adapter names aligned to each input.
        # NOTE(review): async_generate is called with three positional args
        # (prompts, params, adapters) — verify this matches the installed
        # sglang version's signature (often keyword `lora_path=`).
        prompts = [prompt_text]
        adapters = [adapter_id] if adapter_id else None  # or omit for base model
        outs = await self.llm.async_generate(prompts, params, adapters)
        return outs[0]["text"]
|
src_code_for_reproducibility/models/inference_backend_sglang_local_server.py
ADDED
|
@@ -0,0 +1,127 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
|
| 3 |
+
import httpx
|
| 4 |
+
import requests
|
| 5 |
+
from sglang.utils import launch_server_cmd, wait_for_server
|
| 6 |
+
|
| 7 |
+
from mllm.models.inference_backend import LLMInferenceBackend
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class HttpSGLangBackend(LLMInferenceBackend):
    """Backend that launches and talks to a local SGLang HTTP server.

    LoRA adapters are (re)loaded at runtime via the server's
    ``/load_lora_adapter`` / ``/unload_lora_adapter`` endpoints; KV-cache
    memory is released during training via ``/release_memory_occupation``.
    """

    def __init__(self, **kwargs):
        # NOTE(review): relies on the base-class __init__ to set
        # self.model_name and self.adapter_paths from kwargs — confirm.
        super().__init__(**kwargs)
        self.port = None
        self.proc = None
        self.urls = {}
        # track sglang adapter ids separately from your logical ids
        self.sglang_names = {aid: aid for aid in self.adapter_paths.keys()}
        # Every adapter starts "dirty" so the first prepare_adapter() loads it.
        self.needs_loading = {aid: True for aid in self.adapter_paths.keys()}

        # defaults you already used:
        self.mem_fraction = kwargs.get("mem_fraction_static", 0.6)
        self.dtype = kwargs.get("dtype", "bfloat16")
        self.extra_cli = kwargs.get("extra_cli", "")
        self.disable_radix_cache = kwargs.get("disable_radix_cache", True)

    def launch(self) -> None:
        """Start the SGLang server subprocess and record its endpoint URLs."""
        # find local hf cache path for server
        from transformers.utils import cached_file

        local_llm_path = os.path.split(cached_file(self.model_name, "config.json"))[0]

        lora_str = ""
        if self.adapter_paths:
            lora_str = "--lora-paths " + " ".join(
                f"{aid}={path}" for aid, path in self.adapter_paths.items()
            )

        cmd = f"""
        python3 -m sglang.launch_server --model-path {local_llm_path} \
            --host 0.0.0.0 {lora_str} \
            {'--disable-radix-cache' if self.disable_radix_cache else ''} \
            --mem-fraction-static {self.mem_fraction} --dtype {self.dtype} {self.extra_cli}
        """
        self.proc, self.port = launch_server_cmd(cmd)
        # Block until the server answers; port is chosen by launch_server_cmd.
        wait_for_server(f"http://localhost:{self.port}")
        base = f"http://localhost:{self.port}"
        self.urls = dict(
            generate=f"{base}/generate",
            release=f"{base}/release_memory_occupation",
            resume=f"{base}/resume_memory_occupation",
            load_lora=f"{base}/load_lora_adapter",
            unload_lora=f"{base}/unload_lora_adapter",
        )

    def is_ready(self) -> bool:
        # Any HTTP response from /generate (even an error) counts as "up".
        try:
            requests.get(self.urls["generate"], timeout=2)
            return True
        except Exception:
            return False

    def prepare_adapter(self, adapter_id: str) -> None:
        """(Re)load the adapter on the server if its weights changed."""
        if adapter_id is None:
            return
        if self.needs_loading.get(adapter_id, False):
            # unload old name if present; best-effort, server may not know it
            try:
                requests.post(
                    self.urls["unload_lora"],
                    json={"lora_name": self.sglang_names[adapter_id]},
                    timeout=10,
                )
            except Exception:
                pass
            # Register under a fresh name so the server cannot serve stale weights.
            new_name = self._short_id()
            self.sglang_names[adapter_id] = new_name
            requests.post(
                self.urls["load_lora"],
                json={
                    "lora_name": new_name,
                    "lora_path": self.adapter_paths[adapter_id],
                },
            ).raise_for_status()
            self.needs_loading[adapter_id] = False

    async def generate(
        self, prompt_text: str, sampling_params: dict, adapter_id: str | None
    ) -> str:
        """POST one prompt to /generate, optionally routed through a LoRA adapter."""
        lora_name = self.sglang_names.get(adapter_id) if adapter_id else None
        payload = {
            "text": [prompt_text],
            "sampling_params": sampling_params,
        }
        if lora_name:
            payload["lora_path"] = [lora_name]

        # Generous timeout: generation can be slow under load.
        timeout = httpx.Timeout(3600.0, connect=3600.0)
        async with httpx.AsyncClient(timeout=timeout) as client:
            resp = await client.post(self.urls["generate"], json=payload)
            resp.raise_for_status()
            return resp.json()[0]["text"]

    def toggle_training_mode(self) -> None:
        # free KV space while training adapters
        requests.post(
            self.urls["release"], json={"tags": ["kv_cache"]}
        ).raise_for_status()

    def toggle_eval_mode(self) -> None:
        # re-allocate KV space; best-effort (server may already hold it)
        try:
            requests.post(
                self.urls["resume"], json={"tags": ["kv_cache"]}
            ).raise_for_status()
        except Exception:
            pass

    def shutdown(self) -> None:
        from sglang.utils import terminate_process

        if self.proc:
            terminate_process(self.proc)

    def _short_id(self) -> str:
        # 8-digit decimal id derived from a UUID; collisions are unlikely here.
        import uuid

        return str(uuid.uuid4().int)[:8]
|
src_code_for_reproducibility/models/inference_backend_vllm.py
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
import re
|
| 3 |
+
from typing import Optional
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
from transformers import AutoTokenizer
|
| 7 |
+
from vllm import AsyncEngineArgs, AsyncLLMEngine, SamplingParams
|
| 8 |
+
from vllm.inputs import TokensPrompt
|
| 9 |
+
from vllm.lora.request import LoRARequest
|
| 10 |
+
from vllm.sampling_params import GuidedDecodingParams, RequestOutputKind
|
| 11 |
+
|
| 12 |
+
from mllm.models.inference_backend import LLMInferenceBackend, LLMInferenceOutput
|
| 13 |
+
from mllm.utils.short_id_gen import generate_short_id
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class VLLMAsyncBackend(LLMInferenceBackend):
    """Async in-process vLLM backend with optional per-request LoRA adapters."""

    def __init__(
        self,
        model_name: str,
        tokenizer: AutoTokenizer,
        engine_init_kwargs: Optional[dict] = None,
        sampling_params: Optional[dict] = None,
    ):
        # Mutable-default fix: dicts default to None and are normalized here.
        self.model_name = model_name
        # Logical adapter id -> short unique id handed to vLLM; regenerated
        # whenever the adapter weights change so vLLM reloads them.
        self.vllm_adapter_ids = {}
        # No adapter selected until prepare_adapter() is called; generate()
        # would previously hit an AttributeError in that case.
        self.current_lora_request = None
        ea = dict(model=model_name, **(engine_init_kwargs or {}))
        self.engine = AsyncLLMEngine.from_engine_args(AsyncEngineArgs(**ea))

        self.sampling_params = sampling_params or {}

    def prepare_adapter(
        self,
        adapter_id: Optional[str],
        adapter_path: Optional[str],
        weights_got_updated: bool,
    ) -> None:
        """Point subsequent generate() calls at the given LoRA adapter.

        A fresh vLLM-internal id is generated when the weights changed — or
        when the adapter is seen for the first time — so the engine (re)loads
        them. Passing ``adapter_id=None`` selects the base model.
        """
        if adapter_id is None:
            self.current_lora_request = None
            return
        if weights_got_updated or adapter_id not in self.vllm_adapter_ids:
            self.vllm_adapter_ids[adapter_id] = generate_short_id()
        self.current_lora_request = LoRARequest(
            adapter_id,
            self.vllm_adapter_ids[adapter_id],
            adapter_path,
        )

    async def toggle_training_mode(self) -> None:
        """Release engine memory while adapters are being trained."""
        await self.engine.sleep(level=1)

    async def toggle_eval_mode(self) -> None:
        """Re-acquire engine memory for inference."""
        await self.engine.wake_up()

    def shutdown(self) -> None:
        # No explicit close call; engine stops when the process exits.
        pass

    async def generate(
        self,
        input_token_ids: list[int],
        regex: Optional[str] = None,
        extract_thinking: bool = False,
    ) -> LLMInferenceOutput:
        """Generate a completion for pre-tokenized input.

        Args:
            input_token_ids: Prompt token ids.
            regex: Optional pattern for guided (constrained) decoding.
            extract_thinking: If True, split a leading ``<think>...</think>``
                block into ``reasoning_content``.
        """
        guided = GuidedDecodingParams(regex=regex) if regex else None
        sp = SamplingParams(
            **self.sampling_params,
            guided_decoding=guided,
            output_kind=RequestOutputKind.FINAL_ONLY,
        )

        prompt = TokensPrompt(prompt_token_ids=input_token_ids)
        # Unique per request; loop.time() could collide for concurrent
        # requests started in the same clock tick.
        request_id = f"req-{generate_short_id()}"
        result_generator = self.engine.generate(
            prompt,
            sp,
            request_id,
            lora_request=self.current_lora_request,
        )

        res = None
        async for out in result_generator:  # with FINAL_ONLY this runs once
            res = out
        if res is None:
            raise RuntimeError(f"vLLM returned no output for request {request_id}")

        raw_text = res.outputs[0].text
        out_token_ids = res.outputs[0].token_ids
        # One logprob per generated token (that token's own logprob).
        log_probs = torch.tensor(
            [
                logprob_dict[token_id].logprob
                for token_id, logprob_dict in zip(out_token_ids, res.outputs[0].logprobs)
            ]
        )
        out_token_ids = torch.tensor(out_token_ids, dtype=torch.long)

        content = raw_text
        reasoning_content = None
        if extract_thinking:
            m = re.match(
                r"^\n<think>\n([\s\S]*?)</think>\n\n(.*)$", raw_text, flags=re.DOTALL
            )
            if m:
                reasoning_content = m.group(1)
                content = m.group(2)
        return LLMInferenceOutput(
            content=content,
            reasoning_content=reasoning_content,
            log_probs=log_probs,
            out_token_ids=out_token_ids,
        )
|
src_code_for_reproducibility/models/inference_backend_vllm_local_server.py
ADDED
|
@@ -0,0 +1,160 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import os
|
| 3 |
+
import subprocess
|
| 4 |
+
import time
|
| 5 |
+
|
| 6 |
+
import httpx
|
| 7 |
+
import requests
|
| 8 |
+
|
| 9 |
+
from mllm.models.inference_backend import LLMInferenceBackend
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class HttpVLLMBackend(LLMInferenceBackend):
    """Backend that launches a local vLLM OpenAI-compatible server over HTTP."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.port = kwargs.get("port", 8000)
        self.host = kwargs.get("host", "0.0.0.0")
        self.proc = None
        self.base_url = f"http://{self.host}:{self.port}"
        # vLLM memory safety knobs
        self.gpu_mem_util = kwargs.get("gpu_memory_utilization", 0.9)
        self.max_model_len = kwargs.get("max_model_len", None)
        self.max_num_seqs = kwargs.get("max_num_seqs", None)
        self.max_batched_tokens = kwargs.get("max_num_batched_tokens", None)
        self.dtype = kwargs.get("dtype", "bfloat16")
        self.trust_remote_code = kwargs.get("trust_remote_code", False)
        # LoRA strategy: "preload" (CLI) or "runtime" (endpoints) depending on your vLLM build
        self.lora_mode = kwargs.get("lora_mode", "preload")
        self.runtime_lora_enabled = self.lora_mode == "runtime"

        # If preloading: build CLI args (adapter name -> path).
        # Flag shape can vary per vLLM version: --lora-modules name=path
        self._preload_lora_args = []
        if self.adapter_paths and self.lora_mode == "preload":
            for aid, pth in self.adapter_paths.items():
                self._preload_lora_args += ["--lora-modules", f"{aid}={pth}"]

    def launch(self):
        """Spawn the vLLM API server subprocess and block until it is ready."""
        cmd = [
            "python3",
            "-m",
            "vllm.entrypoints.openai.api_server",
            "--model",
            self.model_name,
            "--host",
            self.host,
            "--port",
            str(self.port),
            "--dtype",
            self.dtype,
            "--gpu-memory-utilization",
            str(self.gpu_mem_util),
        ]
        if self.trust_remote_code:
            cmd += ["--trust-remote-code"]
        if self.max_model_len:
            cmd += ["--max-model-len", str(self.max_model_len)]
        if self.max_num_seqs:
            cmd += ["--max-num-seqs", str(self.max_num_seqs)]
        if self.max_batched_tokens:
            cmd += ["--max-num-batched-tokens", str(self.max_batched_tokens)]
        cmd += self._preload_lora_args

        self.proc = subprocess.Popen(
            cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True
        )
        self._wait_ready()

    def _wait_ready(self, timeout=120):
        """Poll /v1/models until the server answers or `timeout` seconds pass."""
        url = f"{self.base_url}/v1/models"
        t0 = time.time()
        while time.time() - t0 < timeout:
            try:
                r = requests.get(url, timeout=2)
                if r.status_code == 200:
                    return
            except Exception:
                pass
            time.sleep(1)
        raise RuntimeError("vLLM server did not become ready in time")

    def is_ready(self) -> bool:
        try:
            return (
                requests.get(f"{self.base_url}/v1/models", timeout=2).status_code == 200
            )
        except Exception:
            return False

    def prepare_adapter(self, adapter_id: str) -> None:
        """Load a LoRA adapter at runtime (only in "runtime" lora_mode)."""
        if not adapter_id or not self.runtime_lora_enabled:
            return
        # Newer vLLM builds expose runtime LoRA endpoints. If yours differs,
        # adjust the path/body here and keep the interface stable.
        try:
            requests.post(
                f"{self.base_url}/v1/load_lora_adapter",
                json={
                    "adapter_name": adapter_id,
                    "adapter_path": self.adapter_paths[adapter_id],
                },
                timeout=10,
            ).raise_for_status()
        except Exception:
            # Best-effort: adapter may already be loaded, or the endpoint may
            # not exist in this vLLM build.
            pass

    async def generate(
        self, prompt_text: str, sampling_params: dict, adapter_id: str | None
    ) -> str:
        """Send one chat completion request and return the reply text."""
        # Map our generic sampling params to the OpenAI schema.
        body = {
            "model": self.model_name,
            "messages": [{"role": "user", "content": prompt_text}],
            "temperature": sampling_params.get("temperature", 1.0),
            "top_p": sampling_params.get("top_p", 1.0),
            "max_tokens": sampling_params.get("max_new_tokens", 128),
        }
        # Optional knobs. Bug fix: the previous truthiness check returned -1
        # (truthy) for a missing key and then raised KeyError on direct access.
        tk = sampling_params.get("top_k", -1)
        if tk is not None and tk > 0:
            # vLLM accepts top_k via extra params; put under "extra_body"
            body.setdefault("extra_body", {})["top_k"] = tk
        if sampling_params.get("min_new_tokens", None) is not None:
            body.setdefault("extra_body", {})["min_tokens"] = sampling_params[
                "min_new_tokens"
            ]
        if sampling_params.get("frequency_penalty", None) is not None:
            body["frequency_penalty"] = sampling_params["frequency_penalty"]

        # Select LoRA adapter: whether preloaded via CLI or loaded at runtime,
        # most builds select by name via "lora_adapter" in extra_body.
        if adapter_id:
            body.setdefault("extra_body", {})["lora_adapter"] = adapter_id

        url = f"{self.base_url}/v1/chat/completions"
        timeout = httpx.Timeout(3600.0, connect=3600.0)
        async with httpx.AsyncClient(timeout=timeout) as client:
            resp = await client.post(url, json=body)
            resp.raise_for_status()
            data = resp.json()
            return data["choices"][0]["message"]["content"]

    def toggle_training_mode(self) -> None:
        # vLLM doesn't expose an explicit KV "release" toggle via API.
        # Strategy: keep the inference server idle during training, or run
        # training in a separate process.
        pass

    def toggle_eval_mode(self) -> None:
        pass

    def shutdown(self) -> None:
        if self.proc:
            self.proc.terminate()
|
src_code_for_reproducibility/models/large_language_model_api.py
ADDED
|
@@ -0,0 +1,171 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import asyncio
|
| 4 |
+
import copy
|
| 5 |
+
import os
|
| 6 |
+
import random
|
| 7 |
+
import re
|
| 8 |
+
from typing import Any, Callable, Dict, List, Optional, Sequence
|
| 9 |
+
|
| 10 |
+
import backoff
|
| 11 |
+
from openai import AsyncOpenAI, OpenAIError
|
| 12 |
+
|
| 13 |
+
from mllm.markov_games.rollout_tree import ChatTurn
|
| 14 |
+
from mllm.models.inference_backend import LLMInferenceOutput
|
| 15 |
+
|
| 16 |
+
# Model ids that use the Responses-API "reasoning" parameter (see __init__
# below, which attaches {"effort", "summary"} for these models).
# TODO: Get this automatically from OpenAI
reasoning_models = [
    "gpt-5-nano",
    "gpt-5-mini",
    "gpt-5",
    "o1-mini",
    "o1",
    "o1-pro",
    "o3-mini",
    "o3",
    "o3-pro",
    "o4-mini",
    "o4",
    "o4-pro",
]
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class LargeLanguageModelOpenAI:
    """Tiny async wrapper for the OpenAI Responses API.

    Exposes the same adapter/mode hooks as the local backends (all no-ops
    here) so it can be swapped in wherever a local model is used.
    """

    def __init__(
        self,
        llm_id: str = "",
        model: str = "gpt-4.1-mini",
        api_key: Optional[str] = None,
        base_url: Optional[str] = None,
        timeout_s: float = 300.0,
        regex_max_attempts: int = 10,
        sampling_params: Optional[Dict[str, Any]] = None,
        init_kwargs: Optional[Dict[str, Any]] = None,
        output_directory: Optional[str] = None,
    ) -> None:
        self.llm_id = llm_id
        self.model = model
        key = api_key or os.getenv("OPENAI_API_KEY")
        if not key:
            raise RuntimeError(
                "Set OPENAI_API_KEY as global environment variable or pass api_key."
            )
        client_kwargs: Dict[str, Any] = {"api_key": key, "timeout": timeout_s}
        if base_url:
            client_kwargs["base_url"] = base_url
        self.client = AsyncOpenAI(**client_kwargs)

        # Default request params applied to every call. Bug fix: normalize
        # None to {} — the previous code indexed and **-expanded this dict
        # unconditionally, crashing when sampling_params was left at None.
        self.sampling_params = dict(sampling_params or {})
        self.use_reasoning = model in reasoning_models
        if self.use_reasoning:
            self.sampling_params["reasoning"] = {
                "effort": "low",
                "summary": "detailed",
            }
        self.regex_max_attempts = max(1, int(regex_max_attempts))

    def get_inference_policies(self) -> Dict[str, Callable]:
        """Map this model's id to its action-producing coroutine."""
        return {
            self.llm_id: self.get_action,
        }

    # The following hooks exist only for interface parity with local backends.
    async def prepare_adapter_for_inference(self, *args: Any, **kwargs: Any) -> None:
        await asyncio.sleep(0)

    async def toggle_eval_mode(self, *args: Any, **kwargs: Any) -> None:
        await asyncio.sleep(0)

    async def toggle_training_mode(self, *args: Any, **kwargs: Any) -> None:
        await asyncio.sleep(0)

    async def export_adapters(self, *args: Any, **kwargs: Any) -> None:
        await asyncio.sleep(0)

    async def checkpoint_all_adapters(self, *args: Any, **kwargs: Any) -> None:
        await asyncio.sleep(0)

    def extract_output_from_response(self, resp: Any) -> LLMInferenceOutput:
        """Pull content (and an optional reasoning summary) out of a Response.

        Assumes reasoning models emit [reasoning, message] output items and
        non-reasoning models emit a single message item — TODO confirm against
        the Responses API for the models in use.
        """
        if len(resp.output) > 1:
            summary = resp.output[0].summary
            if summary != []:
                reasoning_content = summary[0].text
                reasoning_content = f"OpenAI Reasoning Summary: {reasoning_content}"
            else:
                reasoning_content = None
            content = resp.output[1].content[0].text
        else:
            reasoning_content = None
            content = resp.output[0].content[0].text

        return LLMInferenceOutput(
            content=content,
            reasoning_content=reasoning_content,
        )

    @backoff.on_exception(
        backoff.expo, Exception, max_time=10**10, max_tries=10**10
    )
    async def get_action(
        self,
        state: list[ChatTurn],
        agent_id: str,
        regex: Optional[str] = None,
    ) -> LLMInferenceOutput:
        """Query the model for an action given the chat state.

        When `regex` is given, the model is primed with the pattern and the
        reply is validated client-side, retrying up to `regex_max_attempts`
        times; the last reply is returned even if it never matched.
        """
        # Keep only role/content keys in the prompt, else OpenAI will error.
        prompt = [{"role": p.role, "content": p.content} for p in state]

        if regex:
            constraint_msg = {
                "role": "user",
                "content": (
                    f"Output must match this regex exactly: {regex} \n"
                    "Return only the matching string, with no quotes or extra text."
                ),
            }
            prompt = [constraint_msg, *prompt]
            pattern = re.compile(regex)
            for _ in range(self.regex_max_attempts):
                resp = await self.client.responses.create(
                    model=self.model,
                    input=prompt,
                    **self.sampling_params,
                )
                policy_output = self.extract_output_from_response(resp)
                if pattern.fullmatch(policy_output.content):
                    return policy_output
                # Append a correction request and retry.
                prompt = [
                    *prompt,
                    {
                        "role": "user",
                        "content": (
                            f"Invalid response format. Expected format (regex): {regex}\n Please try again and provide ONLY a response that matches this regex."
                        ),
                    },
                ]
            # Best-effort: all attempts failed validation; return the last one.
            return policy_output

        # Simple, unconstrained generation
        resp = await self.client.responses.create(
            model=self.model,
            input=prompt,
            **self.sampling_params,
        )
        policy_output = self.extract_output_from_response(resp)
        return policy_output

    def shutdown(self) -> None:
        """Drop the client reference; the async client closes on GC."""
        self.client = None
|
src_code_for_reproducibility/models/large_language_model_local.py
ADDED
|
@@ -0,0 +1,384 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
TODO: Figure out how to tweak SGlang not to go OOM when batch size is 32. See https://github.com/sgl-project/sglang/issues/6309.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import logging
|
| 6 |
+
import os
|
| 7 |
+
import re
|
| 8 |
+
import sys
|
| 9 |
+
import uuid
|
| 10 |
+
from collections.abc import Callable
|
| 11 |
+
from copy import deepcopy
|
| 12 |
+
from datetime import datetime
|
| 13 |
+
from typing import Literal
|
| 14 |
+
|
| 15 |
+
import httpx
|
| 16 |
+
import requests
|
| 17 |
+
import torch
|
| 18 |
+
import torch.nn as nn
|
| 19 |
+
|
| 20 |
+
# from sglang.utils import (
|
| 21 |
+
# launch_server_cmd,
|
| 22 |
+
# print_highlight,
|
| 23 |
+
# terminate_process,
|
| 24 |
+
# wait_for_server,
|
| 25 |
+
# )
|
| 26 |
+
from torch.optim import SGD, Adam, AdamW, RMSprop
|
| 27 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 28 |
+
from trl import AutoModelForCausalLMWithValueHead
|
| 29 |
+
|
| 30 |
+
from mllm.chat_utils.apply_template import chat_turns_to_token_ids
|
| 31 |
+
from mllm.markov_games.rollout_tree import ChatTurn
|
| 32 |
+
from mllm.models.adapter_training_wrapper import AdapterWrapper
|
| 33 |
+
from mllm.models.inference_backend import LLMInferenceOutput
|
| 34 |
+
from mllm.models.inference_backend_dummy import DummyInferenceBackend
|
| 35 |
+
from mllm.models.inference_backend_sglang import SGLangOfflineBackend
|
| 36 |
+
from mllm.models.inference_backend_vllm import VLLMAsyncBackend
|
| 37 |
+
|
| 38 |
+
logger = logging.getLogger(__name__)
|
| 39 |
+
logger.addHandler(logging.StreamHandler(sys.stdout))
|
| 40 |
+
|
| 41 |
+
AdapterID = str
|
| 42 |
+
PolicyID = str
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
class LeanLocalLLM:
|
| 46 |
+
"""
|
| 47 |
+
TOWRITE
|
| 48 |
+
"""
|
| 49 |
+
|
| 50 |
+
def __init__(
    self,
    llm_id: str = "base_llm",
    model_name: str = "Qwen/Qwen3-4B-Instruct-2507",
    device: str = "cuda",
    hf_kwargs: dict = {},  # NOTE(review): mutable default — shared across instances; confirm callers never mutate it
    adapter_configs: dict = {},  # NOTE(review): mutable default, same caveat as hf_kwargs
    output_directory: str = "./models/",
    inference_backend: Literal["vllm", "sglang", "dummy"] = "vllm",
    inference_backend_sampling_params: dict = {},
    inference_backend_init_kwargs: dict = {},
    initial_adapter_paths: dict[str, str] | None = None,
    initial_buffer_paths: list[str] | None = None,
    enable_thinking: bool | None = None,
    regex_max_attempts: int = -1,
    max_thinking_characters: int = 0,
):
    """
    Build a locally hosted LLM with one LoRA adapter per entry of
    ``adapter_configs``, plus an inference backend (vLLM, SGLang, or a dummy).

    Args:
        llm_id: Identifier used as prefix for policy keys ("<llm_id>/<adapter_id>").
        model_name: HF Hub name of the shared base model.
        device: Torch device string; falls back to "cuda" when falsy.
        hf_kwargs: Extra kwargs forwarded to ``AutoModelForCausalLM.from_pretrained``.
        adapter_configs: Mapping adapter_id -> LoRA config for each trainable adapter.
        output_directory: Root folder for adapter weights and checkpoints.
        inference_backend: Which backend implementation to instantiate.
        inference_backend_sampling_params: Sampling params (vLLM backend only).
        inference_backend_init_kwargs: Engine construction kwargs for the backend.
        initial_adapter_paths: Optional {adapter_id: path_or_repo_id} initial weights.
        initial_buffer_paths: Optional directories of past checkpoints to preload.
        enable_thinking: Passed through to chat templating; None keeps template default.
        regex_max_attempts: Retry budget for regex-constrained generation (-1 means
            use backend-side constrained decoding immediately; see get_action).
        max_thinking_characters: >0 enables thinking-extraction in the backend.
    """
    self.inference_backend_name = inference_backend
    self.output_directory = output_directory
    self.llm_id = llm_id
    self.device = torch.device(device) if device else torch.device("cuda")
    self.model_name = model_name
    self.adapter_configs = adapter_configs
    self.adapter_ids = list(adapter_configs.keys())
    self.enable_thinking = enable_thinking
    self.regex_max_attempts = regex_max_attempts
    self.initial_buffer_paths = initial_buffer_paths
    self.max_thinking_characters = max_thinking_characters
    # Running tally of regex-mismatch retries across get_action calls.
    self.regex_retries_count = 0

    # Optional user-specified initial adapter weight locations (local or HF Hub)
    # Format: {adapter_id: path_or_repo_id}
    self.initial_adapter_paths: dict[str, str] | None = initial_adapter_paths

    # Path management: adapters live under <output_directory>/<model_name>/adapters/<adapter_id>
    self.save_path = str(os.path.join(output_directory, model_name, "adapters"))
    self.adapter_paths = {
        adapter_id: os.path.join(self.save_path, adapter_id)
        for adapter_id in self.adapter_ids
    }
    # Discover previously checkpointed agent adapters ("buffer" adapters) on disk.
    checkpoints_dir = os.path.join(self.output_directory, "checkpoints")
    self.past_agent_adapter_paths = {}
    if os.path.isdir(checkpoints_dir):
        for dirname in os.listdir(checkpoints_dir):
            dirpath = os.path.join(checkpoints_dir, dirname)
            if os.path.isdir(dirpath):
                # Each checkpoint dir is assumed to contain an "agent_adapter"
                # subfolder — TODO confirm against checkpoint_all_adapters layout.
                self.past_agent_adapter_paths[f"{dirname}_buffer"] = os.path.join(
                    dirpath, "agent_adapter"
                )
        logger.info(
            f"Loaded {len(self.past_agent_adapter_paths)} past agent adapters from checkpoints directory."
        )
    if self.initial_buffer_paths is not None:
        previous_count = len(self.past_agent_adapter_paths)
        for path in self.initial_buffer_paths:
            if os.path.isdir(path):
                for dirname in os.listdir(path):
                    dirpath = os.path.join(path, dirname)
                    if os.path.isdir(dirpath):
                        self.past_agent_adapter_paths[
                            f"{dirname}_buffer"
                        ] = os.path.join(dirpath, "agent_adapter")
            else:
                logger.warning(
                    f"Initial buffer path {path} does not exist or is not a directory."
                )
        logger.info(
            f"Loaded {len(self.past_agent_adapter_paths) - previous_count} past agent adapters from user-specified initial buffer paths."
        )
    self.past_agent_adapter_ids = list(self.past_agent_adapter_paths.keys())

    # ID management for tracking adapter versions (refreshed on toggle_training_mode)
    self.adapter_train_ids = {
        adapter_id: self.short_id_generator() for adapter_id in self.adapter_ids
    }
    # Initialize tokenizer
    self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
    # Setup padding token to be same as EOS token
    self.tokenizer.pad_token_id = self.tokenizer.eos_token_id
    self.tokenizer.pad_token = self.tokenizer.eos_token

    # Dirty flags: True means the backend must reload the adapter weights
    # before the next inference with that adapter.
    self.weights_got_updated: dict[AdapterID, bool] = {
        adapter_id: False for adapter_id in self.adapter_ids
    }
    self.weights_got_updated.update(
        {adapter_id: False for adapter_id in self.past_agent_adapter_ids}
    )
    self.current_lora_request = None
    self.currently_loaded_adapter_id = None

    # ---------------------------------------------------------
    # Init HF model, peft adapters
    # ---------------------------------------------------------
    self.shared_hf_llm = AutoModelForCausalLM.from_pretrained(
        pretrained_model_name_or_path=model_name,
        **hf_kwargs,
    )
    self.hf_adapters = {}
    self.optimizers = {}
    for adapter_id in self.adapter_ids:
        # Prefer output-folder path if it exists; else fall back to user-specified initial path if provided
        output_path = os.path.join(self.save_path, adapter_id)
        chosen_path: str | None = None
        if os.path.isdir(output_path) and os.listdir(output_path):
            chosen_path = output_path
            logger.info(
                f"Initializing adapter '{adapter_id}': using existing weights from output folder '{chosen_path}'."
            )
        elif (
            self.initial_adapter_paths and adapter_id in self.initial_adapter_paths
        ):
            chosen_path = self.initial_adapter_paths[adapter_id]
            logger.info(
                f"Initializing adapter '{adapter_id}': using provided initial path '{chosen_path}'."
            )
        else:
            logger.info(
                f"Initializing adapter '{adapter_id}': no initial weights provided or found; starting from scratch."
            )
        hf_adapter = AdapterWrapper(
            shared_llm=self.shared_hf_llm,
            adapter_id=adapter_id,
            lora_config=adapter_configs[adapter_id],
            path=chosen_path,
        ).to(device)
        self.hf_adapters[adapter_id] = hf_adapter
    # Persist current state of all adapters (ensures remote loads are cached to disk)
    self.export_adapters()

    # ---------------------------------------------------------
    # Init inference inference_backend
    # ---------------------------------------------------------
    if inference_backend == "sglang":
        self.inference_backend = SGLangOfflineBackend(
            model_name=self.model_name,
            save_path=self.save_path,
            adapter_paths=self.adapter_paths,
            tokenizer=self.tokenizer,
            kwargs=inference_backend_init_kwargs,
        )
    elif inference_backend == "vllm":
        self.inference_backend = VLLMAsyncBackend(
            model_name=self.model_name,
            # adapter_paths=self.adapter_paths,
            tokenizer=self.tokenizer,
            engine_init_kwargs=inference_backend_init_kwargs,
            sampling_params=inference_backend_sampling_params,
        )
    elif inference_backend == "dummy":
        self.inference_backend = DummyInferenceBackend()
    else:
        raise ValueError(f"Unknown inference_backend: {inference_backend}")
|
| 203 |
+
|
| 204 |
+
def reset_regex_retries_count(self) -> None:
    """Zero the running counter of regex-mismatch retries accumulated by get_action."""
    self.regex_retries_count = 0
|
| 206 |
+
|
| 207 |
+
def get_inference_policies(self) -> dict[PolicyID, Callable]:
    """
    Build one async inference policy per adapter, keyed as "<llm_id>/<adapter_id>".

    Each policy loads its adapter into the inference backend and then delegates
    to ``get_action``. Current adapters come first, followed by past ("buffer")
    adapters — the same key order the original two-loop version produced.

    Returns:
        dict[PolicyID, Callable]: mapping of policy id to an async callable with
        signature ``(state, agent_id, regex=None)``.
    """
    policies = {}
    # The two original loops were byte-identical; a single loop over the
    # concatenated id lists produces the exact same closures and key order.
    for adapter_id in (*self.adapter_ids, *self.past_agent_adapter_ids):

        async def policy(
            state: list[ChatTurn],
            agent_id: str,
            regex: str | None = None,
            _adapter_id=adapter_id,
        ):
            # _adapter_id default binds the loop variable at definition time
            # (avoids the late-binding closure pitfall).
            self.prepare_adapter_for_inference(adapter_id=_adapter_id)
            response = await self.get_action(state, agent_id, regex)
            return response

        policies[self.llm_id + "/" + adapter_id] = policy
    return policies
|
| 240 |
+
|
| 241 |
+
def get_adapter_modules(self) -> dict[PolicyID, nn.Module]:
    """
    Return the adapter wrappers keyed by adapter id, so they can be used
    like ordinary PyTorch modules (see adapter_training_wrapper.py).
    """
    modules = {}
    for adapter_id in self.adapter_ids:
        modules[adapter_id] = self.hf_adapters[adapter_id]
    return modules
|
| 250 |
+
|
| 251 |
+
async def toggle_training_mode(self) -> None:
    """Refresh every adapter's training-version id, then flip the backend to training mode."""
    for adapter_id in self.adapter_ids:
        self.adapter_train_ids[adapter_id] = self.short_id_generator()
    await self.inference_backend.toggle_training_mode()
|
| 255 |
+
|
| 256 |
+
async def toggle_eval_mode(self) -> None:
    """Switch the inference backend into evaluation mode."""
    await self.inference_backend.toggle_eval_mode()
|
| 258 |
+
|
| 259 |
+
def prepare_adapter_for_inference(self, adapter_id: AdapterID) -> None:
    """
    Load ``adapter_id`` into the inference backend and clear its dirty flag.

    The weight path is looked up among the current adapters first, falling
    back to the past ("buffer") adapter paths, and may be None for ids
    unknown to both maps.
    """
    fallback_path = self.past_agent_adapter_paths.get(adapter_id, None)
    resolved_path = self.adapter_paths.get(adapter_id, fallback_path)
    self.inference_backend.prepare_adapter(
        adapter_id,
        adapter_path=resolved_path,
        weights_got_updated=self.weights_got_updated[adapter_id],
    )
    self.currently_loaded_adapter_id = adapter_id
    # Backend now holds the freshest weights; no reload needed until re-export.
    self.weights_got_updated[adapter_id] = False
|
| 269 |
+
|
| 270 |
+
# def _make_prompt_text(self, prompt: list[dict]) -> str:
|
| 271 |
+
# if self.enable_thinking is not None:
|
| 272 |
+
# prompt_text = self.tokenizer.apply_chat_template(
|
| 273 |
+
# prompt,
|
| 274 |
+
# tokenize=False,
|
| 275 |
+
# add_generation_prompt=True,
|
| 276 |
+
# enable_thinking=self.enable_thinking,
|
| 277 |
+
# )
|
| 278 |
+
# else:
|
| 279 |
+
# prompt_text = self.tokenizer.apply_chat_template(
|
| 280 |
+
# prompt,
|
| 281 |
+
# tokenize=False,
|
| 282 |
+
# add_generation_prompt=True,
|
| 283 |
+
# )
|
| 284 |
+
|
| 285 |
+
# return prompt_text
|
| 286 |
+
|
| 287 |
+
async def get_action(
    self, state: list[ChatTurn], agent_id: str, regex: str | None = None
) -> ChatTurn:
    """
    Generate one assistant turn for ``agent_id`` given the chat ``state``.

    Regex handling: when ``regex_max_attempts == -1`` the regex is handed to the
    backend immediately (constrained decoding). Otherwise the model is sampled
    unconstrained and validated client-side, retrying up to ``regex_max_attempts``
    times; on the last retry the regex IS passed to the backend, and whatever
    that attempt produces is returned (note: the comparison uses ``fullmatch``).

    Args:
        state: Chat history rendered into tokens via ``chat_turns_to_token_ids``.
        agent_id: Id stamped onto the returned ChatTurn.
        regex: Optional pattern the response content must fully match.

    Returns:
        ChatTurn: assistant turn with content, token ids, and log-probs.
    """
    # With unlimited backend support (-1) constrain immediately; else start free.
    current_regex = regex if self.regex_max_attempts == -1 else None
    pattern = re.compile(regex) if regex else None
    nb_attempts = 0
    # Shallow copy so retry-time mutations would not leak to the caller's list.
    state = state[:]
    while True:
        context_token_ids = chat_turns_to_token_ids(
            chats=state,
            tokenizer=self.tokenizer,
            enable_thinking=self.enable_thinking,
        )
        # print(f"context is {self.tokenizer.decode(context_token_ids)}")
        policy_output = await self.inference_backend.generate(
            input_token_ids=context_token_ids.tolist(),
            extract_thinking=(self.max_thinking_characters > 0),
            regex=current_regex,
        )
        # print(f"generated: {self.tokenizer.decode(policy_output.out_token_ids)}")
        # Accept when: no pattern requested, content matches, or retry budget spent.
        # NOTE: with regex_max_attempts == -1 and a pattern, nb_attempts (0) >= -1
        # is True, so the first (backend-constrained) output is returned as-is.
        if (
            pattern is None
            or (pattern.fullmatch(policy_output.content))
            or (nb_attempts >= self.regex_max_attempts)
        ):
            return ChatTurn(
                agent_id=agent_id,
                role="assistant",
                content=policy_output.content,
                reasoning_content=policy_output.reasoning_content,
                out_token_ids=policy_output.out_token_ids,
                log_probs=policy_output.log_probs,
                is_state_end=False,
            )
        else:
            self.regex_retries_count += 1
            nb_attempts += 1
            logger.warning(
                f"Response {policy_output.content} did not match regex: {regex}, retry {nb_attempts}/{self.regex_max_attempts}"
            )
            # Last retry: fall back to backend-side constrained decoding.
            if nb_attempts == self.regex_max_attempts:
                current_regex = regex
            # regex_prompt = ChatTurn(
            #     role="user",
            #     content=f"Invalid response format. Expected format (regex): {current_regex}\n Please try again and provide ONLY a response that matches this regex.",
            #     reasoning_content=None,
            #     log_probs=None,
            #     out_token_ids=None,
            #     is_state_end=False,
            # )
            # state.append(regex_prompt)
|
| 338 |
+
|
| 339 |
+
def export_adapters(self) -> None:
    """
    Persist the current adapter weights to ``self.save_path`` and mark every
    adapter (current and past) as needing a backend reload.

    The peft wrapper saves ALL attached adapters, not just the currently
    loaded one, so a single save_pretrained call on any wrapper suffices.
    """
    # Flag a new version of every adapter as available.
    for adapter_id in (*self.adapter_ids, *self.past_agent_adapter_ids):
        self.weights_got_updated[adapter_id] = True

    first_adapter = self.adapter_ids[0]
    self.hf_adapters[first_adapter].save_pretrained(self.save_path)
|
| 357 |
+
|
| 358 |
+
def checkpoint_all_adapters(self, checkpoint_indicator: str) -> None:
    """
    Checkpoints all adapters to the configured output directory.

    The checkpoint folder name is "<first_adapter_id>-<indicator>-<timestamp>";
    every adapter whose id contains "agent" is also registered as a new past
    ("buffer") adapter pointing inside that folder.
    """
    # NOTE(review): this first binding only seeds the folder name; the loop
    # below rebinds ``adapter_id``, so save_pretrained at the end runs with the
    # LAST adapter id — the peft wrapper saves all adapters regardless, but
    # confirm this reuse is intentional.
    adapter_id = self.adapter_ids[0]
    output_dir = os.path.join(self.output_directory, "checkpoints")
    os.makedirs(output_dir, exist_ok=True)
    date_str = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    agent_adapter_dir = f"{adapter_id}-{checkpoint_indicator}-{date_str}"
    export_path = os.path.join(output_dir, agent_adapter_dir)
    for adapter_id in self.adapter_ids:
        if "agent" in adapter_id:
            # Register the checkpointed agent adapter for reuse as an opponent
            # policy; weights are already current, so the dirty flag starts False.
            self.past_agent_adapter_paths[
                f"{agent_adapter_dir}_buffer"
            ] = os.path.join(export_path, adapter_id)
            self.past_agent_adapter_ids.append(f"{agent_adapter_dir}_buffer")
            self.weights_got_updated[f"{agent_adapter_dir}_buffer"] = False
    self.hf_adapters[adapter_id].save_pretrained(export_path)
|
| 376 |
+
|
| 377 |
+
def short_id_generator(self) -> str:
    """
    Generate a short pseudo-unique ID for tracking adapter versions.

    Returns:
        str: The first 8 decimal digits of a random UUID's integer value.
    """
    random_int = uuid.uuid4().int
    return str(random_int)[:8]
|
src_code_for_reproducibility/models/scalar_critic.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch, torch.nn as nn, torch.optim as optim
|
| 2 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 3 |
+
from peft import LoraConfig, get_peft_model
|
| 4 |
+
|
| 5 |
+
from mllm.models.adapter_training_wrapper import AdapterWrapper
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class ScalarCritic(nn.Module):
    """
    A causal-LM critic_adapter + a scalar value head:
        V_φ(s) = wᵀ h_last + b
    Only LoRA adapters (inside critic_adapter) and the value head are trainable.
    """

    def __init__(self, critic_adapter: AdapterWrapper):
        super().__init__()
        self.critic_adapter = critic_adapter
        # Width of the backbone's hidden states; the head maps it to one scalar.
        hidden_size = self.critic_adapter.shared_llm.config.hidden_size
        # Match the adapter's dtype/device so forward() needs no explicit casts.
        self.value_head = nn.Linear(hidden_size, 1).to(
            dtype=critic_adapter.dtype,
            device=critic_adapter.device)

    def forward(self,
                input_ids,
                attention_mask=None,
                **kwargs):
        """Return per-token value estimates of shape (B, S)."""
        # AdapterWrapper activates its own adapter internally
        outputs = self.critic_adapter(
            input_ids=input_ids,
            attention_mask=attention_mask,
            output_hidden_states=True,
            **kwargs,
        )
        h_last = outputs.hidden_states[-1]  # (B, S, H)
        values = self.value_head(h_last).squeeze(-1)  # (B, S)
        return values

    def parameters(self, recurse: bool = True):
        """Iterator over *trainable* parameters for this critic."""
        # 1) LoRA params for *this* adapter
        # NOTE(review): yields everything critic_adapter.parameters() exposes;
        # "trainable-only" relies on AdapterWrapper freezing base weights — confirm.
        for p in self.critic_adapter.parameters():
            yield p
        # 2) scalar head
        yield from self.value_head.parameters()

    def gradient_checkpointing_enable(self, *args, **kwargs):
        # Delegate to the wrapped adapter, which owns the transformer backbone.
        self.critic_adapter.gradient_checkpointing_enable(*args, **kwargs)

    @property
    def dtype(self):
        # Mirror the adapter's dtype so callers can treat the critic uniformly.
        return self.critic_adapter.dtype

    @property
    def device(self):
        # Mirror the adapter's device for the same reason as dtype.
        return self.critic_adapter.device
|
src_code_for_reproducibility/training/README.md
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Suppose we have a trajectory with 3 timesteps.
|
| 2 |
+
token: "0 1 2 3 4 5 6 7 8 9 . . . . ."
|
| 3 |
+
string: "A B C a b c A a A a b c A B C" (Capitalized = User, Lowercased = Assistant)
|
| 4 |
+
action_mask: "x x x ✓ ✓ ✓ x ✓ x ✓ ✓ ✓ x x x" (x = False, ✓ = True)
|
| 5 |
+
rewards: "r r r r r r R R R R R R r r r"
|
| 6 |
+
timestep: "0 0 0 0 0 0 1 1 1 1 1 1 2 2 2"
|
| 7 |
+
state_ends: "x x ✓ x x x ✓ x x x x x x x ✓"
|
| 8 |
+
|
| 9 |
+
There must be one baseline flag per timestep!
|
| 10 |
+
|
| 11 |
+
Then, we might have
|
| 12 |
+
|
| 13 |
+
A naive way to interpret this is to think of the number of assistant messages as the number of
|
| 14 |
+
steps in the environment. However, this is not the case in practice. Indeed, in a
|
| 15 |
+
single simulation step,
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
A subtlety arises with credit assignment. In the multi-agent case, we might
|
src_code_for_reproducibility/training/__init__.py
ADDED
|
File without changes
|
src_code_for_reproducibility/training/annealing_methods.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def sigmoid_annealing(step: int, temperature: float) -> float:
    """
    Map a step count to [0, 1) with a sigmoid-shaped schedule.

    Equals 0 at step 0 and approaches 1 as step grows; ``temperature``
    stretches the horizontal axis (larger = slower annealing).
    """
    scaled_step = -step / temperature
    return 2 / (1 + np.exp(scaled_step)) - 1
|
| 6 |
+
|
src_code_for_reproducibility/training/credit_methods.py
ADDED
|
@@ -0,0 +1,295 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def whiten_advantages(advantages: torch.Tensor) -> torch.Tensor:
    """
    Standardize advantages to zero mean and (approximately) unit variance.

    A small epsilon stabilizes the division when the std is near zero.
    """
    centered = advantages - torch.mean(advantages)
    return centered / (torch.std(advantages) + 1e-9)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def whiten_advantages_time_step_wise(
    advantages: torch.Tensor,  # (B, T)
) -> torch.Tensor:
    """
    Standardize advantages per timestep: each column (over the batch dim)
    is shifted to zero mean and scaled to unit std, with epsilon stabilization.
    """
    assert advantages.dim() == 2, "Wrong dimensions."
    column_means = advantages.mean(dim=0, keepdim=True)
    column_stds = advantages.std(dim=0, keepdim=True)
    return (advantages - column_means) / (column_stds + 1e-9)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def get_discounted_state_visitation_credits(
    credits: torch.Tensor, discount_factor: float  # (B, T)
) -> torch.Tensor:
    """
    Scale column t of ``credits`` by discount_factor**t (discounted
    state-visitation weighting), broadcasting over the batch dimension.
    """
    timesteps = torch.arange(credits.shape[1], device=credits.device)
    discount_powers = discount_factor ** timesteps
    return credits * discount_powers
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def get_discounted_returns(
    rewards: torch.Tensor,  # (B, T)
    discount_factor: float,
) -> torch.Tensor:
    """
    Compute Monte Carlo discounted returns G_t = sum_{k>=t} gamma^{k-t} r_k
    via a single backward sweep over the time dimension.

    Args:
        rewards: Per-timestep rewards, shape (B, T).
        discount_factor: Discount gamma applied per step.

    Returns:
        torch.Tensor: Discounted returns, shape (B, T).
    """
    assert rewards.dim() == 2, "Wrong dimensions."
    batch_size, horizon = rewards.shape
    returns = torch.zeros_like(rewards)
    running = torch.zeros(batch_size, device=rewards.device, dtype=rewards.dtype)
    for step in range(horizon - 1, -1, -1):
        running = rewards[:, step] + discount_factor * running
        returns[:, step] = running
    return returns
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def get_rloo_credits(credits: torch.Tensor):  # (B, S)
    """
    Leave-one-out (RLOO) baselining: each sample's baseline is the mean of the
    OTHER samples in the batch. With a single sample the credits are returned
    unchanged alongside an all-zero baseline.

    Returns:
        (rloo_credits, rloo_baselines), both shaped like ``credits``.
    """
    assert credits.dim() == 2, "Wrong dimensions."
    batch_size = credits.shape[0]
    if batch_size == 1:
        return credits, torch.zeros_like(credits)
    column_totals = torch.sum(credits, dim=0, keepdim=True)
    baselines = (column_totals - credits) / (batch_size - 1)
    return credits - baselines, baselines
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def get_generalized_advantage_estimates(
    rewards: torch.Tensor,  # (B, T)
    value_estimates: torch.Tensor,  # (B, T+1)
    discount_factor: float,
    lambda_coef: float,
) -> torch.Tensor:
    """
    Generalized Advantage Estimation (Schulman et al., https://arxiv.org/pdf/1506.02438):
    a backward recursion over TD residuals,
        A_t = delta_t + gamma * lambda * A_{t+1},
    where delta_t = r_t + gamma * V(s_{t+1}) - V(s_t).

    Returns:
        torch.Tensor: GAE values, shape (B, T).
    """
    assert rewards.dim() == value_estimates.dim() == 2, "Wrong dimensions."

    assert (
        rewards.shape[0] == value_estimates.shape[0]
    ), f"Got shapes {rewards.shape} and {value_estimates.shape} of rewards and value estimates."
    assert (
        rewards.shape[1] == value_estimates.shape[1] - 1
    ), f"Got shapes {rewards.shape} and {value_estimates.shape} of rewards and value estimates."

    horizon = rewards.shape[1]
    # TD residuals: r_t + gamma * V_{t+1} - V_t, computed for all t at once.
    deltas = (
        rewards + discount_factor * value_estimates[:, 1:] - value_estimates[:, :-1]
    )
    gaes = torch.zeros_like(deltas)
    running = 0.0
    for step in range(horizon - 1, -1, -1):
        running = deltas[:, step] + lambda_coef * discount_factor * running
        gaes[:, step] = running
    return gaes
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def get_advantage_alignment_weights(
    advantages: torch.Tensor,  # (B, T)
    exclude_k_equals_t: bool,
    gamma: float,
) -> torch.Tensor:
    """
    Compute the advantage-alignment weights

        w_t = gamma^t * sum_{k <= t} gamma^{-k} * A(s_k, a_k, b_k)
            = sum_{k <= t} gamma^{t-k} * A(s_k, a_k, b_k),

    i.e. a gamma-discounted prefix sum of the advantages. When
    ``exclude_k_equals_t`` is True the sum runs over k < t only.
    Used as the beta-weighted shaping factor in the AdAlign credit
    A* = A^1 + beta * w * A^2.
    """
    T = advantages.shape[1]
    dev = advantages.device
    # Scale column k by gamma^{-k} so a plain prefix sum becomes a discounted one.
    inverse_powers = (gamma * torch.ones((1, T), device=dev)) ** (
        -torch.arange(0, T, 1, device=dev)
    )
    scaled = advantages * inverse_powers
    # Subtracting the identity from the upper-triangular mask turns the
    # inclusive prefix sum (k <= t) into a strict one (k < t).
    if exclude_k_equals_t:
        diag_mask = torch.eye(T, device=dev)
    else:
        diag_mask = torch.zeros((T, T), device=dev)
    prefix_sums = scaled @ (torch.triu(torch.ones((T, T), device=dev)) - diag_mask)
    # Undo the gamma^{-k} scaling at position t: multiply by gamma^t.
    forward_powers = (gamma * torch.ones((1, T), device=dev)) ** (
        torch.arange(0, T, 1, device=dev)
    )
    return forward_powers * prefix_sums
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
def get_advantage_alignment_credits(
    a1: torch.Tensor,  # (B, S)
    a1_alternative: torch.Tensor,  # (B, S, A)
    a2: torch.Tensor,  # (B, S)
    exclude_k_equals_t: bool,
    beta: float,
    gamma: float = 1.0,
    use_old_ad_align: bool = False,
    use_sign: bool = False,
    clipping: float | None = None,
    use_time_regularization: bool = False,
    force_coop_first_step: bool = False,
    use_variance_regularization: bool = False,
    rloo_branch: bool = False,
    reuse_baseline: bool = False,
    mean_normalize_ad_align: bool = False,
    whiten_adalign_advantages: bool = False,
    whiten_adalign_advantages_time_step_wise: bool = False,
) -> tuple[torch.Tensor, dict]:
    """
    Calculate the advantage alignment credits with vectorization, as described
    in https://arxiv.org/abs/2406.14662.

    This method computes, per batch element and time step t:

    \[
    Credit(s_t, a_t, b_t) = A^1(s_t, a_t, b_t)
        + \beta \left( \sum_{k \leq t} \gamma^{t-k} A^1(s_k, a'_k, b_k) \right)
          A^{2}(s_t, a_t, b_t)
    \]

    Args:
        a1: Advantages (B, S) of the main trajectories for the current agent.
        a1_alternative: Advantages (B, S, A) of alternative actions for the
            current agent; required unless `use_old_ad_align` is True.
        a2: Advantages (B, S) of the main trajectories for the other agent.
        exclude_k_equals_t: Restrict the alignment sum to k < t instead of k <= t.
        beta: Scaling of the opponent-shaping term.
        gamma: Discount used inside the alignment sum.
        use_old_ad_align: Build the alignment weights from `a1` itself rather
            than from the alternative-action advantages.
        use_sign: Replace each alignment weight by its sign (+1 / -1 / 0);
            requires beta == 1.0.
        clipping: When set and nonzero, clip weights to [-clipping, clipping].
        use_time_regularization: Divide the weight at step t by (t + 1).
        force_coop_first_step: Force the weight of the first step to 1.
        use_variance_regularization: Currently unused; kept for config
            compatibility (see the commented-out block in the original).
        rloo_branch: Include `a1` in the alternative-action mean and apply
            RLOO centering.
        reuse_baseline: Reuse a1's RLOO baseline for `a1_alternative` instead
            of computing a fresh one.
        mean_normalize_ad_align: Subtract the per-time-step batch mean from
            the final credits.
        whiten_adalign_advantages: Whiten the final credits globally.
        whiten_adalign_advantages_time_step_wise: Whiten the final credits
            per time step (over the batch dimension).

    Returns:
        tuple[torch.Tensor, dict]: The (B, S) credits and a dict of named
        intermediate tensors for logging/debugging.  (The previous annotation
        claimed a bare torch.Tensor, but a tuple has always been returned.)
    """
    assert a1.dim() == a2.dim() == 2, "Advantages must be of shape (B, S)"
    if a1_alternative is not None:
        assert (
            a1_alternative.dim() == 3
        ), "Alternative advantages must be of shape (B, S, A)"
        B, T, A = a1_alternative.shape
    else:
        B, T = a1.shape
    assert a1.shape == a2.shape, "Not the same shape"

    sub_tensors = {}

    if use_old_ad_align:
        ad_align_weights = get_advantage_alignment_weights(
            advantages=a1, exclude_k_equals_t=exclude_k_equals_t, gamma=gamma
        )
        sub_tensors["ad_align_weights_prev"] = ad_align_weights
        if exclude_k_equals_t:
            ad_align_weights = gamma * ad_align_weights
    else:
        assert a1_alternative is not None, "Alternative advantages must be provided"
        if rloo_branch:
            # Include the taken action's own advantage in the alternative set.
            a1_alternative = torch.cat([a1.unsqueeze(2), a1_alternative], dim=2)
        a1_alternative = a1_alternative.mean(dim=2)
        # NOTE(review): RLOO centering of a1 is applied on this whole branch,
        # matching the reconstructed control flow — confirm against upstream.
        a1, baseline = get_rloo_credits(a1)
        if reuse_baseline:
            a1_alternative = a1_alternative - baseline
        else:
            a1_alternative, _ = get_rloo_credits(a1_alternative)
        assert a1.shape == a1_alternative.shape, "Not the same shape"
        ad_align_weights = get_advantage_alignment_weights(
            advantages=a1_alternative,
            exclude_k_equals_t=exclude_k_equals_t,
            gamma=gamma,
        )
        sub_tensors["ad_align_weights"] = ad_align_weights

    # Use sign
    if use_sign:
        assert beta == 1.0, "beta should be 1.0 when using sign"
        # torch.sign maps >0 -> 1, <0 -> -1, 0 -> 0, exactly like the original
        # masking logic, but returns a NEW tensor.  The original wrote in place
        # and thereby also mutated the tensor already stored under
        # sub_tensors["ad_align_weights"], corrupting that snapshot.
        ad_align_weights = torch.sign(ad_align_weights)
        sub_tensors["ad_align_weights_sign"] = ad_align_weights

    ###################
    # Process weights
    ###################

    # Use clipping
    if clipping not in [0.0, None]:
        # Saturation masks use the actual clip threshold; the original compared
        # against the constant 1 regardless of `clipping`.
        upper_mask = ad_align_weights > clipping
        lower_mask = ad_align_weights < -clipping

        ad_align_weights = torch.clip(
            ad_align_weights,
            -clipping,
            clipping,
        )
        # Fraction of clipped entries.  The original divided by the *method*
        # `upper_mask.size` (a TypeError at runtime); use numel().  The ratio
        # was also computed and dropped — expose it for logging.
        clipping_ratio = (
            torch.sum(upper_mask) + torch.sum(lower_mask)
        ) / upper_mask.numel()
        sub_tensors["ad_align_clipping_ratio"] = clipping_ratio
        sub_tensors["clipped_ad_align_weights"] = ad_align_weights

    # 1/(1+t) Regularization
    if use_time_regularization:
        t_values = torch.arange(1, T + 1).to(ad_align_weights.device)
        ad_align_weights = ad_align_weights / t_values
        sub_tensors["time_regularized_ad_align_weights"] = ad_align_weights

    # Use coop on t=0
    if force_coop_first_step:
        # Clone first so earlier diagnostic snapshots in sub_tensors are not
        # mutated by the in-place write below.
        ad_align_weights = ad_align_weights.clone()
        ad_align_weights[:, 0] = 1
        sub_tensors["coop_first_step_ad_align_weights"] = ad_align_weights
    # # Normalize alignment terms (across same time step)
    # if use_variance_regularization_in_ad_align:
    #     # TODO: verify
    #     reg_coef = torch.std(a1[:, -1]) / (torch.std(opp_shaping_terms[:, -1]) + 1e-9)
    #     opp_shaping_terms *= reg_coef

    ####################################
    # Compose elements together
    ####################################

    opp_shaping_terms = beta * ad_align_weights * a2
    sub_tensors["ad_align_opp_shaping_terms"] = opp_shaping_terms

    credits = a1 + opp_shaping_terms
    if mean_normalize_ad_align:
        credits = credits - credits.mean(dim=0)
        sub_tensors["mean_normalized_ad_align_credits"] = credits
    if whiten_adalign_advantages:
        credits = (credits - credits.mean()) / (credits.std() + 1e-9)
        sub_tensors["whitened_ad_align_credits"] = credits
    if whiten_adalign_advantages_time_step_wise:
        credits = (credits - credits.mean(dim=0, keepdim=True)) / (
            credits.std(dim=0, keepdim=True) + 1e-9
        )
        sub_tensors["whitened_ad_align_credits_time_step_wise"] = credits
    sub_tensors["final_ad_align_credits"] = credits

    return credits, sub_tensors
|
src_code_for_reproducibility/training/tally_metrics.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from numbers import Number
|
| 3 |
+
from typing import Union
|
| 4 |
+
|
| 5 |
+
import wandb
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class Tally:
    """
    Minimal scalar-first metric accumulator.

    - Keys are strings.
    - The first add for a key stores a bare scalar; any later add for the
      same key upgrades the entry to a list of scalars.
    """

    def __init__(self):
        # Mapping: path -> scalar (single add) or list of scalars (repeats).
        self.stats = {}

    def reset(self):
        """Drop every recorded metric."""
        self.stats = {}

    def _coerce_scalar(self, value: Union[int, float]) -> Union[int, float]:
        """Unwrap tensor-like objects via ``.item()`` and require a plain number."""
        item_fn = getattr(value, "item", None)
        if callable(item_fn):
            try:
                value = item_fn()
            except Exception:
                pass
        if not isinstance(value, Number):
            raise AssertionError("Metric must be a scalar number")
        return value

    def add_metric(self, path: str, metric: Union[int, float]):
        """Record ``metric`` under ``path``, upgrading to a list on repeats."""
        metric = float(metric)
        assert isinstance(path, str), "Path must be a string."
        assert isinstance(metric, float), "Metric must be a scalar number."

        scalar = self._coerce_scalar(metric)
        current = self.stats.get(path)
        if current is None:
            self.stats[path] = scalar
        elif isinstance(current, list):
            current.append(scalar)
        else:
            self.stats[path] = [current, scalar]

    def save(self, identifier: str, folder: str):
        """Best-effort pickle of the stats dict to ``folder/identifier.tally.pkl``."""
        os.makedirs(name=folder, exist_ok=True)
        try:
            import pickle

            target = os.path.join(folder, f"{identifier}.tally.pkl")
            with open(target, "wb") as fh:
                pickle.dump(self.stats, fh, protocol=pickle.HIGHEST_PROTOCOL)
        except Exception:
            # Saving is best-effort; never crash training over a metrics dump.
            pass
|
src_code_for_reproducibility/training/tally_rollout.py
ADDED
|
@@ -0,0 +1,137 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import os
|
| 3 |
+
from copy import deepcopy
|
| 4 |
+
from typing import Union
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
import pandas as pd
|
| 8 |
+
import torch
|
| 9 |
+
from transformers import AutoTokenizer
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class RolloutTallyItem:
    def __init__(self, crn_ids: list[str], rollout_ids: list[str], agent_ids: list[str], metric_matrix: torch.Tensor):
        """
        Bundle per-rollout identifiers with a numpy copy of a metric matrix.

        Args:
            crn_ids (list[str]): CRN identifiers (tensors are converted to numpy).
            rollout_ids (list[str]): Rollout identifiers (tensors are converted to numpy).
            agent_ids (list[str]): Agent identifiers (tensors are converted to numpy).
            metric_matrix (torch.Tensor): 1-D or 2-D metric tensor; 1-D inputs
                are promoted to a single-row matrix.
        """
        def _detach_to_numpy(ids):
            # Identifier lists may arrive as tensors; normalize those to numpy.
            if isinstance(ids, torch.Tensor):
                return ids.detach().cpu().numpy()
            return ids

        self.crn_ids = _detach_to_numpy(crn_ids)
        self.rollout_ids = _detach_to_numpy(rollout_ids)
        self.agent_ids = _detach_to_numpy(agent_ids)

        matrix = metric_matrix.detach().cpu()
        assert 0 < matrix.ndim <= 2, "Metric matrix must have less than or equal to 2 dimensions"
        if matrix.ndim == 1:
            matrix = matrix.reshape(1, -1)
        # numpy has no bfloat16 representation, so widen before .numpy().
        if matrix.dtype == torch.bfloat16:
            matrix = matrix.float()
        self.metric_matrix = matrix.numpy()
|
| 40 |
+
|
| 41 |
+
class RolloutTally:
    """
    Collects RolloutTallyItem objects under nested string paths and pickles
    the resulting structure to disk.

    (The previous docstrings documented a tokenizer parameter and JSON/CSV
    outputs that do not exist; this class only appends items and pickles.)
    """

    def __init__(self):
        """Initialize with an empty nested metrics dictionary."""
        # Nested dict; leaves are lists of RolloutTallyItem objects.
        self.metrics = {}

    def reset(self):
        """Discard all collected metrics."""
        self.metrics = {}

    def get_from_nested_dict(self, dictio: dict, path: list[str]):
        """
        Retrieve the value stored at a nested path.

        Note: intermediate dictionaries are created as a side effect (via
        setdefault); add_metric relies on this behavior.

        Args:
            dictio (dict): The dictionary to search.
            path (list[str]): Keys identifying the nested location.
                (The previous annotation said `str`, but the assert and all
                callers use a list.)

        Returns:
            Any: The value at the specified path, or None if not found.
        """
        assert isinstance(path, list), "Path must be list."
        for key in path[:-1]:
            dictio = dictio.setdefault(key, {})
        return dictio.get(path[-1], None)

    def set_at_path(self, dictio: dict, path: list[str], value):
        """
        Set a value at a nested path, creating intermediate dicts as needed.

        Args:
            dictio (dict): The dictionary to modify.
            path (list[str]): Keys identifying the nested location.
            value (Any): The value to set at the specified path.
        """
        for key in path[:-1]:
            dictio = dictio.setdefault(key, {})
        dictio[path[-1]] = value

    def add_metric(
        self, path: list[str], rollout_tally_item: RolloutTallyItem
    ):
        """
        Append a deep-copied rollout tally item to the list stored at `path`.

        Args:
            path (list[str]): Keys identifying the nested location.
            rollout_tally_item (RolloutTallyItem): The item to record.
        """
        # Deep-copy so later mutation by the caller cannot alter the tally.
        rollout_tally_item = deepcopy(rollout_tally_item)

        existing = self.get_from_nested_dict(dictio=self.metrics, path=path)
        if existing is None:
            self.set_at_path(dictio=self.metrics, path=path, value=[rollout_tally_item])
        else:
            existing.append(rollout_tally_item)

    def save(self, identifier: str, folder: str):
        """
        Best-effort pickle of the metrics tree to `folder/identifier.rt_tally.pkl`.

        Args:
            identifier (str): File-name stem for the dump.
            folder (str): Directory to write into (created if absent).
        """
        os.makedirs(name=folder, exist_ok=True)

        # (Removed a dead `datetime.now()` computation that was never used.)
        try:
            import pickle

            pkl_path = os.path.join(folder, f"{identifier}.rt_tally.pkl")
            payload = {"metrics": self.metrics}
            with open(pkl_path, "wb") as f:
                pickle.dump(payload, f, protocol=pickle.HIGHEST_PROTOCOL)
        except Exception:
            # Saving is best-effort; never crash training over a metrics dump.
            pass
|
src_code_for_reproducibility/training/tally_tokenwise.py
ADDED
|
@@ -0,0 +1,276 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import os
|
| 3 |
+
from typing import Any, Dict, List, Tuple, Union
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import pandas as pd
|
| 7 |
+
import torch
|
| 8 |
+
from transformers import AutoTokenizer
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class ContextualizedTokenwiseTally:
    """
    Collect, store, and save token-level metrics per rollout.

    - One DataFrame per rollout_id in `paths`
    - Index = timestep (int)
    - Columns are added incrementally via `add_contexts()` and `add_data()`
    - Cells may contain scalars, strings, or lists (dtype=object)

    Typical call order: construct, then per batch call `set_action_mask()`
    and `set_range()`, then `add_contexts()` / `add_data()`, and finally
    `save()` once.
    """

    def __init__(
        self,
        tokenizer: AutoTokenizer,
        paths: List[str],
        max_context_length: int = 30,
    ):
        """
        Args:
            tokenizer: HuggingFace tokenizer used to convert tids -> tokens
            paths: rollout identifiers (parallel to batch dimension)
            max_context_length: truncate context token lists to this length
        """
        self.tokenizer = tokenizer
        self.paths = paths
        self.max_context_length = max_context_length
        # One (initially empty) DataFrame per rollout id.
        self.tally: Dict[str, pd.DataFrame] = {path: pd.DataFrame() for path in paths}

        # set later by setters
        self.contexts: torch.Tensor | None = None
        self.action_mask: torch.Tensor | None = None
        self.range: Tuple[int, int] | None = None

    # --------- Utilities ---------

    def tids_to_str(self, tids: List[int]) -> List[str]:
        """Convert a list of token IDs to a list of token strings."""
        return self.tokenizer.convert_ids_to_tokens(tids)

    def _ensure_ready(self):
        # Both setters must have been called before any add_* method.
        assert self.action_mask is not None, "call set_action_mask(mask) first"
        assert self.range is not None, "call set_range((start, end)) first"

    @staticmethod
    def _sanitize_filename(name: Any) -> str:
        """Make a safe filename from any rollout_id."""
        s = str(name)
        # Replace path separators and shell-hostile characters with '_'.
        bad = {os.sep, " ", ":", "|", "<", ">", '"', "'"}
        if os.altsep is not None:
            bad.add(os.altsep)
        for ch in bad:
            s = s.replace(ch, "_")
        return s

    @staticmethod
    def _pad_left(seq: List[Any], length: int, pad_val: Any = "") -> List[Any]:
        """Left-pad a sequence to `length` with `pad_val`.

        NOTE(review): currently unused — add_contexts() inlines the same
        logic; consider routing it through this helper.
        """
        if len(seq) >= length:
            return seq[-length:]
        return [pad_val] * (length - len(seq)) + list(seq)

    # --------- Setters ---------

    def set_action_mask(self, action_mask: torch.Tensor):
        """
        action_mask: (B, S) bool or 0/1 indicating valid steps
        """
        self.action_mask = action_mask

    def set_range(self, range: Tuple[int, int]):
        """
        range: slice (start, end) into self.paths for current batch
        """
        self.range = range

    # --------- Column builders ---------

    def add_contexts(self, contexts: torch.Tensor):
        """
        Add a single 'context' column (list[str]) for valid steps.

        Expects `contexts` with shape (B, S): token id at each timestep.
        For each valid timestep t, we use the last N tokens up to and including t:
            window = contexts[i, max(0, t - N + 1) : t + 1]
        The list is left-padded with "" to always be length N.
        """
        self._ensure_ready()

        # Rows of this batch map to these rollout ids.
        current_paths = self.paths[self.range[0] : self.range[1]]
        B, S = contexts.shape
        N = self.max_context_length

        # to CPU ints once
        contexts_cpu = contexts.detach().to("cpu")

        for i in range(B):
            rollout_id = current_paths[i]
            df = self.tally.get(rollout_id, pd.DataFrame())

            # Timesteps where the action mask is set for this row.
            valid_idx = torch.nonzero(
                self.action_mask[i].bool(), as_tuple=False
            ).squeeze(-1)
            if valid_idx.numel() == 0:
                self.tally[rollout_id] = df
                continue

            idx_list = valid_idx.tolist()

            # ensure index contains valid steps
            if df.empty:
                df = pd.DataFrame(index=idx_list)
            else:
                # Union of existing and new timesteps, kept sorted so later
                # .loc assignments align.
                new_index = sorted(set(df.index.tolist()) | set(idx_list))
                if list(df.index) != new_index:
                    df = df.reindex(new_index)

            # build context windows
            ctx_token_lists = []
            for t in idx_list:
                start = max(0, t - N + 1)
                window_ids = contexts_cpu[i, start : t + 1].tolist()
                window_toks = self.tids_to_str([int(x) for x in window_ids])
                # Left-pad with "" (or truncate from the left) to length N.
                if len(window_toks) < N:
                    window_toks = [""] * (N - len(window_toks)) + window_toks
                else:
                    window_toks = window_toks[-N:]
                ctx_token_lists.append(window_toks)

            # single 'context' column
            if "context" not in df.columns:
                df["context"] = pd.Series(index=df.index, dtype=object)
            # Assign via an index-aligned Series so list cells stay intact.
            df.loc[idx_list, "context"] = pd.Series(
                ctx_token_lists, index=idx_list, dtype=object
            )

            self.tally[rollout_id] = df

    def add_data(
        self,
        metric_id: str,
        metrics: torch.Tensor,
        to_tids: bool = False,
    ):
        """
        Add a metric column for valid steps.

        Args:
            metric_id: column name
            metrics: shape (B, S) for scalars/ids or (B, S, K) for top-k vectors
            to_tids: if True, treat ints/lists of ints as tids and convert to tokens
        """
        self._ensure_ready()
        current_paths = self.paths[self.range[0] : self.range[1]]

        if metrics.dim() == 2:
            B, S = metrics.shape
        elif metrics.dim() == 3:
            B, S, _ = metrics.shape
        else:
            raise ValueError("metrics must be (B, S) or (B, S, K)")

        for i in range(B):
            rollout_id = current_paths[i]
            df = self.tally.get(rollout_id, pd.DataFrame())

            valid_idx = torch.nonzero(
                self.action_mask[i].bool(), as_tuple=False
            ).squeeze(-1)
            if valid_idx.numel() == 0:
                self.tally[rollout_id] = df
                continue

            idx_list = valid_idx.detach().cpu().tolist()

            # Ensure index contains valid steps
            if df.empty:
                df = pd.DataFrame(index=idx_list)
            else:
                new_index = sorted(set(df.index.tolist()) | set(idx_list))
                if list(df.index) != new_index:
                    df = df.reindex(new_index)

            # Slice metrics at valid steps
            m_valid = metrics[i][valid_idx]

            # -> pure python lists (1D list or list-of-lists)
            values = m_valid.detach().cpu().tolist()

            # optional tids -> tokens
            if to_tids:

                def _to_tokish(x):
                    # Scalars become one token string; lists become lists of
                    # token strings.
                    if isinstance(x, list):
                        return self.tids_to_str([int(v) for v in x])
                    else:
                        return self.tids_to_str([int(x)])[0]

                values = [_to_tokish(v) for v in values]

            # Ensure column exists with object dtype, then assign via aligned Series
            if metric_id not in df.columns:
                df[metric_id] = pd.Series(index=df.index, dtype=object)

            if isinstance(values, np.ndarray):
                values = values.tolist()

            if len(values) != len(idx_list):
                raise ValueError(
                    f"Length mismatch for '{metric_id}': values={len(values)} vs idx_list={len(idx_list)}"
                )

            df.loc[idx_list, metric_id] = pd.Series(
                values, index=idx_list, dtype=object
            )
            self.tally[rollout_id] = df

    # --------- Saving ---------

    def save(self, path: str):
        """
        Write a manifest JSON and one CSV per rollout.

        - Manifest includes metadata only (safe to JSON).
        - Each rollout CSV is written with index label 'timestep'.
        - Only a single 'context' column (list[str]).
        """
        # Nothing to write if no rollout accumulated any rows.
        if not self.tally or all(df.empty for df in self.tally.values()):
            return

        os.makedirs(path, exist_ok=True)
        from datetime import datetime

        now = datetime.now()

        manifest = {
            "created_at": f"{now:%Y-%m-%d %H:%M:%S}",
            "max_context_length": self.max_context_length,
            "num_rollouts": len(self.tally),
            "rollouts": [],
        }

        for rid, df in self.tally.items():
            rid_str = str(rid)
            safe_name = self._sanitize_filename(rid_str)
            csv_path = os.path.join(path, f"{safe_name}_tokenwise.csv")

            # Put 'context' first, then the rest
            # NOTE(review): if add_contexts was never called for this rollout,
            # df["context"] is missing and df[cols] raises KeyError, which is
            # swallowed below and the rollout is skipped — confirm intended.
            cols = ["context"] + [c for c in df.columns if c != "context"]
            try:
                df[cols].to_csv(csv_path, index=True, index_label="timestep")
            except Exception as e:
                # Best-effort: a rollout whose CSV fails is simply omitted
                # from the manifest.
                continue

            manifest["rollouts"].append(
                {
                    "rollout_id": rid_str,
                    "csv": csv_path,
                    "num_rows": int(df.shape[0]),
                    "columns": cols,
                }
            )

        manifest_path = os.path.join(
            path, f"tokenwise_manifest_{now:%Y-%m-%d___%H-%M-%S}.json"
        )
        with open(manifest_path, "w") as fp:
            json.dump(manifest, fp, indent=2)
|
src_code_for_reproducibility/training/tokenize_chats.py
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import sys
|
| 3 |
+
|
| 4 |
+
import regex
|
| 5 |
+
import torch
|
| 6 |
+
from transformers import AutoTokenizer
|
| 7 |
+
|
| 8 |
+
from mllm.training.training_data_utils import TrainingChatTurn, TrajectoryBatch
|
| 9 |
+
|
| 10 |
+
# Module-level logger; records are also echoed to stdout so they show up in
# captured run logs regardless of upstream logging configuration.
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler(sys.stdout))
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# def get_chat_dicts(chat: list[TrainingChatTurn]) -> list[dict]:
|
| 15 |
+
# chat_dicts = [chat_turn.dict() for chat_turn in chat]
|
| 16 |
+
# return chat_dicts
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def process_training_chat(
    tokenizer: "AutoTokenizer",
    chat_history: "list[TrainingChatTurn]",
    entropy_mask_regex: "str | None" = None,
    exploration_prompts_to_remove: "list[str] | None" = None,
    use_engine_out_token_ids: bool = False,
) -> "tuple[torch.LongTensor, torch.BoolTensor, torch.BoolTensor, torch.LongTensor, torch.BoolTensor, torch.FloatTensor]":
    """Concatenate a training chat's stored token ids and build aligned per-token masks.

    Each `TrainingChatTurn` already carries its token ids: assistant turns use
    `out_token_ids` (tokens the engine sampled), all other roles use
    `chat_template_token_ids`. This function concatenates those ids turn by
    turn and builds parallel 1D tensors aligned with the result. The
    `tokenizer` argument is currently unused (tokens are pre-computed), and is
    kept for interface stability; `use_engine_out_token_ids` is likewise
    accepted but not consulted — assistant turns always use engine token ids.

    Notes:
        - Exploration prompts are stripped by mutating `turn.content` in
          place; callers see the cleaned content afterwards.
        - No truncation or padding is performed; downstream code batches/pads.
        - `chat_history` must be non-empty (`torch.cat` of zero tensors raises).

    Args:
        tokenizer: Unused; kept for backward compatibility.
        chat_history: Ordered list of `TrainingChatTurn` forming one dialogue.
        entropy_mask_regex: When given, a turn's entropy mask is True only if
            this pattern matches the turn's content; otherwise all True.
        exploration_prompts_to_remove: Substrings stripped from turn contents
            before training (default: none).
        use_engine_out_token_ids: Unused; kept for backward compatibility.

    Returns:
        A tuple of six 1D tensors, all of equal length N (total tokens):
        - input_ids (LongTensor): token ids for the whole chat.
        - action_mask (BoolTensor): True on assistant (action) tokens.
        - entropy_mask (BoolTensor): True where entropy regularization applies.
        - timesteps (LongTensor): per-token `time_step` of the source turn.
        - state_ends_mask (BoolTensor): True on the last token of any turn
          whose `is_state_end` is True.
        - engine_log_probs (FloatTensor): engine log-probs on action tokens,
          zeros elsewhere.
    """
    # Avoid the shared-mutable-default pitfall: normalize None to a fresh list.
    if exploration_prompts_to_remove is None:
        exploration_prompts_to_remove = []

    input_ids = []
    action_mask = []
    entropy_mask = []
    timesteps = []
    state_ends_mask = []
    engine_log_probs = []

    for train_chat_turn in chat_history:
        # Remove exploration prompts from training data (in-place content edit).
        for exploration_prompt in exploration_prompts_to_remove:
            if exploration_prompt in train_chat_turn.content:
                train_chat_turn.content = train_chat_turn.content.replace(
                    exploration_prompt, ""
                )

        # Entropy mask: keep entropy loss on this turn only if the pattern
        # matches (or if no pattern was configured at all).
        if entropy_mask_regex is not None:
            keep_entropy = (
                regex.search(entropy_mask_regex, train_chat_turn.content) is not None
            )
        else:
            keep_entropy = True

        is_action = train_chat_turn.role == "assistant"
        if is_action:
            # Assistant turns: engine-sampled token ids and their log-probs.
            chat_turn_ids = train_chat_turn.out_token_ids
            nb_ids = chat_turn_ids.numel()
            action_mask.append(torch.ones(nb_ids, dtype=torch.bool))
            engine_log_probs.append(train_chat_turn.log_probs)
        else:
            # Non-assistant turns: chat-template token ids, zero log-probs.
            chat_turn_ids = train_chat_turn.chat_template_token_ids
            nb_ids = chat_turn_ids.numel()
            action_mask.append(torch.zeros(nb_ids, dtype=torch.bool))
            engine_log_probs.append(torch.zeros(nb_ids, dtype=torch.float))

        input_ids.append(chat_turn_ids)

        ends = torch.zeros(nb_ids, dtype=torch.bool)
        if train_chat_turn.is_state_end:
            ends[-1] = True  # last token of the turn marks the state end
        state_ends_mask.append(ends)

        entropy_mask.append(torch.full((nb_ids,), keep_entropy, dtype=torch.bool))
        timesteps.append(torch.ones(nb_ids) * train_chat_turn.time_step)

    return (
        torch.cat(input_ids),
        torch.cat(action_mask),
        torch.cat(entropy_mask),
        torch.cat(timesteps).to(torch.long),
        torch.cat(state_ends_mask),
        torch.cat(engine_log_probs),
    )
|
src_code_for_reproducibility/training/trainer_ad_align.py
ADDED
|
@@ -0,0 +1,492 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import copy
|
| 2 |
+
import logging
|
| 3 |
+
import sys
|
| 4 |
+
from dataclasses import dataclass
|
| 5 |
+
from typing import Tuple
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
from torch.nn.utils.rnn import pad_sequence
|
| 9 |
+
|
| 10 |
+
from mllm.markov_games.rollout_tree import (
|
| 11 |
+
ChatTurn,
|
| 12 |
+
RolloutTreeBranchNode,
|
| 13 |
+
RolloutTreeRootNode,
|
| 14 |
+
)
|
| 15 |
+
from mllm.training.credit_methods import (
|
| 16 |
+
get_advantage_alignment_credits,
|
| 17 |
+
get_discounted_state_visitation_credits,
|
| 18 |
+
)
|
| 19 |
+
from mllm.training.tally_metrics import Tally
|
| 20 |
+
from mllm.training.tally_rollout import RolloutTally, RolloutTallyItem
|
| 21 |
+
from mllm.training.tally_tokenwise import ContextualizedTokenwiseTally
|
| 22 |
+
from mllm.training.tokenize_chats import process_training_chat
|
| 23 |
+
from mllm.training.trainer_common import BaseTrainer
|
| 24 |
+
from mllm.training.training_data_utils import (
|
| 25 |
+
AdvantagePacket,
|
| 26 |
+
TrainingBatch,
|
| 27 |
+
TrainingChatTurn,
|
| 28 |
+
TrajectoryBatch,
|
| 29 |
+
get_main_chat_list_and_rewards,
|
| 30 |
+
get_tokenwise_credits,
|
| 31 |
+
)
|
| 32 |
+
from mllm.utils.resource_context import resource_logger_context
|
| 33 |
+
|
| 34 |
+
logger = logging.getLogger(__name__)
|
| 35 |
+
logger.addHandler(logging.StreamHandler(sys.stdout))
|
| 36 |
+
|
| 37 |
+
RolloutId = int
|
| 38 |
+
AgentId = str
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
@dataclass
class AdAlignTrainingData:
    """Per-agent container for one round of advantage-alignment training data.

    Holds the agent's main trajectory batch plus the advantage estimates for
    the main and (optionally) branched alternative trajectories. Advantage
    fields are jagged lists — one tensor per rollout — since rollouts may
    differ in length jT.
    """

    # Identifier of the agent this data belongs to.
    agent_id: str
    # The agent's main (on-policy) trajectory batch.
    main_data: TrajectoryBatch
    # list-of-tensors: per rollout advantages with length jT
    main_advantages: list[torch.FloatTensor] | None = None
    # list-of-tensors: per rollout matrix (jT, A)
    alternative_advantages: list[torch.FloatTensor] | None = None
    # Per-rollout credits produced by advantage alignment (filled in later).
    advantage_alignment_credits: list[torch.FloatTensor] | None = None
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def get_alternative_chat_histories(
    agent_id: str, root: "RolloutTreeRootNode"
) -> "tuple[list[list[TrainingChatTurn]], list[torch.FloatTensor]]":
    """Collect all branched (alternative) chat histories for one agent.

    Walks the main trajectory of the rollout tree from `root`. At each branch
    node, every alternative child for `agent_id` yields one full alternative
    history: the chat prefix shared with the main trajectory up to the branch
    point, followed by the alternative continuation. Rewards are assembled the
    same way (shared prefix rewards + continuation rewards).

    Args:
        agent_id: The agent we want to get the chat histories for.
        root: The root of the rollout tree.

    Returns:
        A tuple:
        - alternative_chats: list of jT*A chat histories
          (jT branch points on the main path, A alternatives each).
        - alternative_rewards: list of jT*A 1D reward tensors, each covering
          the prefix plus the alternative continuation.
    """
    current_node = root.child
    pre_branch_chat = []
    pre_branch_rewards = []
    alternative_rewards = []
    alternative_chats = []
    while current_node is not None:
        assert isinstance(
            current_node, RolloutTreeBranchNode
        ), "Current node should be a branch node."
        main_node = current_node.main_child
        branches = current_node.branches
        current_node = main_node.child

        # Get the `A` alternative trajectories branching off at this step.
        alternative_nodes = branches[agent_id]
        for alt_node in alternative_nodes:
            post_branch_chat, post_branch_rewards = get_main_chat_list_and_rewards(
                agent_id=agent_id, root=alt_node
            )
            # Prefix (shared with the main trajectory) + alternative suffix.
            branch_chat = pre_branch_chat + post_branch_chat
            alternative_chats.append(branch_chat)
            alternative_rewards.append(
                torch.cat([torch.tensor(pre_branch_rewards), post_branch_rewards])
            )

        # Extend the shared prefix with this step's main-trajectory turns,
        # tagging each turn with the step's time step.
        chat_turns: list[ChatTurn] = main_node.step_log.action_logs[agent_id].chat_turns
        chat_turns: list[TrainingChatTurn] = [
            TrainingChatTurn(time_step=main_node.time_step, **turn.model_dump())
            for turn in chat_turns
        ]
        pre_branch_chat.extend(chat_turns)
        pre_branch_rewards.append(
            main_node.step_log.simulation_step_log.rewards[agent_id]
        )

    return alternative_chats, alternative_rewards
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
class TrainerAdAlign(BaseTrainer):
    """
    Extends the reinforce trainer to support Advantage Alignment.

    Each agent estimates advantages for its own main trajectory and,
    optionally, for branched alternative trajectories; agents then exchange
    main-trajectory advantages via `share_advantage_data` /
    `receive_advantage_data` and combine them into per-token credits used by
    the policy gradient.
    """

    def __init__(
        self,
        ad_align_beta: float,
        ad_align_gamma: float,
        ad_align_exclude_k_equals_t: bool,
        ad_align_use_sign: bool,
        ad_align_clipping: float,
        ad_align_force_coop_first_step: bool,
        use_old_ad_align: bool,
        use_time_regularization: bool,
        rloo_branch: bool,
        reuse_baseline: bool,
        ad_align_beta_anneal_step: int = -1,
        ad_align_beta_anneal_rate: float = 0.5,
        min_ad_align_beta: float = 0.1,
        mean_normalize_ad_align: bool = False,
        whiten_adalign_advantages: bool = False,
        whiten_adalign_advantages_time_step_wise: bool = False,
        *args,
        **kwargs,
    ):
        """
        Initialize the advantage alignment trainer.

        Args:
            ad_align_beta: Beta parameter for the advantage alignment.
            ad_align_gamma: Gamma parameter for the advantage alignment.
            ad_align_exclude_k_equals_t: Whether to exclude k = t terms in the
                advantage alignment.
            ad_align_use_sign: Whether to use sign in the advantage alignment.
            ad_align_clipping: Clipping value for the advantage alignment.
            ad_align_force_coop_first_step: Whether to force coop on the first
                step of the advantage alignment.
            use_old_ad_align: Use the legacy advantage-alignment formulation.
            use_time_regularization: Apply time regularization in the credits.
            rloo_branch: Use the RLOO-over-branches variant.
            reuse_baseline: Reuse the main-trajectory baseline for branches.
            ad_align_beta_anneal_step: Anneal beta every this many rollouts;
                disabled when <= 0.
            ad_align_beta_anneal_rate: Multiplicative factor applied to beta
                at each anneal step.
            min_ad_align_beta: Lower bound for the annealed beta.
            mean_normalize_ad_align: Mean-normalize the alignment terms.
            whiten_adalign_advantages: Whiten advantages before alignment.
            whiten_adalign_advantages_time_step_wise: Whiten per time step.
        """
        super().__init__(*args, **kwargs)
        self.ad_align_beta = ad_align_beta
        self.ad_align_gamma = ad_align_gamma
        self.ad_align_exclude_k_equals_t = ad_align_exclude_k_equals_t
        self.ad_align_use_sign = ad_align_use_sign
        self.ad_align_clipping = ad_align_clipping
        self.ad_align_force_coop_first_step = ad_align_force_coop_first_step
        self.use_old_ad_align = use_old_ad_align
        self.use_time_regularization = use_time_regularization
        self.rloo_branch = rloo_branch
        self.reuse_baseline = reuse_baseline
        self.ad_align_beta_anneal_step = ad_align_beta_anneal_step
        self.ad_align_beta_anneal_rate = ad_align_beta_anneal_rate
        self.min_ad_align_beta = min_ad_align_beta
        # Tracks the last rollout id at which beta was annealed, so the same
        # anneal step is not applied twice.
        self.past_ad_align_step = -1
        self.mean_normalize_ad_align = mean_normalize_ad_align
        self.whiten_adalign_advantages = whiten_adalign_advantages
        self.whiten_adalign_advantages_time_step_wise = (
            whiten_adalign_advantages_time_step_wise
        )
        self.training_data: dict[AgentId, AdAlignTrainingData] = {}
        self.debug_path_list: list[str] = []

    def set_agent_trajectory_data(
        self, agent_id: str, roots: list[RolloutTreeRootNode]
    ):
        """
        Build and store the advantage-alignment training data for one agent.

        Tokenizes the main trajectory of every rollout tree in `roots`,
        estimates advantages (accumulating critic gradients in the process),
        and — when branched alternatives exist — does the same for the jT*A
        alternative trajectories per rollout, extracting each alternative's
        advantage at its branching time step. The result is stored in
        `self.training_data[agent_id]` as an `AdAlignTrainingData`.
        """
        # For main rollouts
        batch_rollout_ids = []
        batch_crn_ids = []
        batch_input_ids = []
        batch_action_mask = []
        batch_entropy_mask = []
        batch_timesteps = []
        batch_state_ends_mask = []
        batch_engine_log_probs = []
        batch_rewards = []

        # For alternative actions rollouts
        batch_branching_time_steps = []
        alternative_batch_input_ids = []
        alternative_batch_action_mask = []
        alternative_batch_entropy_mask = []
        alternative_batch_timesteps = []
        alternative_batch_state_ends_mask = []
        alternative_batch_engine_log_probs = []
        alternative_batch_rewards = []
        jT_list = []

        # Probe the number of alternative actions; trees without branches for
        # this agent (missing key, empty tree, ...) mean A = 0.
        try:
            A = len(roots[0].child.branches[agent_id])
        except Exception:  # narrow from bare except: don't swallow KeyboardInterrupt
            A = 0

        for root in roots:
            rollout_id = root.id
            self.debug_path_list.append(
                "mgid:" + str(rollout_id) + "_agent_id:" + agent_id
            )
            # Get main trajectory
            batch_rollout_ids.append(rollout_id)
            batch_crn_ids.append(root.crn_id)
            main_chat, main_rewards = get_main_chat_list_and_rewards(
                agent_id=agent_id, root=root
            )
            (
                input_ids,
                action_mask,
                entropy_mask,
                timesteps,
                state_ends_mask,
                engine_log_probs,
            ) = process_training_chat(
                tokenizer=self.tokenizer,
                chat_history=main_chat,
                entropy_mask_regex=self.entropy_mask_regex,
                exploration_prompts_to_remove=self.exploration_prompts_to_remove,
            )
            batch_input_ids.append(input_ids)
            batch_action_mask.append(action_mask)
            batch_entropy_mask.append(entropy_mask)
            batch_timesteps.append(timesteps)
            batch_state_ends_mask.append(state_ends_mask)
            batch_engine_log_probs.append(engine_log_probs)
            batch_rewards.append(main_rewards)
            jT = main_rewards.numel()  # TODO: better than this
            jT_list.append(jT)
            if A > 0:
                # Branching time step for each of the jT*A alternatives:
                # step 0 repeated A times, then step 1 repeated A times, ...
                branching_time_steps = [bt for item in range(jT) for bt in A * [item]]
                batch_branching_time_steps.extend(branching_time_steps)

                # Get all of the (jT*A) alternative trajectories in the tree
                # (jT main-trajectory steps, A alternative actions per step).
                alternative_chats, alternative_rewards = get_alternative_chat_histories(
                    agent_id=agent_id, root=root
                )
                assert (
                    len(alternative_chats) == A * jT
                ), "Incorrect number of alternative trajectories."

                for chat, rewards in zip(alternative_chats, alternative_rewards):
                    (
                        input_ids,
                        action_mask,
                        entropy_mask,
                        timesteps,
                        state_ends_mask,
                        engine_log_probs,
                    ) = process_training_chat(
                        tokenizer=self.tokenizer,
                        chat_history=chat,
                        entropy_mask_regex=self.entropy_mask_regex,
                        exploration_prompts_to_remove=self.exploration_prompts_to_remove,
                    )
                    alternative_batch_input_ids.append(input_ids)
                    alternative_batch_action_mask.append(action_mask)
                    alternative_batch_entropy_mask.append(entropy_mask)
                    alternative_batch_timesteps.append(timesteps)
                    alternative_batch_state_ends_mask.append(state_ends_mask)
                    alternative_batch_engine_log_probs.append(engine_log_probs)
                    alternative_batch_rewards.append(rewards)

        jT_list = torch.Tensor(jT_list)

        trajectory_batch = TrajectoryBatch(
            rollout_ids=torch.tensor(batch_rollout_ids, dtype=torch.int32),  # (B,)
            crn_ids=torch.tensor(batch_crn_ids, dtype=torch.int32),
            agent_ids=[agent_id] * len(batch_rollout_ids),
            batch_input_ids=batch_input_ids,
            batch_action_mask=batch_action_mask,
            batch_entropy_mask=batch_entropy_mask,
            batch_timesteps=batch_timesteps,
            batch_state_ends_mask=batch_state_ends_mask,
            batch_engine_log_probs=batch_engine_log_probs,
            batch_rewards=batch_rewards,
        )
        # Get Advantages & Train Critic
        with resource_logger_context(
            logger, "Get advantages with critic gradient accumulation"
        ):
            self.batch_advantages: torch.FloatTensor = (
                self.get_advantages_with_critic_gradient_accumulation(trajectory_batch)
            )  # (B, jT)

        if A > 0:
            # `A` alternative trajectories are taken at each of the jT steps of
            # each of the B rollouts ("j" for jagged: lengths differ), so there
            # are sum(jT)*A alternative trajectories to process.
            with resource_logger_context(logger, "Create alternative trajectory batch"):
                sum_jT = int(torch.sum(jT_list).item())
                jT_list = (
                    jT_list.int().tolist()
                )  # (we only want the advantages where we branched out)
                alternative_trajectory_batch = TrajectoryBatch(
                    rollout_ids=torch.zeros(A * sum_jT, dtype=torch.int32),
                    crn_ids=torch.zeros(A * sum_jT, dtype=torch.int32),
                    agent_ids=[agent_id] * (A * sum_jT),
                    batch_input_ids=alternative_batch_input_ids,
                    batch_action_mask=alternative_batch_action_mask,
                    batch_entropy_mask=alternative_batch_entropy_mask,
                    batch_timesteps=alternative_batch_timesteps,
                    batch_state_ends_mask=alternative_batch_state_ends_mask,
                    batch_engine_log_probs=alternative_batch_engine_log_probs,
                    batch_rewards=alternative_batch_rewards,
                )

            # BAAs stands for batch alternative advantages.
            # (torch nested tensors have very little API support, so we do some
            # odd manual padding/gather/split work here instead.)
            with resource_logger_context(
                logger, "Compute alternative advantage estimates"
            ):
                BAAs_list = self.get_advantages_with_critic_gradient_accumulation(
                    alternative_trajectory_batch
                )  # list length (sum_jT * A), each (jT',)
                # Pad alternative advantages to (sum_jT*A, P), then pick each
                # trajectory's advantage at its own branching time step.
                BAAs_padded = pad_sequence(
                    BAAs_list, batch_first=True, padding_value=0.0
                )
                branch_idx = torch.tensor(
                    batch_branching_time_steps,
                    device=BAAs_padded.device,
                    dtype=torch.long,
                )
                gathered = BAAs_padded.gather(
                    dim=1, index=branch_idx.unsqueeze(1)
                ).squeeze(1)
                # Reshape and split per rollout, then transpose to (jT_i, A).
                gathered = gathered.view(A, sum_jT)  # (A, sum_jT)
                blocks = list(
                    torch.split(gathered, jT_list, dim=1)
                )  # len B, shapes (A, jT_i)
                BAAs = [
                    blk.transpose(0, 1).contiguous() for blk in blocks
                ]  # list of (jT_i, A)

        # NOTE(review): anneal check placed at method level (not inside the
        # A > 0 branch) — confirm against the original nesting.
        if self.ad_align_beta_anneal_step > 0:
            max_rollout_id = torch.max(trajectory_batch.rollout_ids) + 1
            if (
                max_rollout_id % self.ad_align_beta_anneal_step == 0
                and self.past_ad_align_step != max_rollout_id
            ):
                self.ad_align_beta = max(
                    self.ad_align_beta * self.ad_align_beta_anneal_rate,
                    self.min_ad_align_beta,
                )
                logger.info(f"Annealing ad_align_beta to {self.ad_align_beta}")
                self.past_ad_align_step = max_rollout_id
        self.training_data[agent_id] = AdAlignTrainingData(
            agent_id=agent_id,
            main_data=trajectory_batch,
            main_advantages=self.batch_advantages,
            alternative_advantages=BAAs if A > 0 else None,
        )

    def share_advantage_data(self) -> list[AdvantagePacket]:
        """
        Share the advantage alignment data with other agents.

        Returns:
            One `AdvantagePacket` per agent held by this trainer, containing
            the agent's rollout ids and main-trajectory advantages.
        """
        logger.info("Sharing advantage alignment data.")
        advantage_packets = []
        for _, agent_data in self.training_data.items():
            advantage_packets.append(
                AdvantagePacket(
                    agent_id=agent_data.agent_id,
                    rollout_ids=agent_data.main_data.rollout_ids,
                    main_advantages=agent_data.main_advantages,
                )
            )
        return advantage_packets

    def receive_advantage_data(self, advantage_packets: list[AdvantagePacket]):
        """
        Receive advantage packets from other players.

        These contain the advantages of the other players' rollouts estimated
        by them. For every agent held by this trainer, the co-agent advantages
        are matched per rollout id, combined with the agent's own advantages
        into advantage-alignment credits, and the resulting per-rollout
        credits are attached to the stored trajectory batch as
        `batch_credits`.
        """
        logger.info("Receiving advantage packets.")

        assert (
            len(advantage_packets) > 0
        ), "At least one advantage packet must be provided."

        for agent_id, agent_data in self.training_data.items():
            coagent_advantage_packets = [
                packet for packet in advantage_packets if packet.agent_id != agent_id
            ]
            agent_rollout_ids = agent_data.main_data.rollout_ids
            agent_advantages = agent_data.main_advantages
            co_agent_advantages = []
            for rollout_id in agent_rollout_ids:
                for co_agent_packet in coagent_advantage_packets:
                    if rollout_id in co_agent_packet.rollout_ids:
                        index = torch.where(rollout_id == co_agent_packet.rollout_ids)[
                            0
                        ].item()
                        co_agent_advantages.append(
                            co_agent_packet.main_advantages[index]
                        )
                        # assumes that its two player game, with one co-agent
                        break
            assert len(co_agent_advantages) == len(agent_advantages)
            B = len(agent_advantages)
            assert all(
                a.shape[0] == b.shape[0]
                for a, b in zip(co_agent_advantages, agent_advantages)
            ), "Number of advantages must match for advantage alignment."

            # Get padded tensors (advantage alignment is invariant to padding)
            lengths = torch.tensor(
                [len(t) for t in agent_advantages],
                device=self.device,
                dtype=torch.long,
            )
            padded_main_advantages = pad_sequence(
                agent_advantages, batch_first=True, padding_value=0.0
            )
            if agent_data.alternative_advantages:
                padded_alternative_advantages = pad_sequence(
                    agent_data.alternative_advantages,
                    batch_first=True,
                    padding_value=0.0,
                )  # (B, P, A)
            else:
                padded_alternative_advantages = None
            padded_co_agent_advantages = pad_sequence(
                co_agent_advantages, batch_first=True, padding_value=0.0
            )

            # Create training batch data
            credits, sub_tensors = get_advantage_alignment_credits(
                a1=padded_main_advantages,
                a1_alternative=padded_alternative_advantages,
                a2=padded_co_agent_advantages,
                beta=self.ad_align_beta,
                gamma=self.ad_align_gamma,
                exclude_k_equals_t=self.ad_align_exclude_k_equals_t,
                use_sign=self.ad_align_use_sign,
                clipping=self.ad_align_clipping,
                force_coop_first_step=self.ad_align_force_coop_first_step,
                use_old_ad_align=self.use_old_ad_align,
                use_time_regularization=self.use_time_regularization,
                rloo_branch=self.rloo_branch,
                reuse_baseline=self.reuse_baseline,
                mean_normalize_ad_align=self.mean_normalize_ad_align,
                whiten_adalign_advantages=self.whiten_adalign_advantages,
                whiten_adalign_advantages_time_step_wise=self.whiten_adalign_advantages_time_step_wise,
            )
            for key, value in sub_tensors.items():
                self.rollout_tally.add_metric(
                    path=[key],
                    rollout_tally_item=RolloutTallyItem(
                        crn_ids=agent_data.main_data.crn_ids,
                        rollout_ids=agent_data.main_data.rollout_ids,
                        agent_ids=agent_data.main_data.agent_ids,
                        metric_matrix=value,
                    ),
                )

            if not self.skip_discounted_state_visitation:
                credits = get_discounted_state_visitation_credits(
                    credits,
                    self.discount_factor,
                )
                # NOTE(review): this assumes `sub_tensors` always carries a
                # "discounted_state_visitation_credits" entry — verify in
                # get_advantage_alignment_credits.
                self.rollout_tally.add_metric(
                    path=["discounted_state_visitation_credits"],
                    rollout_tally_item=RolloutTallyItem(
                        crn_ids=agent_data.main_data.crn_ids,
                        rollout_ids=agent_data.main_data.rollout_ids,
                        agent_ids=agent_data.main_data.agent_ids,
                        metric_matrix=sub_tensors[
                            "discounted_state_visitation_credits"
                        ],
                    ),
                )

            # Slice back to jagged
            advantage_alignment_credits = [credits[i, : lengths[i]] for i in range(B)]
            # Replace stored training data for this agent by the concrete
            # trajectory batch and attach the computed credits for the policy
            # gradient.
            self.training_data[agent_id] = agent_data.main_data
            self.training_data[agent_id].batch_credits = advantage_alignment_credits
|
src_code_for_reproducibility/training/trainer_common.py
ADDED
|
@@ -0,0 +1,1054 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
TODO: Add coefficients for losses (depend on total number of tokens or batch)
|
| 3 |
+
TODO: adapt reinforce step for torch.compile
|
| 4 |
+
TODO: add lr schedulers support
|
| 5 |
+
"""
|
| 6 |
+
import logging
|
| 7 |
+
import os
|
| 8 |
+
import pickle
|
| 9 |
+
import sys
|
| 10 |
+
from abc import ABC, abstractmethod
|
| 11 |
+
from typing import Callable, Literal, Union
|
| 12 |
+
|
| 13 |
+
import numpy as np
|
| 14 |
+
import torch
|
| 15 |
+
import torch.nn.functional as F
|
| 16 |
+
from accelerate import Accelerator
|
| 17 |
+
from pandas._libs.tslibs.offsets import CBMonthBegin
|
| 18 |
+
from peft import LoraConfig
|
| 19 |
+
from torch.nn.utils.rnn import pad_sequence
|
| 20 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 21 |
+
|
| 22 |
+
from mllm.markov_games.rollout_tree import *
|
| 23 |
+
from mllm.markov_games.rollout_tree import RolloutTreeRootNode
|
| 24 |
+
from mllm.training.annealing_methods import sigmoid_annealing
|
| 25 |
+
from mllm.training.credit_methods import (
|
| 26 |
+
get_discounted_returns,
|
| 27 |
+
get_generalized_advantage_estimates,
|
| 28 |
+
get_rloo_credits,
|
| 29 |
+
whiten_advantages,
|
| 30 |
+
whiten_advantages_time_step_wise,
|
| 31 |
+
)
|
| 32 |
+
from mllm.training.tally_metrics import Tally
|
| 33 |
+
from mllm.training.tally_rollout import RolloutTally, RolloutTallyItem
|
| 34 |
+
from mllm.training.tally_tokenwise import ContextualizedTokenwiseTally
|
| 35 |
+
from mllm.training.tokenize_chats import *
|
| 36 |
+
from mllm.training.tokenize_chats import process_training_chat
|
| 37 |
+
from mllm.training.training_data_utils import *
|
| 38 |
+
from mllm.training.training_data_utils import (
|
| 39 |
+
TrainingBatch,
|
| 40 |
+
TrajectoryBatch,
|
| 41 |
+
get_tokenwise_credits,
|
| 42 |
+
)
|
| 43 |
+
from mllm.utils.resource_context import resource_logger_context
|
| 44 |
+
|
| 45 |
+
logger = logging.getLogger(__name__)
|
| 46 |
+
logger.addHandler(logging.StreamHandler(sys.stdout))
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
@dataclass
class TrainerAnnealingState:
    """Small persistent piece of trainer state that survives restarts.

    It is pickled to ``trainer_annealing_state.pkl`` under the trainer's
    ``save_path`` and reloaded on resume (see ``BaseTrainer.__init__``), so
    annealing schedules continue from where they left off.
    """

    # Number of annealing steps taken so far; drives schedules such as
    # the GAE-lambda annealing method.
    annealing_step_counter: int = 0
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class BaseTrainer(ABC):
|
| 55 |
+
"""
|
| 56 |
+
Trainer
|
| 57 |
+
"""
|
| 58 |
+
|
| 59 |
+
def __init__(
    self,
    policy: AutoModelForCausalLM,
    policy_optimizer: torch.optim.Optimizer,
    critic: Union[AutoModelForCausalLM, None],
    critic_optimizer: Union[torch.optim.Optimizer, None],
    tokenizer: AutoTokenizer,
    lr_scheduler: torch.optim.lr_scheduler.LRScheduler,
    critic_lr_scheduler: Union[torch.optim.lr_scheduler.LRScheduler, None],
    ######################################################################
    entropy_coeff: float,
    entropy_topk: int,
    entropy_mask_regex: Union[str, None],
    kl_coeff: float,
    gradient_clipping: Union[float, None],
    restrict_tokens: Union[list[str], None],
    mini_batch_size: int,
    use_gradient_checkpointing: bool,
    temperature: float,
    device: str,
    whiten_advantages: bool,
    whiten_advantages_time_step_wise: bool,
    use_gae: bool,
    use_gae_lambda_annealing: bool,
    gae_lambda_annealing_limit: float,
    gae_lambda_annealing_method: Literal["sigmoid_annealing"],
    gae_lambda_annealing_method_params: dict,
    pg_loss_normalization: Literal["batch", "nb_tokens"],
    use_rloo: bool,
    skip_discounted_state_visitation: bool,
    discount_factor: float,
    enable_tokenwise_logging: bool,
    save_path: str,
    reward_normalizing_constant: float = 1.0,
    critic_loss_type: Literal["mse", "huber"] = "huber",
    exploration_prompts_to_remove: Union[list[str], None] = None,
    filter_higher_refprob_tokens_kl: bool = False,
    truncated_importance_sampling_ratio_cap: float = 0.0,
    importance_sampling_strategy: Literal[
        "per_token", "per_sequence"
    ] = "per_token",
):
    """
    Initialize the REINFORCE trainer with reward shaping for multi-agent or
    single-agent training.

    Models and optimizers are wrapped by ``accelerate.Accelerator``; any
    previously saved annealing state and optimizer states found under
    ``save_path`` are restored so training can resume.

    Args:
        policy: The main policy model.
        policy_optimizer: Optimizer for the policy model.
        critic: Critic model for value estimation (optional, required for GAE).
        critic_optimizer: Optimizer for the critic model (optional).
        tokenizer: Tokenizer for the policy model.
        lr_scheduler: Learning-rate scheduler for the policy model.
        critic_lr_scheduler: Learning-rate scheduler for the critic (optional).
        entropy_coeff / entropy_topk / entropy_mask_regex: Entropy
            regularization strength, optional top-k restriction, and optional
            token-mask regex.
        kl_coeff: KL-to-reference-model regularization strength.
        gradient_clipping: Max gradient norm (None disables clipping).
        restrict_tokens: If set, logits are masked to these tokens plus EOS.
        mini_batch_size: Mini-batch size for gradient accumulation.
        use_gradient_checkpointing: Enable activation checkpointing.
        temperature: Sampling temperature applied to logits.
        device: Unused; placement is delegated to ``Accelerator`` (see below).
        whiten_advantages / whiten_advantages_time_step_wise: Advantage
            whitening options.
        use_gae / use_gae_lambda_annealing / gae_lambda_annealing_*: GAE
            configuration and optional lambda-annealing schedule.
        pg_loss_normalization: Normalize the PG loss per "batch" or "nb_tokens".
        use_rloo: Use RLOO baselines.
        skip_discounted_state_visitation: Skip state-visitation discounting.
        discount_factor: Reward discount factor (gamma).
        enable_tokenwise_logging: Collect per-token diagnostics.
        save_path: Directory for trainer/optimizer state checkpoints.
        reward_normalizing_constant: Scale applied to raw rewards.
        critic_loss_type: "mse" or "huber" critic regression loss.
        exploration_prompts_to_remove: Prompts stripped before training;
            None means an empty list (avoids a shared mutable default).
        filter_higher_refprob_tokens_kl: Zero the KL term on tokens where the
            reference model is more confident than the policy.
        truncated_importance_sampling_ratio_cap: Cap for truncated importance
            sampling ratios (0 disables TIS).
        importance_sampling_strategy: "per_token" or "per_sequence" ratios.
    """
    self.tokenizer = tokenizer
    # self.tokenizer.padding_side = "left" # needed for flash attention
    # Some tokenizers ship without a pad token; fall back to EOS so padding works.
    if self.tokenizer.pad_token_id is None:
        self.tokenizer.pad_token_id = self.tokenizer.eos_token_id
    self.lr_scheduler = lr_scheduler
    self.accelerator = Accelerator()
    (
        self.policy,
        self.policy_optimizer,
        self.critic,
        self.critic_optimizer,
    ) = self.accelerator.prepare(policy, policy_optimizer, critic, critic_optimizer)

    self.critic_lr_scheduler = critic_lr_scheduler

    if use_gradient_checkpointing:
        self.policy.gradient_checkpointing_enable(dict(use_reentrant=False))
        if critic is not None:
            self.critic.gradient_checkpointing_enable(dict(use_reentrant=False))

    self.save_path = save_path

    # ------------------------------------------------------------------
    # Resume from disk: annealing state, then policy/critic optimizer states.
    # ------------------------------------------------------------------
    self.trainer_annealing_state_path = os.path.join(
        self.save_path, "trainer_annealing_state.pkl"
    )
    if os.path.exists(self.trainer_annealing_state_path):
        logger.info(
            f"Loading trainer state from {self.trainer_annealing_state_path}"
        )
        # Context manager so the file handle is closed deterministically
        # (the original `pickle.load(open(...))` leaked the handle).
        with open(self.trainer_annealing_state_path, "rb") as state_file:
            self.trainer_annealing_state = pickle.load(state_file)
    else:
        self.trainer_annealing_state = TrainerAnnealingState()

    self.policy_optimizer_path = os.path.join(
        self.save_path, "policy_optimizer_state.pt"
    )
    if os.path.exists(self.policy_optimizer_path):
        logger.info(
            f"Loading policy optimizer state from {self.policy_optimizer_path}"
        )
        self.policy_optimizer.load_state_dict(
            torch.load(self.policy_optimizer_path)
        )

    self.critic_optimizer_path = os.path.join(
        self.save_path, "critic_optimizer_state.pt"
    )
    if (
        os.path.exists(self.critic_optimizer_path)
        and self.critic_optimizer is not None
    ):
        logger.info(
            f"Loading critic optimizer state from {self.critic_optimizer_path}"
        )
        self.critic_optimizer.load_state_dict(
            torch.load(self.critic_optimizer_path)
        )

    # The `device` argument is intentionally superseded: Accelerate owns placement.
    self.device = self.accelerator.device
    self.entropy_coeff = entropy_coeff
    self.entropy_topk = entropy_topk
    self.entropy_mask_regex = entropy_mask_regex
    self.kl_coeff = kl_coeff
    self.gradient_clipping = gradient_clipping
    self.restrict_tokens = restrict_tokens
    self.mini_batch_size = mini_batch_size
    self.use_gradient_checkpointing = use_gradient_checkpointing
    self.temperature = temperature
    self.use_gae = use_gae
    self.whiten_advantages = whiten_advantages
    self.whiten_advantages_time_step_wise = whiten_advantages_time_step_wise
    self.use_rloo = use_rloo
    self.skip_discounted_state_visitation = skip_discounted_state_visitation
    self.use_gae_lambda_annealing = use_gae_lambda_annealing
    self.gae_lambda_annealing_limit = gae_lambda_annealing_limit
    if use_gae_lambda_annealing:
        # NOTE(review): `eval` on a config-provided name is a code-injection
        # risk if the config is ever untrusted; prefer an explicit dispatch
        # table (e.g. {"sigmoid_annealing": sigmoid_annealing}).
        self.gae_lambda_annealing_method: Callable[
            [int], float
        ] = lambda step: eval(gae_lambda_annealing_method)(
            step=step, **gae_lambda_annealing_method_params
        )
    self.discount_factor = discount_factor
    self.enable_tokenwise_logging = enable_tokenwise_logging
    self.reward_normalizing_constant = reward_normalizing_constant
    self.pg_loss_normalization = pg_loss_normalization
    self.critic_loss_type = critic_loss_type
    # None sentinel instead of a mutable `[]` default: each instance gets
    # its own list rather than sharing one across calls.
    self.exploration_prompts_to_remove = (
        [] if exploration_prompts_to_remove is None else exploration_prompts_to_remove
    )
    # Common containers used by all trainers
    self.training_data: dict = {}
    self.debug_path_list: list[str] = []
    self.policy_gradient_data = None
    # Single assignment (the original constructed Tally() twice).
    self.tally = Tally()
    self.rollout_tally = RolloutTally()
    self.tokenwise_tally: Union[ContextualizedTokenwiseTally, None] = None
    self.filter_higher_refprob_tokens_kl = filter_higher_refprob_tokens_kl
    self.truncated_importance_sampling_ratio_cap = (
        truncated_importance_sampling_ratio_cap
    )
    self.importance_sampling_strategy = importance_sampling_strategy
|
| 218 |
+
|
| 219 |
+
def mask_non_restricted_token_logits(self, logits: torch.Tensor) -> torch.Tensor:
|
| 220 |
+
"""
|
| 221 |
+
Masks logits so that only allowed tokens (as specified in config.restrict_tokens)
|
| 222 |
+
and the EOS token are active.
|
| 223 |
+
All other logits are set to -inf, effectively removing them from the softmax.
|
| 224 |
+
|
| 225 |
+
Args:
|
| 226 |
+
logits (torch.Tensor): The logits tensor of shape (B, S, V).
|
| 227 |
+
|
| 228 |
+
Returns:
|
| 229 |
+
torch.Tensor: The masked logits tensor.
|
| 230 |
+
"""
|
| 231 |
+
# TODO: verify. Not sure what we do here is differentiable
|
| 232 |
+
# also, we recompute for nothing
|
| 233 |
+
|
| 234 |
+
if self.restrict_tokens is not None:
|
| 235 |
+
allowed_token_ids = []
|
| 236 |
+
for token in self.restrict_tokens:
|
| 237 |
+
token_ids = self.tokenizer(token, add_special_tokens=False)["input_ids"]
|
| 238 |
+
allowed_token_ids.append(token_ids[0])
|
| 239 |
+
allowed_token_ids.append(
|
| 240 |
+
self.tokenizer.eos_token_id
|
| 241 |
+
) # This token should always be active
|
| 242 |
+
allowed_token_ids = torch.tensor(allowed_token_ids, device=logits.device)
|
| 243 |
+
# Mask log_probs and probs to only allowed tokens
|
| 244 |
+
mask = torch.zeros_like(logits).bool() # (B, S, V)
|
| 245 |
+
mask[..., allowed_token_ids] = True
|
| 246 |
+
logits = torch.where(
|
| 247 |
+
mask,
|
| 248 |
+
logits,
|
| 249 |
+
torch.tensor(-float("inf"), device=logits.device),
|
| 250 |
+
)
|
| 251 |
+
|
| 252 |
+
return logits
|
| 253 |
+
|
| 254 |
+
# def get_gradient_magnitude(self, loss_term: torch.Tensor) -> float:
|
| 255 |
+
# """
|
| 256 |
+
# Computes the L2 norm of the gradients of the given loss term with respect to the model parameters.
|
| 257 |
+
|
| 258 |
+
# Args:
|
| 259 |
+
# loss_term (torch.Tensor): The loss tensor to compute gradients for.
|
| 260 |
+
|
| 261 |
+
# Returns:
|
| 262 |
+
# float: The L2 norm of the gradients, or 0.0 if no gradients are present.
|
| 263 |
+
# """
|
| 264 |
+
# with torch.no_grad():
|
| 265 |
+
# grads = torch.autograd.grad(
|
| 266 |
+
# loss_term,
|
| 267 |
+
# [p for p in self.policy.parameters() if p.requires_grad],
|
| 268 |
+
# retain_graph=True,
|
| 269 |
+
# allow_unused=True,
|
| 270 |
+
# )
|
| 271 |
+
# grads = [g for g in grads if g is not None]
|
| 272 |
+
# if not grads:
|
| 273 |
+
# return torch.tensor(0.0, device=loss_term.device)
|
| 274 |
+
# return torch.norm(torch.stack([g.norm(2) for g in grads])).item()
|
| 275 |
+
|
| 276 |
+
def apply_reinforce_step(
    self,
    training_batch: TrainingBatch,
) -> dict:
    """
    Apply a single REINFORCE policy-gradient step over a batch of rollouts.

    Iterates the batch in mini-batches of ``self.mini_batch_size``,
    accumulating gradients for the credit-weighted log-likelihood objective
    plus optional entropy and KL-to-reference regularizers, then clips
    gradients (if configured) and takes one optimizer step. Supports
    truncated importance sampling against the engine (sampling-time)
    log-probs, per-token or per-sequence.

    Args:
        training_batch (TrainingBatch): Tokenized rollouts carrying input
            ids, per-token credits, action masks, entropy masks, engine
            log-probs and timestep ids.

    Returns:
        dict: Running-mean diagnostics (losses, log-probs, clamp fractions,
        gradient norm, ...) accumulated across all mini-batches.
    """
    with resource_logger_context(logger, "Apply reinforce step"):
        self.policy.train()
        mb_size = self.mini_batch_size
        nb_rollouts = len(training_batch)

        # Initialize running mean logs
        running_mean_logs = {
            "rl_objective": 0.0,
            "policy_gradient_loss": 0.0,
            "policy_gradient_norm": 0.0,
            "log_probs": 0.0,
            "credits": 0.0,
            "entropy": 0.0,
            "engine_log_probs_diff_clampfrac": 0.0,
            "tis_imp_ratio": 0.0,
            "ref_log_probs_diff_clampfrac": 0.0,
            "higher_refprob_frac": 0.0,
            "tis_imp_ratio_clampfrac": 0.0,
        }
        # "entropy" is already initialized above; this re-set is redundant
        # but harmless.
        if self.entropy_coeff != 0.0:
            running_mean_logs["entropy"] = 0.0
        if self.kl_coeff != 0.0:
            running_mean_logs["kl_divergence"] = 0.0

        # Get total number of tokens generated
        total_tokens_generated = 0
        for att_mask in training_batch.batch_action_mask:
            total_tokens_generated += att_mask.sum()

        # Obtain loss normalization: either by total generated tokens or by
        # the number of gradient-accumulation mini-batches.
        if self.pg_loss_normalization == "nb_tokens":
            normalization_factor = total_tokens_generated
        elif self.pg_loss_normalization == "batch":
            normalization_factor = np.ceil(nb_rollouts / mb_size).astype(int)
        else:
            raise ValueError(
                f"Invalid pg_loss_normalization: {self.pg_loss_normalization}"
            )

        # Gradient accumulation for each mini-batch
        for mb in range(0, nb_rollouts, mb_size):
            logger.info(f"Processing mini-batch {mb} of {nb_rollouts}")
            loss = 0.0
            training_mb = training_batch[mb : mb + mb_size]
            training_mb = training_mb.get_padded_tensors()
            training_mb.to(self.device)
            (
                tokens_mb,
                action_mask_mb,
                entropy_mask_mb,
                credits_mb,
                engine_log_probs_mb,
                timesteps_mb,
            ) = (
                training_mb.batch_input_ids,
                training_mb.batch_action_mask,
                training_mb.batch_entropy_mask,
                training_mb.batch_credits,
                training_mb.batch_engine_log_probs,
                training_mb.batch_timesteps,
            )

            # Next-token prediction alignment: predict token t+1 from prefix
            # ending at t, so all per-token tensors are shifted by one.
            contexts_mb = tokens_mb[:, :-1]
            shifted_contexts_mb = tokens_mb[:, 1:]
            action_mask_mb = action_mask_mb[:, 1:]
            entropy_mask_mb = entropy_mask_mb[:, 1:]
            credits_mb = credits_mb[:, 1:]
            engine_log_probs_mb = engine_log_probs_mb[:, 1:]
            timesteps_mb = timesteps_mb[:, 1:]

            if self.enable_tokenwise_logging:
                self.tokenwise_tally.set_action_mask(action_mask=action_mask_mb)
                self.tokenwise_tally.set_range(range=(mb, mb + mb_size))
                self.tokenwise_tally.add_contexts(contexts=contexts_mb)
                self.tokenwise_tally.add_data(
                    metric_id="next_token",
                    metrics=shifted_contexts_mb,
                    to_tids=True,
                )
                self.tokenwise_tally.add_data(
                    metric_id="entropy_mask",
                    metrics=entropy_mask_mb,
                )

            if self.enable_tokenwise_logging:
                self.tokenwise_tally.add_data(
                    metric_id="next_token_credit", metrics=credits_mb
                )

            # Forward pass + cast to FP-32 for higher prec.
            # TODO: create attention mask if not relying on default (assume causal llm)
            logits = self.policy(input_ids=contexts_mb)[0]  # (B, S, V)

            # Mask non-restricted tokens
            if self.restrict_tokens is not None:
                logits = self.mask_non_restricted_token_logits(logits)

            logits /= self.temperature  # (B, S, V)

            # Compute new log probabilities
            log_probs = F.log_softmax(logits, dim=-1)  # (B, S, V)

            # Get log probabilities of actions taken during rollouts
            action_log_probs = log_probs.gather(
                dim=-1, index=shifted_contexts_mb.unsqueeze(-1)
            ).squeeze(
                -1
            )  # (B, S)
            # Denominator so that summed per-token contributions average out
            # to a running mean consistent with the chosen normalization.
            if self.pg_loss_normalization == "batch":
                den_running_mean = action_mask_mb.sum() * normalization_factor
            else:
                den_running_mean = normalization_factor
            running_mean_logs["log_probs"] += (
                action_log_probs * action_mask_mb
            ).sum().item() / den_running_mean
            running_mean_logs["credits"] += (
                credits_mb * action_mask_mb
            ).sum().item() / den_running_mean

            if self.enable_tokenwise_logging:
                self.tokenwise_tally.add_data(
                    metric_id="next_token_log_prob",
                    metrics=action_log_probs,
                )
                self.tokenwise_tally.add_data(
                    metric_id="engine_next_token_log_prob",
                    metrics=engine_log_probs_mb,
                )
                self.tokenwise_tally.add_data(
                    metric_id="next_token_prob",
                    metrics=torch.exp(action_log_probs),
                )
                top_k_indices = torch.topk(logits, k=5, dim=-1).indices
                self.tokenwise_tally.add_data(
                    metric_id=f"top_{5}_tids",
                    metrics=top_k_indices,
                    to_tids=True,
                )
                self.tokenwise_tally.add_data(
                    metric_id=f"top_{5}_probs",
                    metrics=torch.exp(log_probs).gather(
                        dim=-1, index=top_k_indices
                    ),
                )

            # Credit-weighted log-likelihood: the REINFORCE surrogate term.
            rewarded_action_log_probs = (
                action_mask_mb * credits_mb * action_log_probs
            )
            # (B, S)
            # Sentinel written into masked (non-action) positions; any
            # positive value works since those positions are excluded later.
            INVALID_LOGPROB = 1.0
            CLAMP_VALUE = 40.0
            masked_action_log_probs = torch.masked_fill(
                action_log_probs, ~action_mask_mb, INVALID_LOGPROB
            )
            masked_engine_log_probs = torch.masked_fill(
                engine_log_probs_mb, ~action_mask_mb, INVALID_LOGPROB
            )
            # Importance-sampling ratios are treated as constants (no grad).
            with torch.no_grad():
                action_engine_log_probs_diff = (
                    masked_action_log_probs - masked_engine_log_probs
                ).clamp(-CLAMP_VALUE, CLAMP_VALUE)
                running_mean_logs["engine_log_probs_diff_clampfrac"] += (
                    action_engine_log_probs_diff.abs()
                    .eq(CLAMP_VALUE)
                    .float()
                    .sum()
                    .item()
                    / den_running_mean
                )
                if self.importance_sampling_strategy == "per_sequence":
                    # Per-sequence: sum log-ratio over each timestep's tokens,
                    # exponentiate once, and broadcast the ratio back to the
                    # tokens of that timestep.
                    tis_imp_ratio = torch.zeros_like(action_engine_log_probs_diff)
                    for mb_idx in range(action_engine_log_probs_diff.shape[0]):
                        valid_token_mask = action_mask_mb[mb_idx]
                        timestep_ids = timesteps_mb[mb_idx][valid_token_mask]
                        timestep_logprob_diffs = action_engine_log_probs_diff[mb_idx][
                            valid_token_mask
                        ]
                        max_timestep = int(timestep_ids.max().item()) + 1
                        timestep_sums = torch.zeros(
                            max_timestep,
                            device=action_engine_log_probs_diff.device,
                            dtype=action_engine_log_probs_diff.dtype,
                        )
                        timestep_sums.scatter_add_(
                            0, timestep_ids, timestep_logprob_diffs
                        )
                        timestep_ratios = torch.exp(timestep_sums)
                        tis_imp_ratio[
                            mb_idx, valid_token_mask
                        ] = timestep_ratios.gather(0, timestep_ids)
                else:
                    tis_imp_ratio = torch.exp(action_engine_log_probs_diff)
                running_mean_logs["tis_imp_ratio"] += (
                    tis_imp_ratio * action_mask_mb
                ).sum().item() / den_running_mean
                if self.truncated_importance_sampling_ratio_cap > 0.0:
                    # Truncated importance sampling: cap ratios from above.
                    tis_imp_ratio = torch.clamp(
                        tis_imp_ratio, max=self.truncated_importance_sampling_ratio_cap
                    )
                    running_mean_logs["tis_imp_ratio_clampfrac"] += (
                        tis_imp_ratio.eq(self.truncated_importance_sampling_ratio_cap)
                        .float()
                        .sum()
                        .item()
                    ) / den_running_mean
            # Outside no_grad: keeps the gradient path through
            # action_log_probs while the ratio acts as a constant weight.
            rewarded_action_log_probs = (
                rewarded_action_log_probs * tis_imp_ratio
            )

            if self.enable_tokenwise_logging:
                self.tokenwise_tally.add_data(
                    metric_id="next_token_clogπ",
                    metrics=rewarded_action_log_probs,
                )

            # Add value term to loss
            if self.pg_loss_normalization == "batch":
                nb_act_tokens = action_mask_mb.sum()
                mb_value = -rewarded_action_log_probs.sum() / nb_act_tokens
            else:
                mb_value = -rewarded_action_log_probs.sum()

            loss += mb_value
            running_mean_logs["rl_objective"] += mb_value.item() / den_running_mean

            # -------------------------------------------------
            # Entropy Regularization
            # -------------------------------------------------
            # Only apply entropy on distribution defined over most probable tokens
            if self.entropy_topk is not None:
                top_k_indices = torch.topk(
                    logits, k=self.entropy_topk, dim=-1
                ).indices
                entropy_logits = logits.gather(dim=-1, index=top_k_indices)
            else:
                entropy_logits = logits

            token_entropy_terms = -F.softmax(
                entropy_logits, dim=-1
            ) * F.log_softmax(
                entropy_logits, dim=-1
            )  # (B, S, T)
            token_entropy_terms *= (
                action_mask_mb[:, :, None] * entropy_mask_mb[:, :, None]
            )  # only get loss on specific action tokens

            mb_entropy = token_entropy_terms.sum(dim=-1)

            if self.enable_tokenwise_logging:
                self.tokenwise_tally.add_data(
                    metric_id="entropy",
                    metrics=mb_entropy,
                )
            if self.pg_loss_normalization == "batch":
                nb_act_tokens = action_mask_mb.sum()
                mb_entropy = -mb_entropy.sum() / nb_act_tokens
            else:
                mb_entropy = -mb_entropy.sum()
            # Entropy is always logged, even when the coefficient is zero.
            running_mean_logs["entropy"] += -mb_entropy.item() / den_running_mean
            if self.entropy_coeff != 0.0:
                mb_entropy *= self.entropy_coeff
                loss += mb_entropy

            # -------------------------------------------------
            # KL-DIVERGENCE
            # -------------------------------------------------
            if self.kl_coeff != 0.0:
                ref_model_logits = self.policy.get_base_model_logits(contexts_mb)
                ref_model_logits = ref_model_logits / self.temperature
                # (B, S, V)
                ref_model_logits = self.mask_non_restricted_token_logits(
                    logits=ref_model_logits
                )
                # (B, S, V)
                ref_model_log_probs = F.log_softmax(ref_model_logits, dim=-1)
                # (B, S, V)
                ref_model_action_log_probs = ref_model_log_probs.gather(
                    dim=-1, index=shifted_contexts_mb.unsqueeze(-1)
                ).squeeze(
                    -1
                )  # (B,S)
                # Approximating KL Divergence (see refs in docstring)
                # Ref 1: http://joschu.net/blog/kl-approx.html
                # Ref 2: https://github.dev/huggingface/trl/blob/main/trl/trainer/grpo_trainer.py#L1332
                masked_ref_model_action_log_probs = torch.masked_fill(
                    ref_model_action_log_probs, ~action_mask_mb, INVALID_LOGPROB
                )
                action_log_probs_diff = (
                    masked_ref_model_action_log_probs - masked_action_log_probs
                ).clamp(-CLAMP_VALUE, CLAMP_VALUE)
                running_mean_logs["ref_log_probs_diff_clampfrac"] += (
                    action_log_probs_diff.abs().eq(CLAMP_VALUE).float().sum().item()
                    / den_running_mean
                )
                if self.filter_higher_refprob_tokens_kl:
                    # Drop KL on tokens where the reference model already
                    # assigns higher probability than the policy.
                    higher_refprob_tokens_mask = action_log_probs_diff > 0.0
                    running_mean_logs["higher_refprob_frac"] += (
                        higher_refprob_tokens_mask.sum().item() / den_running_mean
                    )
                    action_log_probs_diff = action_log_probs_diff * (
                        ~higher_refprob_tokens_mask
                    )
                # k3 estimator: exp(d) - 1 - d  (non-negative, low variance).
                kl_div = torch.expm1(action_log_probs_diff) - action_log_probs_diff
                kl_div *= action_mask_mb  # We only care about KLD of action tokens
                if self.truncated_importance_sampling_ratio_cap > 0.0:
                    kl_div = kl_div * tis_imp_ratio
                kl_div *= self.kl_coeff
                if self.enable_tokenwise_logging:
                    self.tokenwise_tally.add_data(
                        metric_id="ref_model_next_token_log_prob",
                        metrics=ref_model_action_log_probs,
                    )
                    self.tokenwise_tally.add_data(
                        metric_id="kl_divergence",
                        metrics=kl_div,
                    )

                if self.pg_loss_normalization == "batch":
                    nb_act_tokens = action_mask_mb.sum()
                    mb_kl = kl_div.sum() / nb_act_tokens
                else:
                    mb_kl = kl_div.sum()
                running_mean_logs["kl_divergence"] += (
                    mb_kl.item() / den_running_mean
                )
                loss += mb_kl

            # Accumulate gradient
            running_mean_logs["policy_gradient_loss"] += (
                loss.item() / den_running_mean
            )
            loss /= normalization_factor
            self.accelerator.backward(loss)

            # ensure gpu memory is freed
            del training_mb
            del log_probs
            del logits
            del loss
            del action_log_probs
            del rewarded_action_log_probs

        logger.info(
            f"Accumulated the policy gradient loss for {total_tokens_generated} tokens."
        )

        # Clip gradients and take step
        if self.gradient_clipping is not None:
            grad_norm = self.accelerator.clip_grad_norm_(
                self.policy.parameters(), self.gradient_clipping
            )
            running_mean_logs["policy_gradient_norm"] += grad_norm.item()

        # Take step
        self.policy_optimizer.step()
        self.policy_optimizer.zero_grad()

        # Store logs
        for key, value in running_mean_logs.items():
            self.tally.add_metric(path=key, metric=value)

        # Clear
        # TODO: verify
        self.accelerator.clear(self.policy, self.policy_optimizer)
        import gc

        gc.collect()
        torch.cuda.empty_cache()
        return running_mean_logs
|
| 661 |
+
|
| 662 |
+
def get_advantages_with_critic_gradient_accumulation(
|
| 663 |
+
self, trajectories: TrajectoryBatch, critic_loss_scaling_factor: float = 2.0
|
| 664 |
+
) -> torch.FloatTensor:
|
| 665 |
+
"""
|
| 666 |
+
TOWRITE
|
| 667 |
+
Uses GAE if enabled, otherwise uses Monte Carlo returns.
|
| 668 |
+
Optionally trains the critic if GAE is used.
|
| 669 |
+
Returns:
|
| 670 |
+
advantages: NestedFloatTensors
|
| 671 |
+
"""
|
| 672 |
+
|
| 673 |
+
mb_size = self.mini_batch_size
|
| 674 |
+
batch_size = trajectories.rollout_ids.shape[0]
|
| 675 |
+
agent_id = trajectories.agent_ids[0]
|
| 676 |
+
batch_rewards = trajectories.batch_rewards
|
| 677 |
+
|
| 678 |
+
######################################
|
| 679 |
+
# use critic for advantage estimation
|
| 680 |
+
######################################
|
| 681 |
+
if self.use_gae:
|
| 682 |
+
if "buffer" in agent_id:
|
| 683 |
+
self.critic.eval()
|
| 684 |
+
training = False
|
| 685 |
+
else:
|
| 686 |
+
self.critic.train()
|
| 687 |
+
training = True
|
| 688 |
+
advantages = []
|
| 689 |
+
# critic_loss_scaling_factor comes learning single critic for two agents
|
| 690 |
+
normalization_factor = (
|
| 691 |
+
np.ceil(batch_size / mb_size).astype(int) * critic_loss_scaling_factor
|
| 692 |
+
)
|
| 693 |
+
# For each minibatch
|
| 694 |
+
for mb in range(0, batch_size, mb_size):
|
| 695 |
+
trajectory_mb = trajectories[mb : mb + mb_size]
|
| 696 |
+
trajectory_mb.to(self.device)
|
| 697 |
+
rewards_mb = trajectory_mb.batch_rewards
|
| 698 |
+
(
|
| 699 |
+
tokens_mb,
|
| 700 |
+
state_ends_mask_mb,
|
| 701 |
+
timestep_counts,
|
| 702 |
+
) = trajectory_mb.get_padded_tensors_for_critic()
|
| 703 |
+
# critic causal attention up to end flags
|
| 704 |
+
if training:
|
| 705 |
+
vals_estimate_full = self.critic(tokens_mb)
|
| 706 |
+
else:
|
| 707 |
+
with torch.no_grad():
|
| 708 |
+
vals_estimate_full = self.critic(tokens_mb)
|
| 709 |
+
|
| 710 |
+
# if vals_estimate_full.dim() == 3:
|
| 711 |
+
# vals_estimate_full = vals_estimate_full.squeeze(-1)
|
| 712 |
+
|
| 713 |
+
# Select only positions where states end, per sample → list of (jT,)
|
| 714 |
+
B = tokens_mb.shape[0]
|
| 715 |
+
vals_list = [
|
| 716 |
+
vals_estimate_full[b][state_ends_mask_mb[b]] for b in range(B)
|
| 717 |
+
]
|
| 718 |
+
|
| 719 |
+
# Pad to (B, max_jT) = (B, S)
|
| 720 |
+
vals_estimate_mb = pad_sequence(
|
| 721 |
+
vals_list, batch_first=True, padding_value=0.0
|
| 722 |
+
)
|
| 723 |
+
dtype = vals_estimate_mb.dtype
|
| 724 |
+
rewards_mb = pad_sequence(
|
| 725 |
+
rewards_mb, batch_first=True, padding_value=0.0
|
| 726 |
+
).to(
|
| 727 |
+
dtype=dtype
|
| 728 |
+
) # (B, S)
|
| 729 |
+
self.rollout_tally.add_metric(
|
| 730 |
+
path=["batch_rewards"],
|
| 731 |
+
rollout_tally_item=RolloutTallyItem(
|
| 732 |
+
crn_ids=trajectory_mb.crn_ids,
|
| 733 |
+
rollout_ids=trajectory_mb.rollout_ids,
|
| 734 |
+
agent_ids=trajectory_mb.agent_ids,
|
| 735 |
+
metric_matrix=rewards_mb,
|
| 736 |
+
),
|
| 737 |
+
)
|
| 738 |
+
if self.reward_normalizing_constant != 1.0:
|
| 739 |
+
rewards_mb /= self.reward_normalizing_constant
|
| 740 |
+
|
| 741 |
+
det_vals_estimate_mb = vals_estimate_mb.detach() # (B, max_jT)
|
| 742 |
+
self.rollout_tally.add_metric(
|
| 743 |
+
path=["mb_value_estimates_critic"],
|
| 744 |
+
rollout_tally_item=RolloutTallyItem(
|
| 745 |
+
crn_ids=trajectory_mb.crn_ids,
|
| 746 |
+
rollout_ids=trajectory_mb.rollout_ids,
|
| 747 |
+
agent_ids=trajectory_mb.agent_ids,
|
| 748 |
+
metric_matrix=det_vals_estimate_mb,
|
| 749 |
+
),
|
| 750 |
+
)
|
| 751 |
+
|
| 752 |
+
# Append a 0 value to the end of the value estimates
|
| 753 |
+
if det_vals_estimate_mb.shape[1] == rewards_mb.shape[1]:
|
| 754 |
+
Bsize = det_vals_estimate_mb.shape[0]
|
| 755 |
+
device = det_vals_estimate_mb.device
|
| 756 |
+
dtype = det_vals_estimate_mb.dtype
|
| 757 |
+
det_vals_estimate_mb = torch.cat(
|
| 758 |
+
[
|
| 759 |
+
det_vals_estimate_mb,
|
| 760 |
+
torch.zeros((Bsize, 1), device=device, dtype=dtype),
|
| 761 |
+
],
|
| 762 |
+
dim=1,
|
| 763 |
+
) # (B, max_jT+1)
|
| 764 |
+
else:
|
| 765 |
+
raise ValueError(
|
| 766 |
+
"Incompatible shapes for value estimates and rewards."
|
| 767 |
+
)
|
| 768 |
+
|
| 769 |
+
# Get annealed lambda
|
| 770 |
+
if self.use_gae_lambda_annealing:
|
| 771 |
+
annealing_constant = self.gae_lambda_annealing_method(
|
| 772 |
+
step=self.trainer_annealing_state.annealing_step_counter
|
| 773 |
+
)
|
| 774 |
+
annealed_lambda = (
|
| 775 |
+
self.gae_lambda_annealing_limit * annealing_constant
|
| 776 |
+
)
|
| 777 |
+
self.tally.add_metric(
|
| 778 |
+
path="annealed_lambda", metric=annealed_lambda
|
| 779 |
+
)
|
| 780 |
+
else:
|
| 781 |
+
annealed_lambda = self.gae_lambda_annealing_limit
|
| 782 |
+
|
| 783 |
+
# Get GAE advantages
|
| 784 |
+
gae_advantages = get_generalized_advantage_estimates(
|
| 785 |
+
rewards=rewards_mb,
|
| 786 |
+
value_estimates=det_vals_estimate_mb,
|
| 787 |
+
discount_factor=self.discount_factor,
|
| 788 |
+
lambda_coef=annealed_lambda,
|
| 789 |
+
) # (B, max_jT)
|
| 790 |
+
self.rollout_tally.add_metric(
|
| 791 |
+
path=["mb_gae_advantages"],
|
| 792 |
+
rollout_tally_item=RolloutTallyItem(
|
| 793 |
+
crn_ids=trajectory_mb.crn_ids,
|
| 794 |
+
rollout_ids=trajectory_mb.rollout_ids,
|
| 795 |
+
agent_ids=trajectory_mb.agent_ids,
|
| 796 |
+
metric_matrix=gae_advantages,
|
| 797 |
+
),
|
| 798 |
+
)
|
| 799 |
+
if training:
|
| 800 |
+
targets = (
|
| 801 |
+
gae_advantages.to(dtype=dtype) + det_vals_estimate_mb[:, :-1]
|
| 802 |
+
) # (B, max_jT) # A(s, a, b) + V(s) = Q(s, a, b)
|
| 803 |
+
self.rollout_tally.add_metric(
|
| 804 |
+
path=["mb_targets_critic"],
|
| 805 |
+
rollout_tally_item=RolloutTallyItem(
|
| 806 |
+
crn_ids=trajectory_mb.crn_ids,
|
| 807 |
+
rollout_ids=trajectory_mb.rollout_ids,
|
| 808 |
+
agent_ids=trajectory_mb.agent_ids,
|
| 809 |
+
metric_matrix=targets,
|
| 810 |
+
),
|
| 811 |
+
)
|
| 812 |
+
if self.critic_loss_type == "mse":
|
| 813 |
+
loss = F.mse_loss(
|
| 814 |
+
input=vals_estimate_mb,
|
| 815 |
+
target=targets,
|
| 816 |
+
)
|
| 817 |
+
elif self.critic_loss_type == "huber":
|
| 818 |
+
loss = F.huber_loss(
|
| 819 |
+
input=vals_estimate_mb,
|
| 820 |
+
target=targets,
|
| 821 |
+
)
|
| 822 |
+
self.tally.add_metric(path=["mb_critic_loss"], metric=loss.item())
|
| 823 |
+
# Accumulate gradient
|
| 824 |
+
loss /= normalization_factor
|
| 825 |
+
self.accelerator.backward(loss)
|
| 826 |
+
del loss
|
| 827 |
+
del targets
|
| 828 |
+
del vals_estimate_mb
|
| 829 |
+
del trajectory_mb
|
| 830 |
+
del vals_estimate_full
|
| 831 |
+
|
| 832 |
+
# Get jagged back using timestep_counts
|
| 833 |
+
advantages.extend(
|
| 834 |
+
[gae_advantages[i, : timestep_counts[i]] for i in range(B)]
|
| 835 |
+
)
|
| 836 |
+
|
| 837 |
+
######################################
|
| 838 |
+
# use exclusively Monte Carlo returns & rloo for advantage estimation
|
| 839 |
+
######################################
|
| 840 |
+
else:
|
| 841 |
+
lengths = [len(c) for c in batch_rewards]
|
| 842 |
+
padded_rewards = pad_sequence(
|
| 843 |
+
batch_rewards, batch_first=True, padding_value=0.0
|
| 844 |
+
)
|
| 845 |
+
self.rollout_tally.add_metric(
|
| 846 |
+
path=["mb_rewards"],
|
| 847 |
+
rollout_tally_item=RolloutTallyItem(
|
| 848 |
+
crn_ids=trajectories.crn_ids,
|
| 849 |
+
rollout_ids=trajectories.rollout_ids,
|
| 850 |
+
agent_ids=trajectories.agent_ids,
|
| 851 |
+
metric_matrix=padded_rewards,
|
| 852 |
+
),
|
| 853 |
+
)
|
| 854 |
+
if self.reward_normalizing_constant != 1.0:
|
| 855 |
+
padded_rewards /= self.reward_normalizing_constant
|
| 856 |
+
padded_advantages = get_discounted_returns(
|
| 857 |
+
rewards=padded_rewards,
|
| 858 |
+
discount_factor=self.discount_factor,
|
| 859 |
+
) # no baseline for now
|
| 860 |
+
if self.use_rloo:
|
| 861 |
+
is_grouped_by_rng = (
|
| 862 |
+
trajectories.crn_ids.unique().shape[0]
|
| 863 |
+
!= trajectories.crn_ids.shape[0]
|
| 864 |
+
)
|
| 865 |
+
if is_grouped_by_rng:
|
| 866 |
+
for crn_id in trajectories.crn_ids.unique():
|
| 867 |
+
rng_mask = trajectories.crn_ids == crn_id
|
| 868 |
+
rng_advantages = padded_advantages[rng_mask]
|
| 869 |
+
rng_advantages, _ = get_rloo_credits(credits=rng_advantages)
|
| 870 |
+
padded_advantages[rng_mask] = rng_advantages
|
| 871 |
+
else:
|
| 872 |
+
padded_advantages, _ = get_rloo_credits(credits=padded_advantages)
|
| 873 |
+
self.rollout_tally.add_metric(
|
| 874 |
+
path=["mb_rloo_advantages"],
|
| 875 |
+
rollout_tally_item=RolloutTallyItem(
|
| 876 |
+
crn_ids=trajectories.crn_ids,
|
| 877 |
+
rollout_ids=trajectories.rollout_ids,
|
| 878 |
+
agent_ids=trajectories.agent_ids,
|
| 879 |
+
metric_matrix=padded_advantages,
|
| 880 |
+
),
|
| 881 |
+
)
|
| 882 |
+
advantages = [
|
| 883 |
+
padded_advantages[i, : lengths[i]]
|
| 884 |
+
for i in range(padded_advantages.shape[0])
|
| 885 |
+
]
|
| 886 |
+
|
| 887 |
+
if self.whiten_advantages_time_step_wise or self.whiten_advantages:
|
| 888 |
+
lengths = [len(c) for c in advantages]
|
| 889 |
+
padded_advantages = pad_sequence(
|
| 890 |
+
advantages, batch_first=True, padding_value=0.0
|
| 891 |
+
)
|
| 892 |
+
if self.whiten_advantages_time_step_wise:
|
| 893 |
+
whitened_padded_advantages = whiten_advantages_time_step_wise(
|
| 894 |
+
padded_advantages
|
| 895 |
+
)
|
| 896 |
+
path = ["mb_whitened_advantages_time_step_wise"]
|
| 897 |
+
elif self.whiten_advantages:
|
| 898 |
+
whitened_padded_advantages = whiten_advantages(padded_advantages)
|
| 899 |
+
path = ["mb_whitened_advantages"]
|
| 900 |
+
self.rollout_tally.add_metric(
|
| 901 |
+
path=path,
|
| 902 |
+
rollout_tally_item=RolloutTallyItem(
|
| 903 |
+
crn_ids=trajectories.crn_ids,
|
| 904 |
+
rollout_ids=trajectories.rollout_ids,
|
| 905 |
+
agent_ids=trajectories.agent_ids,
|
| 906 |
+
metric_matrix=whitened_padded_advantages,
|
| 907 |
+
),
|
| 908 |
+
)
|
| 909 |
+
advantages = [
|
| 910 |
+
whitened_padded_advantages[i, : lengths[i]]
|
| 911 |
+
for i in range(whitened_padded_advantages.shape[0])
|
| 912 |
+
]
|
| 913 |
+
|
| 914 |
+
self.trainer_annealing_state.annealing_step_counter += 1
|
| 915 |
+
|
| 916 |
+
return advantages
|
| 917 |
+
|
| 918 |
+
@abstractmethod
|
| 919 |
+
def set_agent_trajectory_data(
|
| 920 |
+
self, agent_id: str, roots: list[RolloutTreeRootNode]
|
| 921 |
+
) -> None:
|
| 922 |
+
"""
|
| 923 |
+
TOWRITE
|
| 924 |
+
"""
|
| 925 |
+
pass
|
| 926 |
+
|
| 927 |
+
def set_trajectory_data(
|
| 928 |
+
self, roots: list[RolloutTreeRootNode], agent_ids: list[str]
|
| 929 |
+
) -> None:
|
| 930 |
+
"""
|
| 931 |
+
TOWRITE
|
| 932 |
+
"""
|
| 933 |
+
for agent_id in agent_ids:
|
| 934 |
+
self.set_agent_trajectory_data(agent_id, roots)
|
| 935 |
+
|
| 936 |
+
@abstractmethod
|
| 937 |
+
def share_advantage_data(self) -> list[AdvantagePacket]:
|
| 938 |
+
pass
|
| 939 |
+
|
| 940 |
+
@abstractmethod
|
| 941 |
+
def receive_advantage_data(self, advantage_packets: list[AdvantagePacket]) -> None:
|
| 942 |
+
pass
|
| 943 |
+
|
| 944 |
+
def set_policy_gradient_data(self, agent_ids: list[str]) -> None:
|
| 945 |
+
"""
|
| 946 |
+
Already set earlier # TODO: make it separate and clean
|
| 947 |
+
"""
|
| 948 |
+
self.policy_gradient_data = None
|
| 949 |
+
# for agent_id, trajectory_batch in self.training_data.items():
|
| 950 |
+
# if "buffer" in agent_id:
|
| 951 |
+
# continue
|
| 952 |
+
for agent_id in agent_ids:
|
| 953 |
+
assert "buffer" not in agent_id, "Buffer agents do not train policy"
|
| 954 |
+
trajectory_batch = self.training_data[agent_id]
|
| 955 |
+
tokenwise_batch_credits = get_tokenwise_credits(
|
| 956 |
+
batch_timesteps=trajectory_batch.batch_timesteps,
|
| 957 |
+
batch_credits=trajectory_batch.batch_credits,
|
| 958 |
+
)
|
| 959 |
+
policy_gradient_data = TrainingBatch(
|
| 960 |
+
rollout_ids=trajectory_batch.rollout_ids,
|
| 961 |
+
batch_input_ids=trajectory_batch.batch_input_ids,
|
| 962 |
+
batch_action_mask=trajectory_batch.batch_action_mask,
|
| 963 |
+
batch_entropy_mask=trajectory_batch.batch_entropy_mask,
|
| 964 |
+
batch_credits=tokenwise_batch_credits,
|
| 965 |
+
batch_engine_log_probs=trajectory_batch.batch_engine_log_probs,
|
| 966 |
+
batch_timesteps=trajectory_batch.batch_timesteps,
|
| 967 |
+
)
|
| 968 |
+
if self.policy_gradient_data is None:
|
| 969 |
+
self.policy_gradient_data = policy_gradient_data
|
| 970 |
+
else:
|
| 971 |
+
self.policy_gradient_data.append(policy_gradient_data)
|
| 972 |
+
|
| 973 |
+
self.training_data = {}
|
| 974 |
+
self.tokenwise_tally = ContextualizedTokenwiseTally(
|
| 975 |
+
tokenizer=self.tokenizer,
|
| 976 |
+
paths=self.debug_path_list,
|
| 977 |
+
)
|
| 978 |
+
|
| 979 |
+
def train(self) -> None:
|
| 980 |
+
"""
|
| 981 |
+
TOWRITE
|
| 982 |
+
"""
|
| 983 |
+
assert self.policy_gradient_data is not None, "Policy gradient data is not set"
|
| 984 |
+
if self.critic_optimizer is not None:
|
| 985 |
+
if self.gradient_clipping is not None:
|
| 986 |
+
grad_norm = self.accelerator.clip_grad_norm_(
|
| 987 |
+
self.critic.parameters(), self.gradient_clipping
|
| 988 |
+
)
|
| 989 |
+
self.tally.add_metric(
|
| 990 |
+
path="gradient_norm_critic", metric=grad_norm.item()
|
| 991 |
+
)
|
| 992 |
+
# Take step
|
| 993 |
+
self.critic_optimizer.step()
|
| 994 |
+
self.critic_optimizer.zero_grad()
|
| 995 |
+
self.accelerator.clear(self.critic, self.critic_optimizer)
|
| 996 |
+
import gc
|
| 997 |
+
|
| 998 |
+
gc.collect()
|
| 999 |
+
torch.cuda.empty_cache()
|
| 1000 |
+
running_mean_logs = self.apply_reinforce_step(
|
| 1001 |
+
training_batch=self.policy_gradient_data
|
| 1002 |
+
)
|
| 1003 |
+
return running_mean_logs
|
| 1004 |
+
|
| 1005 |
+
def export_training_tally(self, identifier: str, folder: str) -> None:
|
| 1006 |
+
"""
|
| 1007 |
+
Saves and resets the collected training metrics using the tally object.
|
| 1008 |
+
"""
|
| 1009 |
+
os.makedirs(folder, exist_ok=True)
|
| 1010 |
+
self.tally.save(identifier=identifier, folder=folder)
|
| 1011 |
+
self.tokenwise_tally.save(
|
| 1012 |
+
path=os.path.join(folder, f"{identifier}_tokenwise.csv")
|
| 1013 |
+
)
|
| 1014 |
+
self.rollout_tally.save(identifier=identifier, folder=folder)
|
| 1015 |
+
self.tally.reset()
|
| 1016 |
+
self.tokenwise_tally = None
|
| 1017 |
+
self.rollout_tally.reset()
|
| 1018 |
+
self.debug_path_list = []
|
| 1019 |
+
|
| 1020 |
+
def export_optimizer_states(self) -> None:
|
| 1021 |
+
"""
|
| 1022 |
+
Saves the optimizer states for both the main model and critic (if it exists).
|
| 1023 |
+
"""
|
| 1024 |
+
try:
|
| 1025 |
+
os.makedirs(self.save_path, exist_ok=True)
|
| 1026 |
+
|
| 1027 |
+
torch.save(self.policy_optimizer.state_dict(), self.policy_optimizer_path)
|
| 1028 |
+
logger.info(f"Saved main optimizer state to {self.policy_optimizer_path}")
|
| 1029 |
+
|
| 1030 |
+
if self.critic_optimizer is not None:
|
| 1031 |
+
torch.save(
|
| 1032 |
+
self.critic_optimizer.state_dict(), self.critic_optimizer_path
|
| 1033 |
+
)
|
| 1034 |
+
logger.info(
|
| 1035 |
+
f"Saved critic optimizer state to {self.critic_optimizer_path}"
|
| 1036 |
+
)
|
| 1037 |
+
except Exception as e:
|
| 1038 |
+
logger.error(f"Error saving optimizer states: {str(e)}")
|
| 1039 |
+
raise
|
| 1040 |
+
|
| 1041 |
+
def export_trainer_annealing_state(self) -> None:
|
| 1042 |
+
"""
|
| 1043 |
+
Saves the trainer state.
|
| 1044 |
+
"""
|
| 1045 |
+
with open(self.trainer_annealing_state_path, "wb") as f:
|
| 1046 |
+
pickle.dump(self.trainer_annealing_state, f)
|
| 1047 |
+
logger.info(f"Saved trainer state to {self.trainer_annealing_state_path}")
|
| 1048 |
+
|
| 1049 |
+
def export_trainer_states(self) -> None:
|
| 1050 |
+
"""
|
| 1051 |
+
Saves the trainer states.
|
| 1052 |
+
"""
|
| 1053 |
+
self.export_optimizer_states()
|
| 1054 |
+
self.export_trainer_annealing_state()
|
src_code_for_reproducibility/training/trainer_independent.py
ADDED
|
@@ -0,0 +1,155 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
|
| 3 |
+
"""
|
| 4 |
+
import logging
|
| 5 |
+
import os
|
| 6 |
+
import sys
|
| 7 |
+
from typing import Union
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
import torch.nn.functional as F
|
| 11 |
+
from accelerate import Accelerator
|
| 12 |
+
from pandas._libs.tslibs.offsets import CBMonthBegin
|
| 13 |
+
from peft import LoraConfig
|
| 14 |
+
from torch.nn.utils.rnn import pad_sequence
|
| 15 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 16 |
+
|
| 17 |
+
from mllm.markov_games.rollout_tree import *
|
| 18 |
+
from mllm.markov_games.rollout_tree import RolloutTreeRootNode
|
| 19 |
+
from mllm.training.credit_methods import (
|
| 20 |
+
get_discounted_returns,
|
| 21 |
+
get_discounted_state_visitation_credits,
|
| 22 |
+
get_generalized_advantage_estimates,
|
| 23 |
+
get_rloo_credits,
|
| 24 |
+
)
|
| 25 |
+
from mllm.training.tally_metrics import Tally
|
| 26 |
+
from mllm.training.tally_tokenwise import ContextualizedTokenwiseTally
|
| 27 |
+
from mllm.training.tokenize_chats import *
|
| 28 |
+
from mllm.training.tokenize_chats import process_training_chat
|
| 29 |
+
from mllm.training.trainer_common import BaseTrainer
|
| 30 |
+
from mllm.training.training_data_utils import *
|
| 31 |
+
from mllm.training.training_data_utils import (
|
| 32 |
+
TrainingBatch,
|
| 33 |
+
TrajectoryBatch,
|
| 34 |
+
get_tokenwise_credits,
|
| 35 |
+
)
|
| 36 |
+
from mllm.utils.resource_context import resource_logger_context
|
| 37 |
+
|
| 38 |
+
logger = logging.getLogger(__name__)
|
| 39 |
+
logger.addHandler(logging.StreamHandler(sys.stdout))
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
@dataclass
|
| 43 |
+
class TrainingData:
|
| 44 |
+
agent_id: str
|
| 45 |
+
main_data: TrajectoryBatch
|
| 46 |
+
# list-of-tensors: per rollout advantages with length jT
|
| 47 |
+
main_advantages: list[torch.FloatTensor] | None = None
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
class TrainerNaive(BaseTrainer):
|
| 51 |
+
def set_agent_trajectory_data(
|
| 52 |
+
self, agent_id: str, roots: list[RolloutTreeRootNode]
|
| 53 |
+
) -> None:
|
| 54 |
+
"""
|
| 55 |
+
TOWRITE
|
| 56 |
+
"""
|
| 57 |
+
# TODO: append to current batch data instead, else we will only train for one agent!
|
| 58 |
+
self.policy_gradient_data = None
|
| 59 |
+
|
| 60 |
+
# Tensorize Chats
|
| 61 |
+
rollout_ids = []
|
| 62 |
+
crn_ids = [] # common random number id
|
| 63 |
+
batch_input_ids = []
|
| 64 |
+
batch_action_mask = []
|
| 65 |
+
batch_entropy_mask = []
|
| 66 |
+
batch_timesteps = []
|
| 67 |
+
batch_state_ends_mask = []
|
| 68 |
+
batch_engine_log_probs = []
|
| 69 |
+
batch_rewards = []
|
| 70 |
+
for root in roots:
|
| 71 |
+
rollout_id = root.id
|
| 72 |
+
self.debug_path_list.append(
|
| 73 |
+
"mgid:" + str(rollout_id) + "_agent_id:" + agent_id
|
| 74 |
+
)
|
| 75 |
+
rollout_ids.append(rollout_id)
|
| 76 |
+
crn_ids.append(root.crn_id)
|
| 77 |
+
chat, rewards = get_main_chat_list_and_rewards(agent_id=agent_id, root=root)
|
| 78 |
+
(
|
| 79 |
+
input_ids,
|
| 80 |
+
action_mask,
|
| 81 |
+
entropy_mask,
|
| 82 |
+
timesteps,
|
| 83 |
+
state_ends_mask,
|
| 84 |
+
engine_log_probs,
|
| 85 |
+
) = process_training_chat(
|
| 86 |
+
tokenizer=self.tokenizer,
|
| 87 |
+
chat_history=chat,
|
| 88 |
+
entropy_mask_regex=self.entropy_mask_regex,
|
| 89 |
+
exploration_prompts_to_remove=self.exploration_prompts_to_remove,
|
| 90 |
+
)
|
| 91 |
+
batch_input_ids.append(input_ids)
|
| 92 |
+
batch_action_mask.append(action_mask)
|
| 93 |
+
batch_entropy_mask.append(entropy_mask)
|
| 94 |
+
batch_timesteps.append(timesteps)
|
| 95 |
+
batch_state_ends_mask.append(state_ends_mask)
|
| 96 |
+
batch_engine_log_probs.append(engine_log_probs)
|
| 97 |
+
batch_rewards.append(rewards)
|
| 98 |
+
|
| 99 |
+
trajectory_batch = TrajectoryBatch(
|
| 100 |
+
rollout_ids=torch.tensor(rollout_ids, dtype=torch.int32),
|
| 101 |
+
crn_ids=torch.tensor(crn_ids, dtype=torch.int32),
|
| 102 |
+
agent_ids=[agent_id] * len(rollout_ids),
|
| 103 |
+
batch_input_ids=batch_input_ids,
|
| 104 |
+
batch_action_mask=batch_action_mask,
|
| 105 |
+
batch_entropy_mask=batch_entropy_mask,
|
| 106 |
+
batch_timesteps=batch_timesteps,
|
| 107 |
+
batch_state_ends_mask=batch_state_ends_mask,
|
| 108 |
+
batch_rewards=batch_rewards,
|
| 109 |
+
batch_engine_log_probs=batch_engine_log_probs,
|
| 110 |
+
)
|
| 111 |
+
|
| 112 |
+
# Get Advantages
|
| 113 |
+
batch_advantages: torch.FloatTensor = (
|
| 114 |
+
self.get_advantages_with_critic_gradient_accumulation(trajectory_batch)
|
| 115 |
+
)
|
| 116 |
+
|
| 117 |
+
# Discount state visitation (the mathematically correct way)
|
| 118 |
+
if not self.skip_discounted_state_visitation:
|
| 119 |
+
for i in range(len(batch_advantages)):
|
| 120 |
+
batch_advantages[i] = get_discounted_state_visitation_credits(
|
| 121 |
+
batch_advantages[i].unsqueeze(0),
|
| 122 |
+
self.discount_factor,
|
| 123 |
+
).squeeze(0)
|
| 124 |
+
|
| 125 |
+
self.training_data[agent_id] = TrainingData(
|
| 126 |
+
agent_id=agent_id,
|
| 127 |
+
main_data=trajectory_batch,
|
| 128 |
+
main_advantages=batch_advantages,
|
| 129 |
+
)
|
| 130 |
+
|
| 131 |
+
def receive_advantage_data(self, advantage_packets: list[AdvantagePacket]):
|
| 132 |
+
"""
|
| 133 |
+
This trainer ignores the advantages of the other trainers.
|
| 134 |
+
"""
|
| 135 |
+
for agent_id, agent_data in self.training_data.items():
|
| 136 |
+
self.training_data[agent_id] = agent_data.main_data
|
| 137 |
+
self.training_data[agent_id].batch_credits = agent_data.main_advantages
|
| 138 |
+
|
| 139 |
+
def share_advantage_data(self) -> list[AdvantagePacket]:
|
| 140 |
+
"""
|
| 141 |
+
Share the advantage data with other agents.
|
| 142 |
+
Returns:
|
| 143 |
+
AdvantagePacket: The advantage packet containing the agent's advantages.
|
| 144 |
+
"""
|
| 145 |
+
logger.info(f"Sharing advantage data.")
|
| 146 |
+
advantage_packets = []
|
| 147 |
+
for agent_id, agent_data in self.training_data.items():
|
| 148 |
+
advantage_packets.append(
|
| 149 |
+
AdvantagePacket(
|
| 150 |
+
agent_id=agent_id,
|
| 151 |
+
rollout_ids=agent_data.main_data.rollout_ids,
|
| 152 |
+
main_advantages=agent_data.main_advantages,
|
| 153 |
+
)
|
| 154 |
+
)
|
| 155 |
+
return advantage_packets
|
src_code_for_reproducibility/training/trainer_sum_rewards.py
ADDED
|
@@ -0,0 +1,127 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
|
| 3 |
+
"""
|
| 4 |
+
import logging
|
| 5 |
+
import os
|
| 6 |
+
import sys
|
| 7 |
+
from typing import Union
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
import torch.nn.functional as F
|
| 11 |
+
from accelerate import Accelerator
|
| 12 |
+
from pandas._libs.tslibs.offsets import CBMonthBegin
|
| 13 |
+
from peft import LoraConfig
|
| 14 |
+
from torch.nn.utils.rnn import pad_sequence
|
| 15 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 16 |
+
|
| 17 |
+
from mllm.markov_games.rollout_tree import *
|
| 18 |
+
from mllm.markov_games.rollout_tree import RolloutTreeRootNode
|
| 19 |
+
from mllm.training.credit_methods import (
|
| 20 |
+
get_discounted_returns,
|
| 21 |
+
get_discounted_state_visitation_credits,
|
| 22 |
+
get_generalized_advantage_estimates,
|
| 23 |
+
get_rloo_credits,
|
| 24 |
+
)
|
| 25 |
+
from mllm.training.tally_metrics import Tally
|
| 26 |
+
from mllm.training.tally_rollout import RolloutTally, RolloutTallyItem
|
| 27 |
+
from mllm.training.tally_tokenwise import ContextualizedTokenwiseTally
|
| 28 |
+
from mllm.training.tokenize_chats import *
|
| 29 |
+
from mllm.training.tokenize_chats import process_training_chat
|
| 30 |
+
from mllm.training.trainer_common import BaseTrainer
|
| 31 |
+
from mllm.training.trainer_independent import TrainerNaive, TrainingData
|
| 32 |
+
from mllm.training.training_data_utils import *
|
| 33 |
+
from mllm.training.training_data_utils import (
|
| 34 |
+
AdvantagePacket,
|
| 35 |
+
TrainingBatch,
|
| 36 |
+
TrajectoryBatch,
|
| 37 |
+
get_tokenwise_credits,
|
| 38 |
+
)
|
| 39 |
+
from mllm.utils.resource_context import resource_logger_context
|
| 40 |
+
|
| 41 |
+
logger = logging.getLogger(__name__)
|
| 42 |
+
logger.addHandler(logging.StreamHandler(sys.stdout))
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
class TrainerSumRewards(TrainerNaive):
|
| 46 |
+
def receive_advantage_data(self, advantage_packets: list[AdvantagePacket]):
|
| 47 |
+
"""
|
| 48 |
+
Sums the advantages of the other trainers
|
| 49 |
+
"""
|
| 50 |
+
logger.info(f"Receiving advantage packets.")
|
| 51 |
+
|
| 52 |
+
assert (
|
| 53 |
+
len(advantage_packets) > 0
|
| 54 |
+
), "At least one advantage packet must be provided."
|
| 55 |
+
|
| 56 |
+
for agent_id, agent_data in self.training_data.items():
|
| 57 |
+
coagent_advantage_packets = [
|
| 58 |
+
packet for packet in advantage_packets if packet.agent_id != agent_id
|
| 59 |
+
]
|
| 60 |
+
agent_rollout_ids = agent_data.main_data.rollout_ids
|
| 61 |
+
agent_advantages = agent_data.main_advantages
|
| 62 |
+
co_agent_advantages = []
|
| 63 |
+
for rollout_id in agent_rollout_ids:
|
| 64 |
+
for co_agent_packet in coagent_advantage_packets:
|
| 65 |
+
if rollout_id in co_agent_packet.rollout_ids:
|
| 66 |
+
index = torch.where(rollout_id == co_agent_packet.rollout_ids)[
|
| 67 |
+
0
|
| 68 |
+
].item()
|
| 69 |
+
co_agent_advantages.append(
|
| 70 |
+
co_agent_packet.main_advantages[index]
|
| 71 |
+
)
|
| 72 |
+
# assumes that its two player game, with one co-agent
|
| 73 |
+
break
|
| 74 |
+
assert len(co_agent_advantages) == len(agent_advantages)
|
| 75 |
+
B = len(agent_advantages)
|
| 76 |
+
assert all(
|
| 77 |
+
a.shape[0] == b.shape[0]
|
| 78 |
+
for a, b in zip(co_agent_advantages, agent_advantages)
|
| 79 |
+
), "Number of advantages must match in order to sum them up."
|
| 80 |
+
|
| 81 |
+
# Get padded tensors (advantage alignment is invariant to padding)
|
| 82 |
+
lengths = torch.tensor(
|
| 83 |
+
[len(t) for t in agent_advantages],
|
| 84 |
+
device=self.device,
|
| 85 |
+
dtype=torch.long,
|
| 86 |
+
)
|
| 87 |
+
padded_main_advantages = pad_sequence(
|
| 88 |
+
agent_advantages, batch_first=True, padding_value=0.0
|
| 89 |
+
)
|
| 90 |
+
|
| 91 |
+
padded_co_agent_advantages = pad_sequence(
|
| 92 |
+
co_agent_advantages, batch_first=True, padding_value=0.0
|
| 93 |
+
)
|
| 94 |
+
|
| 95 |
+
# Create training batch data
|
| 96 |
+
sum_of_ad_credits = padded_main_advantages + padded_co_agent_advantages
|
| 97 |
+
self.rollout_tally.add_metric(
|
| 98 |
+
path=["sum_of_ad_credits"],
|
| 99 |
+
rollout_tally_item=RolloutTallyItem(
|
| 100 |
+
crn_ids=agent_data.main_data.crn_ids,
|
| 101 |
+
rollout_ids=agent_data.main_data.rollout_ids,
|
| 102 |
+
agent_ids=agent_data.main_data.agent_ids,
|
| 103 |
+
metric_matrix=sum_of_ad_credits,
|
| 104 |
+
),
|
| 105 |
+
)
|
| 106 |
+
|
| 107 |
+
if not self.skip_discounted_state_visitation:
|
| 108 |
+
sum_of_ad_credits = get_discounted_state_visitation_credits(
|
| 109 |
+
sum_of_ad_credits,
|
| 110 |
+
self.discount_factor,
|
| 111 |
+
)
|
| 112 |
+
self.rollout_tally.add_metric(
|
| 113 |
+
path=["discounted_state_visitation_credits"],
|
| 114 |
+
rollout_tally_item=RolloutTallyItem(
|
| 115 |
+
crn_ids=agent_data.main_data.crn_ids,
|
| 116 |
+
rollout_ids=agent_data.main_data.rollout_ids,
|
| 117 |
+
agent_ids=agent_data.main_data.agent_ids,
|
| 118 |
+
metric_matrix=sub_tensors[
|
| 119 |
+
"discounted_state_visitation_credits"
|
| 120 |
+
],
|
| 121 |
+
),
|
| 122 |
+
)
|
| 123 |
+
|
| 124 |
+
# Slice back to jagged and convert to tokenwise credits
|
| 125 |
+
sum_of_ad_credits = [sum_of_ad_credits[i, : lengths[i]] for i in range(B)]
|
| 126 |
+
self.training_data[agent_id] = agent_data.main_data
|
| 127 |
+
self.training_data[agent_id].batch_credits = sum_of_ad_credits
|
src_code_for_reproducibility/training/training_data_utils.py
ADDED
|
@@ -0,0 +1,394 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dataclasses import dataclass
|
| 2 |
+
from typing import Literal, Optional, Tuple
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
from torch.nn.utils.rnn import pad_sequence
|
| 6 |
+
|
| 7 |
+
from mllm.markov_games.rollout_tree import (
|
| 8 |
+
ChatTurn,
|
| 9 |
+
RolloutTreeBranchNode,
|
| 10 |
+
RolloutTreeNode,
|
| 11 |
+
RolloutTreeRootNode,
|
| 12 |
+
)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@dataclass
class AdvantagePacket:
    """
    Per-agent bundle of advantage estimates for a batch of rollouts.

    Sequences have different lengths per rollout, so the advantages are
    kept as a list of 1-D tensors (one tensor per rollout) rather than a
    single rectangular tensor.
    """

    agent_id: str
    rollout_ids: torch.IntTensor  # (B,)
    # list-of-tensors
    main_advantages: list[torch.FloatTensor]
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class TrainingChatTurn:
    # TODO: simplify by making this a child of ChatTurn
    """
    A single chat turn for one agent, augmented for training.

    Mirrors ChatTurn but additionally records the environment time step the
    turn belongs to, plus optional generated token ids and log-probs.
    """

    # Attribute names in serialization order, used by dict().
    _FIELDS = (
        "time_step",
        "role",
        "agent_id",
        "content",
        "chat_template_token_ids",
        "reasoning_content",
        "is_state_end",
        "out_token_ids",
        "log_probs",
    )

    def __init__(
        self,
        time_step: int,
        role: str,
        agent_id: str,
        content: str,
        chat_template_token_ids: list[int],
        reasoning_content: str,
        is_state_end: bool,
        out_token_ids: Optional[list[int]] = None,
        log_probs: Optional[list[float]] = None,
    ) -> None:
        self.time_step = time_step
        self.role = role
        self.agent_id = agent_id
        self.content = content
        self.chat_template_token_ids = chat_template_token_ids
        self.reasoning_content = reasoning_content
        self.is_state_end = is_state_end
        self.out_token_ids = out_token_ids
        self.log_probs = log_probs

    def dict(self):
        """Return a plain-dict view of this turn, one entry per field."""
        return {name: getattr(self, name) for name in self._FIELDS}
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def get_main_chat_list_and_rewards(
    agent_id: str, root: RolloutTreeRootNode | RolloutTreeNode
) -> Tuple[list[TrainingChatTurn], torch.FloatTensor]:
    """
    Walk a rollout tree along its main path and collect training data.

    Returns, for `agent_id`, the flattened list of TrainingChatTurn across
    all visited nodes plus a 1-D tensor of the per-step rewards. At branch
    nodes, the designated main child is followed.
    """
    # TODO; extend for all trees, not just linear
    node = root.child if isinstance(root, RolloutTreeRootNode) else root

    collected_turns: list[TrainingChatTurn] = []
    collected_rewards: list[float] = []
    while node is not None:
        # Branch node: descend into the main continuation before reading logs.
        if isinstance(node, RolloutTreeBranchNode):
            node = node.main_child
        collected_rewards.append(
            node.step_log.simulation_step_log.rewards[agent_id]
        )
        collected_turns.extend(
            TrainingChatTurn(time_step=node.time_step, **turn.model_dump())
            for turn in node.step_log.action_logs[agent_id].chat_turns
        )
        node = node.child
    return collected_turns, torch.FloatTensor(collected_rewards)
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def get_tokenwise_credits(
|
| 99 |
+
# B := batch size, S := number of tokens / seq. length, T := number of states. `j` stands for jagged (see pytorch nested tensors.)
|
| 100 |
+
batch_timesteps: torch.IntTensor | torch.Tensor, # (B, jS),
|
| 101 |
+
batch_credits: torch.FloatTensor | torch.Tensor, # (B, jT)
|
| 102 |
+
) -> torch.FloatTensor | torch.Tensor: # (B, jS)
|
| 103 |
+
"""
|
| 104 |
+
TOWRITE
|
| 105 |
+
"""
|
| 106 |
+
# TODO vectorize this code
|
| 107 |
+
batch_token_credits = []
|
| 108 |
+
for credits, timesteps in zip(batch_credits, batch_timesteps):
|
| 109 |
+
token_credits = torch.zeros_like(
|
| 110 |
+
timesteps,
|
| 111 |
+
dtype=credits.dtype,
|
| 112 |
+
device=timesteps.device,
|
| 113 |
+
)
|
| 114 |
+
for idx, credit in enumerate(credits):
|
| 115 |
+
token_credits[timesteps == idx] = credit
|
| 116 |
+
batch_token_credits.append(token_credits)
|
| 117 |
+
return batch_token_credits
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
@dataclass
class TrajectoryBatch:
    """
    Tensorized batch of trajectories using list-of-tensors for jagged dimensions.
    """

    # B := batch size, S := number of tokens / seq. length, T := number of states.
    rollout_ids: torch.IntTensor  # (B,)
    crn_ids: torch.IntTensor  # (B,)
    agent_ids: list[str]  # (B,)
    batch_input_ids: list[torch.LongTensor]  # List[(jS,)]
    batch_action_mask: list[torch.BoolTensor]  # List[(jS,)]
    batch_entropy_mask: list[torch.BoolTensor]  # List[(jS,)]
    batch_timesteps: list[torch.IntTensor]  # List[(jS,)]
    batch_state_ends_mask: list[torch.BoolTensor]  # List[(jS,)]
    batch_engine_log_probs: Optional[list[torch.FloatTensor]]  # List[(jS,)]
    batch_rewards: list[torch.FloatTensor]  # List[(jT,)]
    batch_credits: Optional[list[torch.FloatTensor]] = None  # List[(jS,)]

    def __post_init__(self):
        """
        Validate per-sample consistency.

        Checks that every per-batch field has length B and that, within each
        sample, all token-aligned tensors agree on the jagged length and the
        reward count matches both the number of timesteps and the number of
        state-end markers.
        """
        B = self.rollout_ids.shape[0]
        assert (
            self.crn_ids.shape[0] == B
        ), "RNG IDs must have length equal to batch size."
        assert (
            len(self.agent_ids) == B
        ), "agent_ids must have length equal to batch size."
        assert (
            len(self.batch_input_ids)
            == len(self.batch_action_mask)
            == len(self.batch_entropy_mask)
            == len(self.batch_timesteps)
            == len(self.batch_state_ends_mask)
            == len(self.batch_engine_log_probs)
            == len(self.batch_rewards)
            == B
        ), "Jagged lists must all have length equal to batch size."

        for b in range(B):
            # Timesteps are 0-based, so the state count is max(timesteps) + 1.
            nb_rewards = int(self.batch_rewards[b].shape[0])
            nb_timesteps = int(torch.max(self.batch_timesteps[b]).item()) + 1
            assert (
                nb_rewards == nb_timesteps
            ), "Number of rewards and timesteps mismatch."
            assert (
                self.batch_input_ids[b].shape[0]
                == self.batch_action_mask[b].shape[0]
                == self.batch_entropy_mask[b].shape[0]
                == self.batch_engine_log_probs[b].shape[0]
                == self.batch_timesteps[b].shape[0]
            ), "Tensors must have the same shape along the jagged dimension."
            assert (
                int(self.batch_state_ends_mask[b].sum())
                == self.batch_rewards[b].shape[0]
            ), "Number of rewards must match number of state ends."

    """
    Entries:
    Here, we ignore the batch dimension.
    input_ids:
        All of the tokens of both the user and the assistant, flattened.
    action_mask:
        Set to true on the tokens of the assistant (tokens generated by the model).
    timesteps:
        Therefore, max(timesteps) = Ns - 1.
    state_ends_idx:
        Indices of the tokens at which state descriptions end.
    rewards:
        rewards[t] := R_t(s_t, a_t)
    Example:
        position:      "0 1 2 3 4 5 6 7 8 9 10 11 12 13 14"
        input_ids:     "U U U a a a U a U a a a U U U" (U := User, a := Assistant)
        action_mask:   "x x x ✓ ✓ ✓ x ✓ x ✓ ✓ ✓ x x x"
        timestep:      "0 0 0 0 0 0 1 1 1 1 1 1 2 2 2"
        state_ends_dx: [2, 6, 14]
        rewards:       [r0, r1, r2]
    """

    def __getitem__(self, key) -> "TrajectoryBatch":
        # Slice along the batch dimension. Only slice keys are supported;
        # any other key type falls through and implicitly returns None.
        if isinstance(key, slice):
            return TrajectoryBatch(
                rollout_ids=self.rollout_ids.__getitem__(key),
                crn_ids=self.crn_ids.__getitem__(key),
                agent_ids=self.agent_ids[key],
                batch_input_ids=self.batch_input_ids[key],
                batch_action_mask=self.batch_action_mask[key],
                batch_entropy_mask=self.batch_entropy_mask[key],
                batch_timesteps=self.batch_timesteps[key],
                batch_state_ends_mask=self.batch_state_ends_mask[key],
                batch_engine_log_probs=self.batch_engine_log_probs[key],
                batch_rewards=self.batch_rewards[key],
                batch_credits=self.batch_credits[key] if self.batch_credits else None,
            )

    def __len__(self):
        # Batch size B.
        return len(self.batch_input_ids)

    def to(self, device):
        # Move every tensor (including each jagged list element) to `device`,
        # in place. batch_credits may be None and is moved only if present.
        self.rollout_ids = self.rollout_ids.to(device)
        self.crn_ids = self.crn_ids.to(device)
        self.batch_input_ids = [t.to(device) for t in self.batch_input_ids]
        self.batch_action_mask = [t.to(device) for t in self.batch_action_mask]
        self.batch_entropy_mask = [t.to(device) for t in self.batch_entropy_mask]
        self.batch_timesteps = [t.to(device) for t in self.batch_timesteps]
        self.batch_state_ends_mask = [t.to(device) for t in self.batch_state_ends_mask]
        self.batch_engine_log_probs = [
            t.to(device) for t in self.batch_engine_log_probs
        ]
        self.batch_rewards = [t.to(device) for t in self.batch_rewards]
        self.batch_credits = (
            [t.to(device) for t in self.batch_credits] if self.batch_credits else None
        )

    def get_padded_tensors_for_critic(self):
        """
        Returns:
            padded_batch_input_ids: (B, P)
            padded_batch_state_ends_mask: (B, P)
            timestep_counts: (B,) tensor of ints indicating number of states per sample
        """
        padded_batch_input_ids = pad_sequence(
            self.batch_input_ids, batch_first=True, padding_value=0
        )
        padded_batch_state_ends_mask = pad_sequence(
            self.batch_state_ends_mask, batch_first=True, padding_value=0
        ).bool()
        # number of states equals number of True in state_ends_mask
        timestep_counts = torch.tensor(
            [int(mask.sum().item()) for mask in self.batch_state_ends_mask],
            device=padded_batch_input_ids.device,
            dtype=torch.long,
        )
        return padded_batch_input_ids, padded_batch_state_ends_mask, timestep_counts
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
# Type alias: environment time steps are represented as plain ints.
timestep = int
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
@dataclass
|
| 262 |
+
class PaddedTensorTrainingBatch:
|
| 263 |
+
batch_input_ids: torch.LongTensor | torch.Tensor
|
| 264 |
+
batch_action_mask: torch.BoolTensor | torch.Tensor
|
| 265 |
+
batch_entropy_mask: Optional[torch.BoolTensor | torch.Tensor]
|
| 266 |
+
batch_credits: torch.FloatTensor | torch.Tensor
|
| 267 |
+
batch_engine_log_probs: torch.FloatTensor | torch.Tensor
|
| 268 |
+
batch_timesteps: torch.IntTensor | torch.Tensor
|
| 269 |
+
|
| 270 |
+
def __len__(self):
|
| 271 |
+
return self.batch_input_ids.shape[0]
|
| 272 |
+
|
| 273 |
+
def to(self, device):
|
| 274 |
+
self.batch_input_ids = self.batch_input_ids.to(device)
|
| 275 |
+
self.batch_action_mask = self.batch_action_mask.to(device)
|
| 276 |
+
self.batch_entropy_mask = self.batch_entropy_mask.to(device)
|
| 277 |
+
self.batch_credits = self.batch_credits.to(device)
|
| 278 |
+
self.batch_engine_log_probs = self.batch_engine_log_probs.to(device)
|
| 279 |
+
self.batch_timesteps = self.batch_timesteps.to(device)
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
@dataclass
class TrainingBatch:
    """
    Batch of jagged, tokenized trajectories plus per-token credits, as fed
    to the trainer. Jagged per-sample sequences are stored as lists of 1-D
    tensors (list-of-tensors layout).
    """

    rollout_ids: torch.IntTensor | torch.Tensor  # (B,)
    batch_input_ids: list[torch.LongTensor]  # List[(jS,)]
    batch_action_mask: list[torch.BoolTensor]  # List[(jS,)]
    batch_entropy_mask: Optional[list[torch.BoolTensor]]  # List[(jS,)]
    batch_credits: list[torch.FloatTensor]  # List[(jS,)]
    batch_engine_log_probs: list[torch.FloatTensor]  # List[(jS,)]
    batch_timesteps: list[torch.IntTensor]  # List[(jS,)]

    def __post_init__(self):
        """Validate that all jagged lists agree on batch size and lengths."""
        # Put everything in the right device
        # self.rollout_ids = self.rollout_ids.to("cuda" if torch.cuda.is_available() else "cpu")
        # self.batch_input_ids = self.batch_input_ids.to("cuda" if torch.cuda.is_available() else "cpu")
        # self.batch_action_mask = self.batch_action_mask.to("cuda" if torch.cuda.is_available() else "cpu")
        # self.batch_credits = self.batch_credits.to("cuda" if torch.cuda.is_available() else "cpu")
        # Ensure batch dimension is present
        assert (
            len(self.batch_input_ids)
            == len(self.batch_action_mask)
            == len(self.batch_entropy_mask)
            == len(self.batch_credits)
            == len(self.batch_engine_log_probs)
            == len(self.batch_timesteps)
            == self.rollout_ids.shape[0]
        ), "Jagged lists must all have length equal to batch size."
        # NOTE(review): batch_entropy_mask is absent from the per-sample zip
        # below, so its jagged lengths are not validated here; it is also
        # Optional but the length check above assumes it is a list — confirm.
        for inp, mask, cred, engine_log_prob, timestep in zip(
            self.batch_input_ids,
            self.batch_action_mask,
            self.batch_credits,
            self.batch_engine_log_probs,
            self.batch_timesteps,
        ):
            assert (
                inp.shape[0]
                == mask.shape[0]
                == cred.shape[0]
                == engine_log_prob.shape[0]
                == timestep.shape[0]
            ), "Tensors must have the same shapes along the jagged dimension."

    def __getitem__(self, key) -> "TrainingBatch":
        # Slice along the batch dimension. Only slice keys are supported;
        # any other key type falls through and implicitly returns None.
        if isinstance(key, slice):
            return TrainingBatch(
                rollout_ids=self.rollout_ids.__getitem__(key),
                batch_input_ids=self.batch_input_ids[key],
                batch_action_mask=self.batch_action_mask[key],
                batch_entropy_mask=self.batch_entropy_mask[key],
                batch_credits=self.batch_credits[key],
                batch_engine_log_probs=self.batch_engine_log_probs[key],
                batch_timesteps=self.batch_timesteps[key],
            )

    def __len__(self):
        # Batch size B.
        return len(self.batch_input_ids)

    def to(self, device):
        # Move every tensor (including jagged list elements) to `device`,
        # in place.
        self.rollout_ids = self.rollout_ids.to(device)
        self.batch_input_ids = [t.to(device) for t in self.batch_input_ids]
        self.batch_action_mask = [t.to(device) for t in self.batch_action_mask]
        self.batch_entropy_mask = [t.to(device) for t in self.batch_entropy_mask]
        self.batch_credits = [t.to(device) for t in self.batch_credits]
        self.batch_engine_log_probs = [
            t.to(device) for t in self.batch_engine_log_probs
        ]
        self.batch_timesteps = [t.to(device) for t in self.batch_timesteps]

    def get_padded_tensors(self, padding: float = 0.0):
        """
        Right-pad every jagged field to a rectangular (B, P) tensor and
        bundle the results in a PaddedTensorTrainingBatch.
        Always pad to the right.
        """
        padded_batch_input_ids = pad_sequence(
            self.batch_input_ids, batch_first=True, padding_value=int(padding)
        )
        padded_batch_action_mask = pad_sequence(
            [m.to(dtype=torch.bool) for m in self.batch_action_mask],
            batch_first=True,
            padding_value=False,
        )
        padded_batch_entropy_mask = pad_sequence(
            self.batch_entropy_mask, batch_first=True, padding_value=False
        )
        padded_batch_credits = pad_sequence(
            self.batch_credits, batch_first=True, padding_value=float(padding)
        )
        padded_batch_engine_log_probs = pad_sequence(
            self.batch_engine_log_probs, batch_first=True, padding_value=float(padding)
        )
        padded_batch_timesteps = pad_sequence(
            self.batch_timesteps, batch_first=True, padding_value=0
        )

        return PaddedTensorTrainingBatch(
            padded_batch_input_ids,
            padded_batch_action_mask,
            padded_batch_entropy_mask,
            padded_batch_credits,
            padded_batch_engine_log_probs,
            padded_batch_timesteps,
        )

    def append(self, other: "TrainingBatch"):
        # Concatenate another batch onto this one, in place.
        self.rollout_ids = torch.cat([self.rollout_ids, other.rollout_ids])
        self.batch_input_ids.extend(other.batch_input_ids)
        self.batch_action_mask.extend(other.batch_action_mask)
        self.batch_entropy_mask.extend(other.batch_entropy_mask)
        self.batch_credits.extend(other.batch_credits)
        self.batch_engine_log_probs.extend(other.batch_engine_log_probs)
        self.batch_timesteps.extend(other.batch_timesteps)
|
| 392 |
+
|
| 393 |
+
|
| 394 |
+
# Type alias (duplicate of the one above): time steps are plain ints.
timestep = int
|
src_code_for_reproducibility/utils/__init__.py
ADDED
|
File without changes
|
src_code_for_reproducibility/utils/dict_get_path.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
def get_from_nested_dict(a:dict, path) -> any:
|
| 3 |
+
# path is string or list of string
|
| 4 |
+
try:
|
| 5 |
+
if isinstance(path, str):
|
| 6 |
+
return a[path]
|
| 7 |
+
else:
|
| 8 |
+
for p in path:
|
| 9 |
+
a = a[p]
|
| 10 |
+
return a
|
| 11 |
+
except Exception:
|
| 12 |
+
return None
|
src_code_for_reproducibility/utils/format_time.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
def format_time(seconds):
    """Render a duration in seconds as a compact 'Xh Ym Zs' style string."""
    whole = int(seconds)
    if seconds >= 3600:
        hours, remainder = divmod(whole, 3600)
        minutes, secs = divmod(remainder, 60)
        return f"{hours}h {minutes}m {secs}s"
    if seconds >= 60:
        minutes, secs = divmod(whole, 60)
        return f"{minutes}m {secs}s"
    return f"{whole}s"
|
src_code_for_reproducibility/utils/gather_training_stats.py
ADDED
|
@@ -0,0 +1,257 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import copy
|
| 2 |
+
import csv
|
| 3 |
+
import gc
|
| 4 |
+
import json
|
| 5 |
+
import logging
|
| 6 |
+
import os
|
| 7 |
+
import pickle
|
| 8 |
+
import random
|
| 9 |
+
import re
|
| 10 |
+
import subprocess
|
| 11 |
+
import sys
|
| 12 |
+
import time
|
| 13 |
+
from datetime import datetime
|
| 14 |
+
from statistics import mean
|
| 15 |
+
from typing import Any, Dict
|
| 16 |
+
|
| 17 |
+
import hydra
|
| 18 |
+
import matplotlib.pyplot as plt
|
| 19 |
+
import numpy as np
|
| 20 |
+
import pandas as pd
|
| 21 |
+
import torch
|
| 22 |
+
from omegaconf import OmegaConf
|
| 23 |
+
|
| 24 |
+
from mllm.training.tally_metrics import Tally
|
| 25 |
+
from mllm.utils.stat_pack import StatPack
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def get_from_nested_dict(dictio: dict, path: list[str]):
    """Descend along `path`; the final key is looked up with .get (None if absent)."""
    node = dictio
    for key in path[:-1]:
        node = node[key]
    return node.get(path[-1])
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def set_at_path(dictio: dict, path: list[str], value):
    """Write `value` into nested dicts along `path`, creating intermediate dicts."""
    node = dictio
    for key in path[:-1]:
        node = node.setdefault(key, {})
    node[path[-1]] = value
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def produce_tabular_render(inpath: str, outpath: str = None):
    """
    Convert a JSON file of per-rollout metric lists into CSV tables.

    The input JSON maps rollout paths to lists of per-step metric dicts.
    For every rollout, one CSV is written. If `outpath` is None, a filename
    is derived from the rollout path inside a
    `contextualized_tabular_renders/` directory next to `inpath`; otherwise
    `outpath` is used directly.

    Note: with an explicit `outpath` and several rollouts in the JSON, each
    rollout overwrites the same file — pass `outpath` for single-rollout
    inputs only.
    """
    with open(inpath, "r") as f:
        data = json.load(f)
    for rollout_path in data.keys():
        if outpath is None:
            m_path = rollout_path.replace("/", "|")
            m_path = m_path.replace(".json", "")
            m_path = (
                os.path.split(inpath)[0]
                + "/contextualized_tabular_renders/"
                + m_path
                + "_tabular_render.render.csv"
            )
        else:
            # Bug fix: previously `m_path` was only assigned on the
            # outpath-is-None branch, so passing an explicit outpath raised
            # NameError below.
            m_path = outpath
        parent_dir = os.path.split(m_path)[0]
        if parent_dir:
            os.makedirs(parent_dir, exist_ok=True)
        metrics = data[rollout_path]
        # Transpose the list of dicts into column lists, then dump via pandas.
        columns = {k: [] for k in metrics[0].keys()}
        for m in metrics:
            for k, v in m.items():
                columns[k].append(v)
        pd.DataFrame(columns).to_csv(m_path)
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def get_metric_paths(data: list[dict]):
    """
    Return every leaf path of the first record in `data`.

    Each path is a list of keys leading from the top-level dict down to a
    non-dict value, in depth-first, insertion order.
    """
    d = data[0]
    paths = []

    def traverse_dict(d, current_path=None):
        # Fix: the default used to be a mutable `[]` shared across calls;
        # use the None sentinel instead.
        if current_path is None:
            current_path = []
        for key, value in d.items():
            new_path = current_path + [key]
            if isinstance(value, dict):
                traverse_dict(value, new_path)
            else:
                paths.append(new_path)

    traverse_dict(d)
    return paths
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def print_metric_paths(data: list[dict]):
    """Print each metric leaf path of the first record, one path per line."""
    for path in get_metric_paths(data):
        print(path)
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def get_metric_iteration_list(data: list[dict], metric_path: list[str]):
    """Collect one value per iteration record, read at `metric_path`."""
    if isinstance(metric_path, str):
        metric_path = [metric_path]
    return [get_from_nested_dict(record, metric_path) for record in data]
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def to_1d_numeric(x):
    """Return a 1-D float array (or None if not numeric). Accepts scalars, numpy arrays, or nested list/tuple of them."""
    if x is None:
        return None
    if isinstance(x, (int, float, np.number)):
        return np.array([float(x)], dtype=float)
    if isinstance(x, np.ndarray):
        try:
            return x.astype(float).ravel()
        except Exception:
            # Non-numeric array contents (e.g. objects/strings).
            return None
    if isinstance(x, (list, tuple)):
        # Recurse into nested containers and keep only non-empty numeric parts.
        pieces = [to_1d_numeric(element) for element in x]
        pieces = [p for p in pieces if p is not None and p.size > 0]
        return np.concatenate(pieces) if pieces else None
    # Anything else (str, dict, ...) is not numeric.
    return None
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
def get_single_metric_vector(data, metric_path, iterations=None):
    """
    Concatenate, across iteration records, the numeric values found at
    `metric_path` into a single flat float array.

    Args:
        data: list of (nested) metric dicts, one per iteration.
        metric_path: key or list of keys into each record.
        iterations: unused; kept for backward compatibility with callers.
            (The old body computed `len(data)` from it — with an `== None`
            comparison — and never used the result.)

    Returns:
        1-D numpy array (possibly empty) of all numeric values collected.
    """
    if isinstance(metric_path, str):
        metric_path = [metric_path]
    vecs = []
    for d in data:
        arr = to_1d_numeric(get_from_nested_dict(d, metric_path))
        if arr is not None:
            vecs.append(arr)
    return np.concatenate(vecs) if vecs else np.empty(0, dtype=float)
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
def _load_metrics_file(file_path: str):
|
| 140 |
+
if not (file_path.endswith(".tally.pkl") or file_path.endswith(".pkl")):
|
| 141 |
+
raise ValueError("Only *.tally.pkl files are supported.")
|
| 142 |
+
import pickle
|
| 143 |
+
|
| 144 |
+
with open(file_path, "rb") as f:
|
| 145 |
+
tree = pickle.load(f)
|
| 146 |
+
return tree
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
def get_leaf_items(array_tally: dict, prefix: list[str] = None):
    """Yield (path, value) pairs for every non-dict leaf of a nested dict,
    depth-first in insertion order; path keys are stringified."""
    base = [] if prefix is None else prefix
    for key, value in array_tally.items():
        path = base + [str(key)]
        if isinstance(value, dict):
            yield from get_leaf_items(value, path)
        else:
            yield path, value
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
def _sanitize_filename_part(part: str) -> str:
|
| 161 |
+
s = part.replace("/", "|")
|
| 162 |
+
s = s.replace(" ", "_")
|
| 163 |
+
return s
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
def render_rt_tally_pkl_to_csvs(pkl_path: str, outdir: str):
    """
    This method takes care of tokenwise logging.

    Renders a *.rt_tally.pkl payload into one CSV per metric leaf path.
    Each CSV row holds (agent_id, crn_id, rollout_id) followed by the
    metric values over time (columns t_0 .. t_{T-1}).
    """
    with open(pkl_path, "rb") as f:
        payload = pickle.load(f)
    # Backward compatibility: older tallies stored the dict directly
    if isinstance(payload, dict) and "array_tally" in payload:
        array_tally = payload.get("array_tally", {})
    else:
        array_tally = payload

    os.makedirs(outdir, exist_ok=True)
    trainer_id = os.path.basename(pkl_path).replace(".rt_tally.pkl", "")
    for path_list, rollout_tally_items in get_leaf_items(array_tally):
        # Create file and initiate writer
        path_part = ".".join(_sanitize_filename_part(p) for p in path_list)
        filename = f"{trainer_id}__{path_part}.render.csv"
        out_path = os.path.join(outdir, filename)

        # Write metric rows to CSV
        with open(out_path, "w", newline="") as f:
            writer = csv.writer(f)

            # Write header row - need to determine metric column count from first rollout_tally_item
            # NOTE(review): assumes every item in the list has the same
            # number of metric columns as the first — confirm upstream.
            first_item = rollout_tally_items[0]
            metric_cols = (
                first_item.metric_matrix.shape[1]
                if first_item.metric_matrix.ndim > 1
                else 1
            )
            header = ["agent_id", "crn_id", "rollout_id"] + [
                f"t_{i}" for i in range(metric_cols)
            ]
            writer.writerow(header)

            for rollout_tally_item in rollout_tally_items:
                # NOTE(review): each item is expected to expose crn_ids,
                # rollout_ids, agent_ids and a metric_matrix indexed by
                # sample — verify against the tally producer.
                crn_ids = rollout_tally_item.crn_ids
                rollout_ids = rollout_tally_item.rollout_ids
                agent_ids = rollout_tally_item.agent_ids
                metric_matrix = rollout_tally_item.metric_matrix
                for i in range(metric_matrix.shape[0]):
                    row_vals = metric_matrix[i].reshape(-1)
                    # Convert row_vals to a list to avoid numpy concatenation issues
                    row_vals = (
                        row_vals.tolist()
                        if hasattr(row_vals, "tolist")
                        else list(row_vals)
                    )
                    row_prefix = [
                        agent_ids[i],
                        crn_ids[i],
                        rollout_ids[i],
                    ]
                    writer.writerow(row_prefix + row_vals)
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
def tally_to_stat_pack(tally: Dict[str, Any]):
    """
    Flatten a (possibly nested) tally dict into a StatPack.

    Every leaf value is reduced with np.mean and stored under its
    underscore-joined key path, in depth-first insertion order.
    """
    stat_pack = StatPack()
    # backward compatibility: older payloads wrap the tally in "array_tally"
    if "array_tally" in tally:
        tally = tally["array_tally"]

    def _walk(node: dict, prefix: list[str]):
        for key, value in node.items():
            key_path = prefix + [key]
            if isinstance(value, dict):
                _walk(value, key_path)
            else:
                stat_pack.add_stat("_".join(key_path), np.mean(value))

    _walk(tally, [])
    return stat_pack
|
src_code_for_reproducibility/utils/get_coagent_id.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
def get_coagent_id(ids: list[str], agent_id:str) -> str | None:
|
| 3 |
+
for id in ids:
|
| 4 |
+
if id != agent_id: return id
|
src_code_for_reproducibility/utils/get_stochastic_game_lengths.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
def get_stochastic_game_lengths(
    max_length,
    nb_games,
    continuation_prob,
    same_length_batch=False
):
    """
    Generates stochastic game lengths based on a geometric distribution.

    Args:
        max_length (int): The maximum length a game can have.
        nb_games (int): The number of games to generate lengths for.
        continuation_prob (float): The probability of the game continuing after each round.
        same_length_batch (bool): If True, all games will have the same length.

    Returns:
        list[int]: one length per game, each capped at `max_length`.
    """
    if continuation_prob == 1:
        # Degenerate case: the game never stops early.
        return [max_length] * nb_games
    stop_prob = 1 - continuation_prob
    if same_length_batch:
        # Draw once and share the length across the whole batch.
        shared_length = np.random.geometric(stop_prob, 1)
        game_lengths = np.repeat(shared_length, nb_games)
    else:
        game_lengths = np.random.geometric(stop_prob, nb_games)

    # Cap every draw at max_length.
    return np.minimum(game_lengths, max_length).tolist()
|
src_code_for_reproducibility/utils/kill_sglang.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import psutil
|
| 2 |
+
import signal
|
| 3 |
+
|
| 4 |
+
# Substring that identifies sglang scheduler worker processes in a
# process command line.
target_name = "sglang::scheduler"
# Module-level accumulator of PIDs killed by kill_sglang().
killed = []
|
| 6 |
+
|
| 7 |
+
def kill_sglang():
    """
    SIGKILL every process whose command line contains `target_name`,
    recording the killed PIDs in the module-level `killed` list.
    """
    for proc in psutil.process_iter(['pid', 'name', 'cmdline']):
        try:
            # Some processes may not have a name or cmdline
            cmdline_parts = proc.info['cmdline']
            cmdline = " ".join(cmdline_parts) if cmdline_parts else ""
            if target_name not in cmdline:
                continue
            print(f"Killing PID {proc.pid}: {cmdline}")
            proc.send_signal(signal.SIGKILL)
            killed.append(proc.pid)
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            # Process vanished or is protected while we inspected it.
            pass
|
src_code_for_reproducibility/utils/output_source_code.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
def output_source_code(model, output_path: str) -> None:
    """
    Outputs the source code of the model to the given path.
    """
    source = model.source_code
    with open(output_path, "w") as f:
        f.write(source)
|
src_code_for_reproducibility/utils/resource_context.py
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import time
|
| 3 |
+
from contextlib import contextmanager
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def vram_usage() -> str:
    """Return a human-readable summary of VRAM usage for every CUDA device.

    Returns:
        One line per visible GPU reporting allocated and reserved memory in
        GB; an empty string when no CUDA devices are available.
    """
    lines = []
    for i in range(torch.cuda.device_count()):
        gpu_memory_allocated = torch.cuda.memory_allocated(i) / (
            1024**3
        )  # Convert bytes to GB
        gpu_memory_reserved = torch.cuda.memory_reserved(i) / (
            1024**3
        )  # Convert bytes to GB
        lines.append(
            f"GPU {i}: Memory Allocated: {gpu_memory_allocated:.2f} GB, "
            f"Memory Reserved: {gpu_memory_reserved:.2f} GB"
        )
    # Join with newlines so multi-GPU output does not run together
    # (the previous version concatenated the per-GPU strings directly).
    return "\n".join(lines)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def ram_usage():
    """Return a human-readable string with this process's resident RAM in GB."""
    import psutil

    rss_bytes = psutil.Process().memory_info().rss
    rss_gb = rss_bytes / (1024**3)  # Convert bytes to GB
    return f"RAM Usage: {rss_gb:.2f} GB"
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
@contextmanager
def resource_logger_context(logger: logging.Logger, task_description: str):
    """
    Context manager to log the resource usage of the current task.

    Measures wall-clock time and CUDA device-0 memory around the wrapped
    block, then emits a single ``logger.info`` line with the VRAM delta,
    the current VRAM occupancy, the block's peak allocation, and the
    elapsed time.

    Args:
        logger: The logger to use to log the resource usage.
        task_description: The description of the task to log.
    Returns:
        None
    """
    try:
        initial_time = time.time()
        # Assume CUDA is available and use device 0 only
        # NOTE(review): if CUDA is unavailable, get_device_properties raises
        # here and the finally block then fails on the unbound locals
        # (total_mem_bytes / initial_total_bytes) — confirm callers only use
        # this on CUDA hosts.
        total_mem_bytes = torch.cuda.get_device_properties(0).total_memory
        initial_total_bytes = (
            torch.cuda.memory_allocated(0) + torch.cuda.memory_reserved(0)
        )
        # Reset the peak counter so max_memory_allocated below reflects only
        # allocations made inside the wrapped block.
        torch.cuda.reset_peak_memory_stats(0)
        yield None
    finally:
        final_time = time.time()
        # Ensure kernels within the block are accounted for
        torch.cuda.synchronize()

        # Compute metrics
        final_allocated_bytes = torch.cuda.memory_allocated(0)
        final_reserved_bytes = torch.cuda.memory_reserved(0)
        final_total_bytes = final_allocated_bytes + final_reserved_bytes

        # Guard against a zero total (truthiness check) to avoid
        # division by zero in the percentage computations.
        delta_vram_percent_total = (
            100 * (final_total_bytes - initial_total_bytes) / total_mem_bytes
            if total_mem_bytes
            else 0.0
        )
        current_percent_vram_taken = (
            100 * final_total_bytes / total_mem_bytes if total_mem_bytes else 0.0
        )
        # Peak since reset_peak_memory_stats above, i.e. within the block.
        block_peak_percent = (
            100 * torch.cuda.max_memory_allocated(0) / total_mem_bytes
            if total_mem_bytes
            else 0.0
        )
        # Elapsed time formatted as HH:MM:SS (wraps past 24h by design of gmtime).
        delta_time_str = time.strftime(
            '%H:%M:%S', time.gmtime(final_time - initial_time)
        )

        logger.info(
            f"For task: {task_description}, ΔVRAM % (total): {delta_vram_percent_total:.2f}%, Current % of VRAM taken: {current_percent_vram_taken:.2f}%, Block Peak % of device VRAM: {block_peak_percent:.2f}%, ΔTime: {delta_time_str}"
        )
|
src_code_for_reproducibility/utils/rollout_tree_chat_htmls.py
ADDED
|
@@ -0,0 +1,664 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pathlib import Path
|
| 2 |
+
from typing import List
|
| 3 |
+
|
| 4 |
+
from mllm.utils.rollout_tree_gather_utils import *
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def html_from_chat_turns(chat_turns: List[ChatTurnLog]) -> str:
|
| 8 |
+
"""
|
| 9 |
+
Render chat turns as a single, wrapping sequence of messages in time order.
|
| 10 |
+
Keep badge and message bubble styles, include time on every badge and
|
| 11 |
+
include rewards on assistant badges. Each message is individually
|
| 12 |
+
hide/show by click; when hidden, only the badge remains and "(...)" is
|
| 13 |
+
shown inline (not inside a bubble).
|
| 14 |
+
"""
|
| 15 |
+
import html
|
| 16 |
+
import re as _re
|
| 17 |
+
|
| 18 |
+
# Prepare ordering: sort by (time_step, original_index) to keep stable order within same step
|
| 19 |
+
indexed_turns = list(enumerate(chat_turns))
|
| 20 |
+
indexed_turns.sort(key=lambda t: (t[1].time_step, t[0]))
|
| 21 |
+
assistant_agents = sorted({t.agent_id for t in chat_turns if t.role == "assistant"})
|
| 22 |
+
enable_split_view = len(assistant_agents) == 2
|
| 23 |
+
|
| 24 |
+
# CSS styles (simplified layout; no time-step or agent-column backgrounds)
|
| 25 |
+
css = """
|
| 26 |
+
<style>
|
| 27 |
+
:root {
|
| 28 |
+
--font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
|
| 29 |
+
--bg: #ffffff;
|
| 30 |
+
--text: #1c0b00;
|
| 31 |
+
--muted-text: #2C3E50;
|
| 32 |
+
--accent-muted: #BDC3C7;
|
| 33 |
+
--accent-muted-2: #D0D7DE;
|
| 34 |
+
--panel-bg: #F8FAFC;
|
| 35 |
+
--reward-color: #3a2e00; /* dark text for reward pill */
|
| 36 |
+
--font-size: 15px;
|
| 37 |
+
--small-font-size: 13px;
|
| 38 |
+
--group-label-font-size: 12px;
|
| 39 |
+
--border-width: 2px;
|
| 40 |
+
--corner-radius: 6px;
|
| 41 |
+
--pill-radius-left: 999px 0 0 999px;
|
| 42 |
+
--pill-radius-right: 0 999px 999px 0;
|
| 43 |
+
--inset-shadow: 0 1px 0 rgba(0,0,0,0.03) inset;
|
| 44 |
+
}
|
| 45 |
+
body {
|
| 46 |
+
font-family: var(--font-family);
|
| 47 |
+
margin: 16px;
|
| 48 |
+
background-color: var(--bg);
|
| 49 |
+
color: var(--text);
|
| 50 |
+
font-size: var(--font-size);
|
| 51 |
+
line-height: 1.6;
|
| 52 |
+
}
|
| 53 |
+
.messages-flow { display: block; }
|
| 54 |
+
.split-wrapper { display: flex; gap: 4px; align-items: flex-start; position: relative; }
|
| 55 |
+
.split-col { flex:1 1 0; min-width:0; }
|
| 56 |
+
/* In split view keep same inline density as linear view */
|
| 57 |
+
.split-col .chat-turn { display: inline; }
|
| 58 |
+
.split-wrapper.resizing { user-select: none; }
|
| 59 |
+
.split-resizer { width:4px; cursor: col-resize; flex:0 0 auto; align-self: stretch; position: relative; background: linear-gradient(90deg, rgba(224,230,235,0), var(--accent-muted-2) 30%, var(--accent-muted-2) 70%, rgba(224,230,235,0)); border-radius:2px; transition: background .15s ease, width .15s ease; }
|
| 60 |
+
.split-resizer:hover { background: linear-gradient(90deg, rgba(224,230,235,0), var(--accent-muted) 35%, var(--accent-muted) 65%, rgba(224,230,235,0)); }
|
| 61 |
+
.split-resizer.dragging { background: linear-gradient(90deg, rgba(224,230,235,0), var(--accent-muted) 25%, var(--accent-muted) 75%, rgba(224,230,235,0)); }
|
| 62 |
+
/* Inline reasoning (removed toggle to prevent layout shift on click) */
|
| 63 |
+
.reasoning-inline { display:inline; font-size:0.8em; font-style:italic; color:#555; white-space:pre-wrap; margin-right:4px; cursor:pointer; position:relative; }
|
| 64 |
+
.reasoning-inline .reasoning-text { display:inline; }
|
| 65 |
+
.reasoning-inline .reasoning-icon { display:inline-block; margin-right:2px; }
|
| 66 |
+
.reasoning-inline.collapsed .reasoning-text { display:none; }
|
| 67 |
+
.reasoning-inline.collapsed::after { content:'(...)'; font-style:italic; color:#777; margin-left:4px; }
|
| 68 |
+
.message-box .main-content { white-space:normal; }
|
| 69 |
+
/* tighten spacing */
|
| 70 |
+
.split-col .group-divider { margin:4px 0 2px 0; }
|
| 71 |
+
.toolbar {
|
| 72 |
+
display: flex;
|
| 73 |
+
align-items: center;
|
| 74 |
+
gap: 8px;
|
| 75 |
+
margin-bottom: 0;
|
| 76 |
+
font-size: var(--small-font-size);
|
| 77 |
+
max-height: 0;
|
| 78 |
+
overflow: hidden;
|
| 79 |
+
opacity: 0;
|
| 80 |
+
pointer-events: none;
|
| 81 |
+
transition: max-height 0.2s ease, opacity 0.2s ease;
|
| 82 |
+
}
|
| 83 |
+
.toolbar-wrap { position: sticky; top: 0; z-index: 10; background: var(--bg); }
|
| 84 |
+
.toolbar-hotzone { height: 6px; }
|
| 85 |
+
.toolbar-wrap:hover .toolbar { max-height: 200px; opacity: 1; pointer-events: auto; margin-bottom: 12px; }
|
| 86 |
+
.toolbar input[type="number"] {
|
| 87 |
+
width: 72px;
|
| 88 |
+
padding: 2px 6px;
|
| 89 |
+
border: 1px solid var(--accent-muted);
|
| 90 |
+
border-radius: var(--corner-radius);
|
| 91 |
+
background: var(--bg);
|
| 92 |
+
}
|
| 93 |
+
.toolbar button {
|
| 94 |
+
padding: 4px 8px;
|
| 95 |
+
border: 1px solid var(--accent-muted);
|
| 96 |
+
background: var(--panel-bg);
|
| 97 |
+
border-radius: var(--corner-radius);
|
| 98 |
+
cursor: pointer;
|
| 99 |
+
}
|
| 100 |
+
.chat-turn {
|
| 101 |
+
display: inline; /* inline like text */
|
| 102 |
+
background: transparent;
|
| 103 |
+
position: relative;
|
| 104 |
+
cursor: pointer;
|
| 105 |
+
}
|
| 106 |
+
/* No agent-specific background distinctions */
|
| 107 |
+
.turn-content {
|
| 108 |
+
white-space: normal;
|
| 109 |
+
color: var(--text);
|
| 110 |
+
font-size: var(--font-size);
|
| 111 |
+
display: inline; /* inline flow */
|
| 112 |
+
}
|
| 113 |
+
.chat-turn .agent-badge { margin-right: 0; vertical-align: baseline; }
|
| 114 |
+
.agent-badge {
|
| 115 |
+
display: inline;
|
| 116 |
+
position: relative;
|
| 117 |
+
border: var(--border-width) solid var(--accent-muted); /* slightly thicker */
|
| 118 |
+
border-radius: var(--pill-radius-left); /* round left and bottom-right */
|
| 119 |
+
font-size: var(--font-size);
|
| 120 |
+
color: var(--muted-text);
|
| 121 |
+
background: var(--panel-bg);
|
| 122 |
+
box-shadow: var(--inset-shadow);
|
| 123 |
+
line-height: 1.2;
|
| 124 |
+
border-right: 0;
|
| 125 |
+
}
|
| 126 |
+
/* Use flex on assistant badges to vertically center reward pill */
|
| 127 |
+
.chat-turn.role-assistant .agent-badge { display: inline-flex; align-items: center; }
|
| 128 |
+
.agent-badge::after {
|
| 129 |
+
content: none;
|
| 130 |
+
}
|
| 131 |
+
/* removed external separator; emoji is rendered inside message bubble */
|
| 132 |
+
.agent-name { font-weight: 700; }
|
| 133 |
+
.emoji-bw { filter: grayscale(100%); opacity: 0.95; font-size: var(--font-size); vertical-align: baseline; margin: 0; position: relative; top: -1px; line-height: 1; display: inline-block; }
|
| 134 |
+
.ts-badge {
|
| 135 |
+
position: relative;
|
| 136 |
+
display: inline;
|
| 137 |
+
border: var(--border-width) solid var(--accent-muted-2); /* slightly thicker */
|
| 138 |
+
border-radius: var(--corner-radius); /* not a pill */
|
| 139 |
+
font-size: var(--font-size);
|
| 140 |
+
# font-weight: 700;
|
| 141 |
+
color: var(--muted-text);
|
| 142 |
+
background: #F4F8FB; /* subtle tint */
|
| 143 |
+
# padding: 1px 6px; /* slight padding for visibility */
|
| 144 |
+
margin-right: 8px; /* small gap from following content */
|
| 145 |
+
pointer-events: auto; /* allow events so we can ignore them in JS */
|
| 146 |
+
}
|
| 147 |
+
/* Hide timestep badges when grouping by 1 */
|
| 148 |
+
.hide-ts-badges .ts-badge { display: none; }
|
| 149 |
+
/* Strong hide: completely hide collapsed turns */
|
| 150 |
+
.strong-hide .chat-turn.collapsed { display: none; }
|
| 151 |
+
.ts-badge::before {
|
| 152 |
+
content: "";
|
| 153 |
+
position: relative;
|
| 154 |
+
background: var(--accent-muted-2);
|
| 155 |
+
border-radius: 2px;
|
| 156 |
+
}
|
| 157 |
+
.agent-badge { margin-left: 6px; }
|
| 158 |
+
.message-box {
|
| 159 |
+
display: inline; /* inline bubble behaving like text */
|
| 160 |
+
font-size: var(--font-size);
|
| 161 |
+
border: var(--border-width) solid var(--accent-muted);
|
| 162 |
+
border-radius: var(--pill-radius-right); /* round left and bottom-right */
|
| 163 |
+
position: relative;
|
| 164 |
+
background: var(--bg);
|
| 165 |
+
vertical-align: baseline;
|
| 166 |
+
line-height: 1.2;
|
| 167 |
+
padding-left: 0;
|
| 168 |
+
border-left: 0;
|
| 169 |
+
}
|
| 170 |
+
.chat-turn.agent-alice.role-assistant .message-box::before { color: #0eb224; }
|
| 171 |
+
.chat-turn.agent-bob.role-assistant .message-box::before { color: #ef8323; }
|
| 172 |
+
.chat-turn.collapsed .message-box::before { display: none; }
|
| 173 |
+
/* Assistant bubble border colors by common agent names */
|
| 174 |
+
.chat-turn.agent-alice.role-assistant .message-box { border-color: #0eb224; }
|
| 175 |
+
.chat-turn.agent-bob.role-assistant .message-box { border-color: #ef8323; }
|
| 176 |
+
/* Tie badge and seam to agent color for a cohesive capsule, assistants only */
|
| 177 |
+
.chat-turn.agent-alice.role-assistant .agent-badge { border-color: #0eb224; background: rgba(14,178,36,0.08); }
|
| 178 |
+
.chat-turn.agent-alice.role-assistant .agent-badge::after { border-right-color: #0eb224; }
|
| 179 |
+
.chat-turn.agent-alice.role-assistant .turn-content::before { border-left-color: #0eb224; border-top-color: #0eb224; }
|
| 180 |
+
.chat-turn.agent-alice.role-assistant .message-box { border-color: #0eb224; }
|
| 181 |
+
|
| 182 |
+
.chat-turn.agent-bob.role-assistant .agent-badge { border-color: #ef8323; background: rgba(239,131,35,0.10); }
|
| 183 |
+
.chat-turn.agent-bob.role-assistant .agent-badge::after { border-right-color: #ef8323; }
|
| 184 |
+
.chat-turn.agent-bob.role-assistant .turn-content::before { border-left-color: #ef8323; border-top-color: #ef8323; }
|
| 185 |
+
.chat-turn.agent-bob.role-assistant .message-box { border-color: #ef8323; }
|
| 186 |
+
/* No colored agent-name; keep neutral */
|
| 187 |
+
.reward {
|
| 188 |
+
display: inline-flex;
|
| 189 |
+
align-items: center;
|
| 190 |
+
justify-content: center;
|
| 191 |
+
background: linear-gradient(90deg, #fffdf2 0%, #ffffff 75%);
|
| 192 |
+
color: #000000; /* full black */
|
| 193 |
+
font-weight: 600; /* slightly bolder */
|
| 194 |
+
font-family: "Inter", ui-sans-serif, system-ui, -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Oxygen, Ubuntu, Cantarell, "Fira Sans", "Droid Sans", "Helvetica Neue", Arial, "Noto Sans", sans-serif;
|
| 195 |
+
font-size: 10.5px;
|
| 196 |
+
letter-spacing: 0.15px;
|
| 197 |
+
line-height: 1;
|
| 198 |
+
padding: 0 4px 1px 4px; /* slight bottom pad for optical centering */
|
| 199 |
+
border-radius: 4px;
|
| 200 |
+
border: 1px solid #f4e6a8;
|
| 201 |
+
margin: 0 4px;
|
| 202 |
+
box-shadow: 0 0 0 1px rgba(255,255,255,0.55) inset, 0 1px 2px rgba(0,0,0,0.04);
|
| 203 |
+
}
|
| 204 |
+
.message-placeholder { display: none; color: #7f8c8d; font-style: italic; }
|
| 205 |
+
.chat-turn.collapsed .message-box { color: transparent; font-size: 0; display: inline-block; }
|
| 206 |
+
.chat-turn.collapsed .message-box::after { content: "(...)"; color: #7f8c8d; font-style: italic; font-size: var(--font-size); line-height: 1.2; }
|
| 207 |
+
.chat-turn.collapsed .agent-badge,
|
| 208 |
+
.chat-turn.collapsed .message-box { opacity: 0.3; }
|
| 209 |
+
/* Group divider - clearer and pretty */
|
| 210 |
+
.group-divider {
|
| 211 |
+
display: flex;
|
| 212 |
+
align-items: center;
|
| 213 |
+
gap: 8px;
|
| 214 |
+
width: 100%;
|
| 215 |
+
margin: 8px 0 4px 0;
|
| 216 |
+
position: relative;
|
| 217 |
+
}
|
| 218 |
+
.group-divider::before,
|
| 219 |
+
.group-divider::after {
|
| 220 |
+
content: "";
|
| 221 |
+
flex: 1 1 auto;
|
| 222 |
+
height: 2px;
|
| 223 |
+
background: linear-gradient(90deg, rgba(224,230,235,0), var(--accent-muted-2) 30%, var(--accent-muted-2) 70%, rgba(224,230,235,0));
|
| 224 |
+
}
|
| 225 |
+
.group-divider .group-label {
|
| 226 |
+
display: inline-block;
|
| 227 |
+
border: 1px solid var(--accent-muted);
|
| 228 |
+
border-radius: 999px;
|
| 229 |
+
padding: 2px 10px;
|
| 230 |
+
font-size: var(--group-label-font-size);
|
| 231 |
+
font-weight: 700;
|
| 232 |
+
color: var(--muted-text);
|
| 233 |
+
background: var(--bg);
|
| 234 |
+
box-shadow: var(--inset-shadow);
|
| 235 |
+
position: relative;
|
| 236 |
+
z-index: 1;
|
| 237 |
+
}
|
| 238 |
+
/* Enhance contrast for print / export */
|
| 239 |
+
body.split-mode .group-divider::before,
|
| 240 |
+
body.split-mode .group-divider::after {
|
| 241 |
+
background: linear-gradient(90deg, rgba(224,230,235,0), var(--accent-muted) 25%, var(--accent-muted) 75%, rgba(224,230,235,0));
|
| 242 |
+
}
|
| 243 |
+
.chat-turn .turn-content { position: relative; }
|
| 244 |
+
.chat-turn .turn-content::before {
|
| 245 |
+
content: none;
|
| 246 |
+
}
|
| 247 |
+
.chat-turn .agent-badge {
|
| 248 |
+
position: relative;
|
| 249 |
+
}
|
| 250 |
+
/* removed absolute-positioned emoji to prevent overlap */
|
| 251 |
+
</style>
|
| 252 |
+
"""
|
| 253 |
+
|
| 254 |
+
# HTML structure
|
| 255 |
+
html_parts = [
|
| 256 |
+
"<!DOCTYPE html>",
|
| 257 |
+
"<html>",
|
| 258 |
+
"<head>",
|
| 259 |
+
"<meta charset='UTF-8'>",
|
| 260 |
+
"<title>Chat Turns</title>",
|
| 261 |
+
css,
|
| 262 |
+
"<script>\n"
|
| 263 |
+
"document.addEventListener('DOMContentLoaded', function() {\n"
|
| 264 |
+
" const linearFlow = document.getElementById('flow-linear');\n"
|
| 265 |
+
" const splitFlow = document.getElementById('flow-split');\n"
|
| 266 |
+
" let splitViewOn = false;\n"
|
| 267 |
+
" function activeFlows() { return [splitViewOn && splitFlow ? splitFlow : null, linearFlow].filter(Boolean).filter(f => f.style.display !== 'none'); }\n"
|
| 268 |
+
" // State for range filtering and strong hide\n"
|
| 269 |
+
" let currentRangeStart = null;\n"
|
| 270 |
+
" let currentRangeEnd = null;\n"
|
| 271 |
+
" let strongHideOn = false;\n"
|
| 272 |
+
" document.body.addEventListener('click', function(e){\n"
|
| 273 |
+
" if (e.target.closest('.ts-badge')) { return; }\n"
|
| 274 |
+
" const r = e.target.closest('.reasoning-inline'); if (r) { e.stopPropagation(); r.classList.toggle('collapsed'); return; }\n"
|
| 275 |
+
" const turn = e.target.closest('.chat-turn');\n"
|
| 276 |
+
" if (turn) { e.stopPropagation(); turn.classList.toggle('collapsed'); }\n"
|
| 277 |
+
" });\n"
|
| 278 |
+
" // Reasoning handled via <details>, no JS required\n"
|
| 279 |
+
" function applyRangeFilter() {\n"
|
| 280 |
+
" for (const flow of activeFlows()) {\n"
|
| 281 |
+
" const turns = Array.from(flow.querySelectorAll('.chat-turn'));\n"
|
| 282 |
+
" for (const el of turns) {\n"
|
| 283 |
+
" const t = parseInt(el.getAttribute('data-time-step') || '0', 10);\n"
|
| 284 |
+
" const afterStart = (currentRangeStart === null) || (t >= currentRangeStart);\n"
|
| 285 |
+
" const beforeEnd = (currentRangeEnd === null) || (t <= currentRangeEnd);\n"
|
| 286 |
+
" el.style.display = (afterStart && beforeEnd) ? '' : 'none';\n"
|
| 287 |
+
" }\n"
|
| 288 |
+
" const dividers = Array.from(flow.querySelectorAll('.group-divider'));\n"
|
| 289 |
+
" for (const d of dividers) {\n"
|
| 290 |
+
" let anyVisible = false;\n"
|
| 291 |
+
" let el = d.nextElementSibling;\n"
|
| 292 |
+
" while (el && !el.classList.contains('group-divider')) {\n"
|
| 293 |
+
" if (el.classList.contains('chat-turn')) {\n"
|
| 294 |
+
" const disp = getComputedStyle(el).display;\n"
|
| 295 |
+
" if (disp !== 'none') { anyVisible = true; break; }\n"
|
| 296 |
+
" } else if (el.classList.contains('split-wrapper')) {\n"
|
| 297 |
+
" // Search descendants for any visible chat-turn\n"
|
| 298 |
+
" const turns = Array.from(el.querySelectorAll('.chat-turn'));\n"
|
| 299 |
+
" for (const tEl of turns) {\n"
|
| 300 |
+
" const disp2 = getComputedStyle(tEl).display;\n"
|
| 301 |
+
" if (disp2 !== 'none') { anyVisible = true; break; }\n"
|
| 302 |
+
" }\n"
|
| 303 |
+
" if (anyVisible) break;\n"
|
| 304 |
+
" }\n"
|
| 305 |
+
" el = el.nextElementSibling;\n"
|
| 306 |
+
" }\n"
|
| 307 |
+
" d.style.display = anyVisible ? '' : 'none';\n"
|
| 308 |
+
" }\n"
|
| 309 |
+
" }\n"
|
| 310 |
+
" }\n"
|
| 311 |
+
" function applyGrouping(n) {\n"
|
| 312 |
+
" function groupContainer(container, n) {\n"
|
| 313 |
+
" Array.from(container.querySelectorAll(':scope > .group-divider')).forEach(el => el.remove());\n"
|
| 314 |
+
" if (!n || n <= 0) { return; }\n"
|
| 315 |
+
" const turns = Array.from(container.querySelectorAll(':scope > .chat-turn'));\n"
|
| 316 |
+
" if (turns.length === 0) return;\n"
|
| 317 |
+
" const items = Array.from(container.children).filter(el => !el.classList.contains('group-divider'));\n"
|
| 318 |
+
" const frag = document.createDocumentFragment();\n"
|
| 319 |
+
" let lastGroup = -1;\n"
|
| 320 |
+
" for (const el of items) {\n"
|
| 321 |
+
" if (!el.classList.contains('chat-turn')) { frag.appendChild(el); continue; }\n"
|
| 322 |
+
" const t = parseInt(el.getAttribute('data-time-step') || '0', 10);\n"
|
| 323 |
+
" const g = Math.floor(t / n);\n"
|
| 324 |
+
" if (g !== lastGroup) {\n"
|
| 325 |
+
" const div = document.createElement('div');\n"
|
| 326 |
+
" div.className = 'group-divider';\n"
|
| 327 |
+
" const label = document.createElement('span');\n"
|
| 328 |
+
" label.className = 'group-label';\n"
|
| 329 |
+
" const roundIndex = g + 1;\n"
|
| 330 |
+
" label.textContent = `Round ${roundIndex}`;\n"
|
| 331 |
+
" div.appendChild(label);\n"
|
| 332 |
+
" frag.appendChild(div);\n"
|
| 333 |
+
" lastGroup = g;\n"
|
| 334 |
+
" }\n"
|
| 335 |
+
" frag.appendChild(el);\n"
|
| 336 |
+
" }\n"
|
| 337 |
+
" container.innerHTML = '';\n"
|
| 338 |
+
" container.appendChild(frag);\n"
|
| 339 |
+
" container.classList.toggle('hide-ts-badges', n === 1);\n"
|
| 340 |
+
" container.classList.toggle('strong-hide', strongHideOn);\n"
|
| 341 |
+
" }\n"
|
| 342 |
+
" for (const flow of activeFlows()) {\n"
|
| 343 |
+
" if (flow.id === 'flow-split') {\n"
|
| 344 |
+
" // Snapshot original turns once to avoid drift on repeated grouping\n"
|
| 345 |
+
" const getOriginalTurns = () => {\n"
|
| 346 |
+
" if (!flow.dataset.origData) {\n"
|
| 347 |
+
" const data = [];\n"
|
| 348 |
+
" const cols0 = flow.querySelectorAll('.split-col');\n"
|
| 349 |
+
" cols0.forEach(col => {\n"
|
| 350 |
+
" const agent = col.getAttribute('data-agent') || '';\n"
|
| 351 |
+
" col.querySelectorAll(':scope > .chat-turn').forEach(el => {\n"
|
| 352 |
+
" const t = parseInt(el.getAttribute('data-time-step')||'0',10);\n"
|
| 353 |
+
" data.push({agent, time:t, html: el.outerHTML});\n"
|
| 354 |
+
" });\n"
|
| 355 |
+
" });\n"
|
| 356 |
+
" flow.dataset.origData = JSON.stringify(data);\n"
|
| 357 |
+
" }\n"
|
| 358 |
+
" return JSON.parse(flow.dataset.origData);\n"
|
| 359 |
+
" };\n"
|
| 360 |
+
" const original = getOriginalTurns();\n"
|
| 361 |
+
" const agents = Array.from(new Set(original.map(o => o.agent))).sort();\n"
|
| 362 |
+
" const groups = new Map();\n"
|
| 363 |
+
" original.forEach(o => {\n"
|
| 364 |
+
" const g = n && n > 0 ? Math.floor(o.time / n) : 0;\n"
|
| 365 |
+
" if (!groups.has(g)) groups.set(g, new Map());\n"
|
| 366 |
+
" const gm = groups.get(g);\n"
|
| 367 |
+
" if (!gm.has(o.agent)) gm.set(o.agent, []);\n"
|
| 368 |
+
" gm.get(o.agent).push(o);\n"
|
| 369 |
+
" });\n"
|
| 370 |
+
" flow.innerHTML = '';\n"
|
| 371 |
+
" const sorted = Array.from(groups.keys()).sort((a,b)=>a-b);\n"
|
| 372 |
+
" sorted.forEach(g => {\n"
|
| 373 |
+
" const div = document.createElement('div');\n"
|
| 374 |
+
" div.className = 'group-divider';\n"
|
| 375 |
+
" const label = document.createElement('span');\n"
|
| 376 |
+
" label.className = 'group-label';\n"
|
| 377 |
+
" label.textContent = `Round ${g+1}`;\n"
|
| 378 |
+
" div.appendChild(label);\n"
|
| 379 |
+
" flow.appendChild(div);\n"
|
| 380 |
+
" const wrapper = document.createElement('div');\n"
|
| 381 |
+
" wrapper.className = 'split-wrapper';\n"
|
| 382 |
+
" agents.forEach(agent => {\n"
|
| 383 |
+
" const colDiv = document.createElement('div');\n"
|
| 384 |
+
" colDiv.className = 'split-col';\n"
|
| 385 |
+
" colDiv.setAttribute('data-agent', agent);\n"
|
| 386 |
+
" (groups.get(g).get(agent) || []).forEach(o => { colDiv.insertAdjacentHTML('beforeend', o.html); });\n"
|
| 387 |
+
" wrapper.appendChild(colDiv);\n"
|
| 388 |
+
" });\n"
|
| 389 |
+
" if (wrapper.children.length === 2) { const res = document.createElement('div'); res.className='split-resizer'; wrapper.insertBefore(res, wrapper.children[1]); }\n"
|
| 390 |
+
" flow.appendChild(wrapper);\n"
|
| 391 |
+
" });\n"
|
| 392 |
+
" flow.classList.toggle('hide-ts-badges', n === 1);\n"
|
| 393 |
+
" flow.classList.toggle('strong-hide', strongHideOn);\n"
|
| 394 |
+
" document.body.classList.add('split-mode');\n"
|
| 395 |
+
" } else {\n"
|
| 396 |
+
" groupContainer(flow, n);\n"
|
| 397 |
+
" }\n"
|
| 398 |
+
" }\n"
|
| 399 |
+
" applyRangeFilter();\n"
|
| 400 |
+
" initSplitResizers();\n"
|
| 401 |
+
" }\n"
|
| 402 |
+
" function initSplitResizers() {\n"
|
| 403 |
+
" const wrappers = document.querySelectorAll('#flow-split .split-wrapper');\n"
|
| 404 |
+
" wrappers.forEach(wrap => {\n"
|
| 405 |
+
" const resizer = wrap.querySelector('.split-resizer');\n"
|
| 406 |
+
" if (!resizer || resizer.dataset.bound) return; resizer.dataset.bound='1';\n"
|
| 407 |
+
" const cols = wrap.querySelectorAll('.split-col'); if (cols.length !== 2) return; const c0=cols[0], c1=cols[1];\n"
|
| 408 |
+
" c0.style.flex=c1.style.flex='1 1 0'; c0.style.width=c1.style.width='';\n"
|
| 409 |
+
" requestAnimationFrame(()=>{ const w0=c0.scrollWidth,w1=c1.scrollWidth,total=w0+w1||1; let p0=w0/total,p1=w1/total; const minP=0.25,maxP=0.75; if(p0<minP){p0=minP;p1=1-p0;} else if(p0>maxP){p0=maxP;p1=1-p0;} c0.style.flex='0 0 '+(p0*100).toFixed(2)+'%'; c1.style.flex='0 0 '+(p1*100).toFixed(2)+'%'; });\n"
|
| 410 |
+
" let dragging=false,startX=0,startP0=0;\n"
|
| 411 |
+
" const onDown=e=>{ dragging=true; startX=e.clientX; wrap.classList.add('resizing'); resizer.classList.add('dragging'); const rect=wrap.getBoundingClientRect(); const w=rect.width; const c0Rect=c0.getBoundingClientRect(); startP0=c0Rect.width/w; document.body.style.cursor='col-resize'; e.preventDefault(); };\n"
|
| 412 |
+
" const onMove=e=>{ if(!dragging)return; const rect=wrap.getBoundingClientRect(); const w=rect.width; let delta=(e.clientX-startX)/w; let newP0=startP0+delta; const minP=0.15,maxP=0.85; if(newP0<minP)newP0=minP; if(newP0>maxP)newP0=maxP; c0.style.flex='0 0 '+(newP0*100).toFixed(2)+'%'; c1.style.flex='0 0 '+((1-newP0)*100).toFixed(2)+'%'; };\n"
|
| 413 |
+
" const onUp=()=>{ if(!dragging)return; dragging=false; wrap.classList.remove('resizing'); resizer.classList.remove('dragging'); document.body.style.cursor=''; };\n"
|
| 414 |
+
" resizer.addEventListener('mousedown', onDown); window.addEventListener('mousemove', onMove); window.addEventListener('mouseup', onUp);\n"
|
| 415 |
+
" resizer.addEventListener('dblclick', e=>{ if(e.shiftKey){ c0.style.flex=c1.style.flex='1 1 0'; requestAnimationFrame(()=>{ const w0=c0.scrollWidth,w1=c1.scrollWidth,total=w0+w1||1; let p0=w0/total,p1=w1/total; const minP=0.25,maxP=0.75; if(p0<minP){p0=minP;p1=1-p0;} else if(p0>maxP){p0=maxP;p1=1-p0;} c0.style.flex='0 0 '+(p0*100).toFixed(2)+'%'; c1.style.flex='0 0 '+(p1*100).toFixed(2)+'%'; }); } else { c0.style.flex='0 0 50%'; c1.style.flex='0 0 50%'; } });\n"
|
| 416 |
+
" });\n"
|
| 417 |
+
" }\n"
|
| 418 |
+
" initSplitResizers();\n"
|
| 419 |
+
" const input = document.getElementById('group-size');\n"
|
| 420 |
+
" const btn = document.getElementById('apply-grouping');\n"
|
| 421 |
+
" if (btn && input) {\n"
|
| 422 |
+
" btn.addEventListener('click', () => { const n = parseInt(input.value || '0', 10); applyGrouping(n); });\n"
|
| 423 |
+
" input.addEventListener('keydown', (e) => { if (e.key === 'Enter') { const n = parseInt(input.value || '0', 10); applyGrouping(n); } });\n"
|
| 424 |
+
" }\n"
|
| 425 |
+
" if (input) { input.value = '1'; applyGrouping(1); }\n"
|
| 426 |
+
" const rangeStart = document.getElementById('range-start');\n"
|
| 427 |
+
" const rangeEnd = document.getElementById('range-end');\n"
|
| 428 |
+
" const rangeBtn = document.getElementById('apply-range');\n"
|
| 429 |
+
" if (rangeBtn && rangeStart && rangeEnd) {\n"
|
| 430 |
+
" const applyRange = () => {\n"
|
| 431 |
+
" const sv = parseInt(rangeStart.value || '', 10);\n"
|
| 432 |
+
" const ev = parseInt(rangeEnd.value || '', 10);\n"
|
| 433 |
+
" currentRangeStart = Number.isFinite(sv) ? sv : null;\n"
|
| 434 |
+
" currentRangeEnd = Number.isFinite(ev) ? ev : null;\n"
|
| 435 |
+
" applyRangeFilter();\n"
|
| 436 |
+
" };\n"
|
| 437 |
+
" rangeBtn.addEventListener('click', applyRange);\n"
|
| 438 |
+
" rangeStart.addEventListener('keydown', (e) => { if (e.key === 'Enter') applyRange(); });\n"
|
| 439 |
+
" rangeEnd.addEventListener('keydown', (e) => { if (e.key === 'Enter') applyRange(); });\n"
|
| 440 |
+
" }\n"
|
| 441 |
+
" const strongHideBtn = document.getElementById('toggle-strong-hide');\n"
|
| 442 |
+
" const strongHideStateEl = document.getElementById('strong-hide-state');\n"
|
| 443 |
+
" if (strongHideBtn) {\n"
|
| 444 |
+
" const setLabel = () => { if (strongHideStateEl) { strongHideStateEl.textContent = strongHideOn ? 'On' : 'Off'; } };\n"
|
| 445 |
+
" strongHideBtn.addEventListener('click', () => { strongHideOn = !strongHideOn; for (const f of activeFlows()) { f.classList.toggle('strong-hide', strongHideOn); } setLabel(); });\n"
|
| 446 |
+
" if (strongHideOn) { for (const f of activeFlows()) { f.classList.add('strong-hide'); } }\n"
|
| 447 |
+
" setLabel();\n"
|
| 448 |
+
" }\n"
|
| 449 |
+
" const splitBtn = document.getElementById('toggle-split-view');\n"
|
| 450 |
+
" const splitStateEl = document.getElementById('split-view-state');\n"
|
| 451 |
+
" if (splitBtn && splitFlow && linearFlow) {\n"
|
| 452 |
+
" const updateSplit = () => { if (splitStateEl) splitStateEl.textContent = splitViewOn ? 'On' : 'Off'; };\n"
|
| 453 |
+
" splitBtn.addEventListener('click', () => { splitViewOn = !splitViewOn; linearFlow.style.display = splitViewOn ? 'none' : ''; splitFlow.style.display = splitViewOn ? '' : 'none'; applyGrouping(parseInt(input.value||'1',10)); updateSplit(); });\n"
|
| 454 |
+
" updateSplit();\n"
|
| 455 |
+
" }\n"
|
| 456 |
+
"});\n"
|
| 457 |
+
"</script>",
|
| 458 |
+
"</head>",
|
| 459 |
+
"<body>",
|
| 460 |
+
'<div class="toolbar-wrap">',
|
| 461 |
+
'<div class="toolbar-hotzone"></div>',
|
| 462 |
+
'<div class="toolbar">',
|
| 463 |
+
'<label for="group-size">Group every</label>',
|
| 464 |
+
'<input id="group-size" type="number" min="0" step="1" value="1" />',
|
| 465 |
+
"<span>timesteps</span>",
|
| 466 |
+
'<button id="apply-grouping">Apply</button>',
|
| 467 |
+
'<span style="margin-left:8px"></span>',
|
| 468 |
+
'<label for="range-start"><span class="emoji-bw">🔎</span> Range</label>',
|
| 469 |
+
'<input id="range-start" type="number" step="1" />',
|
| 470 |
+
"<span>to</span>",
|
| 471 |
+
'<input id="range-end" type="number" step="1" />',
|
| 472 |
+
'<button id="apply-range"><span class="emoji-bw">▶︎</span> Apply</button>',
|
| 473 |
+
'<button id="toggle-strong-hide"><span class="emoji-bw">🗜️</span> Strong Hide: <span id="strong-hide-state">Off</span></button>',
|
| 474 |
+
(
|
| 475 |
+
'<button id="toggle-split-view"><span class="emoji-bw">🪟</span> Split View: <span id="split-view-state">Off</span></button>'
|
| 476 |
+
if enable_split_view
|
| 477 |
+
else ""
|
| 478 |
+
),
|
| 479 |
+
"</div>",
|
| 480 |
+
"</div>",
|
| 481 |
+
'<div id="flow-linear" class="messages-flow">',
|
| 482 |
+
]
|
| 483 |
+
|
| 484 |
+
last_time_step = None
|
| 485 |
+
for original_index, turn in indexed_turns:
|
| 486 |
+
# Build classes
|
| 487 |
+
agent_class = f"agent-{re.sub('[^a-z0-9_-]', '-', turn.agent_id.lower())}"
|
| 488 |
+
role_class = f"role-{turn.role}"
|
| 489 |
+
collapsed_class = " collapsed" if turn.role == "user" else ""
|
| 490 |
+
|
| 491 |
+
# Badge content
|
| 492 |
+
if turn.role == "assistant":
|
| 493 |
+
name = html.escape(turn.agent_id)
|
| 494 |
+
emoji = '<span class="emoji-bw"> 🤖</span>'
|
| 495 |
+
raw_val = turn.reward
|
| 496 |
+
if isinstance(raw_val, (int, float)):
|
| 497 |
+
reward_val = f"{raw_val:.4f}".rstrip("0").rstrip(".")
|
| 498 |
+
if len(reward_val) > 8:
|
| 499 |
+
reward_val = reward_val[:8] + "…"
|
| 500 |
+
else:
|
| 501 |
+
reward_val = str(raw_val)
|
| 502 |
+
# Format: "🤖 Alice • Reward: 5.5556 • 💬 :"
|
| 503 |
+
badge_inner = (
|
| 504 |
+
f'{emoji} <span class="agent-name">{name}</span>'
|
| 505 |
+
f' <span class="sep"> • </span><span class="reward">Reward ⚑ = {reward_val}</span>'
|
| 506 |
+
)
|
| 507 |
+
else:
|
| 508 |
+
# For user messages, show "User of {Agent ID}" in the badge
|
| 509 |
+
name = "User of " + html.escape(turn.agent_id)
|
| 510 |
+
emoji = '<span class="emoji-bw">⚙️</span>'
|
| 511 |
+
# Format (no reward): "⚙️ User of Alice • "
|
| 512 |
+
badge_inner = f'{emoji} <span class="agent-name">{name}</span> <span class="sep"> • </span>:'
|
| 513 |
+
|
| 514 |
+
badge = f'<span class="agent-badge">{badge_inner}</span>'
|
| 515 |
+
|
| 516 |
+
# Inline timestep distinction badge at step boundaries (render before first message)
|
| 517 |
+
ts_badge_html = ""
|
| 518 |
+
if last_time_step is None or turn.time_step != last_time_step:
|
| 519 |
+
ts_badge_html = f'<span class="ts-badge">⏱ {turn.time_step}</span>'
|
| 520 |
+
last_time_step = turn.time_step
|
| 521 |
+
|
| 522 |
+
escaped_content = html.escape(turn.content)
|
| 523 |
+
reasoning_html = ""
|
| 524 |
+
if turn.reasoning_content:
|
| 525 |
+
# Normalize reasoning to avoid leading/newline whitespace that creates visual gaps
|
| 526 |
+
_raw_reasoning = turn.reasoning_content.replace("\r\n", "\n")
|
| 527 |
+
_raw_reasoning = _re.sub(
|
| 528 |
+
r"^\s*\n+", "", _raw_reasoning
|
| 529 |
+
) # drop leading blank lines
|
| 530 |
+
_raw_reasoning = _re.sub(
|
| 531 |
+
r"\*\*(\s*\n\s*)", r"** ", _raw_reasoning
|
| 532 |
+
) # newline right after **
|
| 533 |
+
_raw_reasoning = _re.sub(
|
| 534 |
+
r"(\s*\n\s*)\*\*", r" **", _raw_reasoning
|
| 535 |
+
) # newline right before **
|
| 536 |
+
escaped_reasoning = html.escape(_raw_reasoning)
|
| 537 |
+
reasoning_html = f'<span class="reasoning-inline"><span class="reasoning-icon">💭</span><span class="reasoning-text">{escaped_reasoning}</span></span>'
|
| 538 |
+
collapsed_text = re.sub(r"\s+", " ", escaped_content).strip()
|
| 539 |
+
|
| 540 |
+
html_parts.append(
|
| 541 |
+
f'<div class="chat-turn {agent_class} {role_class}{collapsed_class}" data-time-step="{turn.time_step}">'
|
| 542 |
+
f'<div class="turn-content {agent_class} {role_class}">{ts_badge_html}{badge}'
|
| 543 |
+
f'<span class="message-box">{reasoning_html}<span class="main-content">💬 {collapsed_text}</span></span>'
|
| 544 |
+
f'<span class="message-placeholder">(...)</span>'
|
| 545 |
+
f"</div>"
|
| 546 |
+
f"</div>"
|
| 547 |
+
)
|
| 548 |
+
|
| 549 |
+
html_parts.append("</div>") # close linear flow
|
| 550 |
+
if enable_split_view:
|
| 551 |
+
import html as _html_mod
|
| 552 |
+
|
| 553 |
+
html_parts.append(
|
| 554 |
+
'<div id="flow-split" class="messages-flow" style="display:none">'
|
| 555 |
+
)
|
| 556 |
+
html_parts.append('<div class="split-wrapper">')
|
| 557 |
+
# Per-agent columns
|
| 558 |
+
per_agent_turns = {
|
| 559 |
+
aid: [t for t in chat_turns if t.agent_id == aid]
|
| 560 |
+
for aid in assistant_agents
|
| 561 |
+
}
|
| 562 |
+
for idx, aid in enumerate(assistant_agents):
|
| 563 |
+
turns_agent = per_agent_turns[aid]
|
| 564 |
+
html_parts.append(
|
| 565 |
+
f'<div class="split-col" data-agent="{_html_mod.escape(aid)}">'
|
| 566 |
+
)
|
| 567 |
+
last_ts_agent = None
|
| 568 |
+
for turn in turns_agent:
|
| 569 |
+
agent_class = (
|
| 570 |
+
f"agent-{re.sub('[^a-z0-9_-]', '-', turn.agent_id.lower())}"
|
| 571 |
+
)
|
| 572 |
+
role_class = f"role-{turn.role}"
|
| 573 |
+
collapsed_class = " collapsed" if turn.role == "user" else ""
|
| 574 |
+
ts_badge_html = ""
|
| 575 |
+
if last_ts_agent is None or turn.time_step != last_ts_agent:
|
| 576 |
+
ts_badge_html = f'<span class="ts-badge">⏱ {turn.time_step}</span>'
|
| 577 |
+
last_ts_agent = turn.time_step
|
| 578 |
+
esc_content = _html_mod.escape(turn.content)
|
| 579 |
+
reasoning_html = ""
|
| 580 |
+
if turn.reasoning_content:
|
| 581 |
+
_raw_reasoning = turn.reasoning_content.replace("\r\n", "\n")
|
| 582 |
+
_raw_reasoning = _re.sub(r"^\s*\n+", "", _raw_reasoning)
|
| 583 |
+
_raw_reasoning = _re.sub(r"\*\*(\s*\n\s*)", r"** ", _raw_reasoning)
|
| 584 |
+
_raw_reasoning = _re.sub(r"(\s*\n\s*)\*\*", r" **", _raw_reasoning)
|
| 585 |
+
esc_reasoning = _html_mod.escape(_raw_reasoning)
|
| 586 |
+
reasoning_html = f'<span class="reasoning-inline"><span class="reasoning-icon">💭</span><span class="reasoning-text">{esc_reasoning}</span></span>'
|
| 587 |
+
collapsed_text = re.sub(r"\s+", " ", esc_content).strip()
|
| 588 |
+
if turn.role == "assistant":
|
| 589 |
+
name = _html_mod.escape(turn.agent_id)
|
| 590 |
+
emoji = '<span class="emoji-bw"> 🤖</span>'
|
| 591 |
+
raw_val = turn.reward
|
| 592 |
+
if isinstance(raw_val, (int, float)):
|
| 593 |
+
reward_val = f"{raw_val:.4f}".rstrip("0").rstrip(".")
|
| 594 |
+
if len(reward_val) > 8:
|
| 595 |
+
reward_val = reward_val[:8] + "…"
|
| 596 |
+
else:
|
| 597 |
+
reward_val = str(raw_val)
|
| 598 |
+
badge_inner = (
|
| 599 |
+
f'{emoji} <span class="agent-name">{name}</span>'
|
| 600 |
+
f' <span class="sep"> • </span><span class="reward">Reward ⚑ : {reward_val}</span>'
|
| 601 |
+
)
|
| 602 |
+
else:
|
| 603 |
+
name = "User of " + _html_mod.escape(turn.agent_id)
|
| 604 |
+
emoji = '<span class="emoji-bw">⚙️</span>'
|
| 605 |
+
badge_inner = f'{emoji} <span class="agent-name">{name}</span> <span class="sep"> • </span>:'
|
| 606 |
+
badge = f'<span class="agent-badge">{badge_inner}</span>'
|
| 607 |
+
html_parts.append(
|
| 608 |
+
f'<div class="chat-turn {agent_class} {role_class}{collapsed_class}" data-time-step="{turn.time_step}">'
|
| 609 |
+
f'<div class="turn-content {agent_class} {role_class}">{ts_badge_html}{badge}'
|
| 610 |
+
f'<span class="message-box">{reasoning_html}<span class="main-content">💬 {collapsed_text}</span></span>'
|
| 611 |
+
f'<span class="message-placeholder">(...)</span>'
|
| 612 |
+
f"</div></div>"
|
| 613 |
+
)
|
| 614 |
+
html_parts.append("</div>") # close split col
|
| 615 |
+
html_parts.append("</div>") # split-wrapper
|
| 616 |
+
html_parts.append("</div>") # flow-split
|
| 617 |
+
html_parts.extend(["</body>", "</html>"])
|
| 618 |
+
|
| 619 |
+
return "\n".join(html_parts)
|
| 620 |
+
|
| 621 |
+
|
| 622 |
+
def export_html_from_rollout_tree(path: Path, outdir: Path, main_only: bool = False):
    """Process a rollout tree file and generate HTML files for each path.

    Creates separate HTML files for the main path and each branch path.
    The main path is saved in the root output directory, while branch paths
    are saved in a 'branches' subdirectory.

    Args:
        path: Path to the rollout tree file (loaded via load_rollout_tree)
        outdir: Output directory for HTML files
        main_only: If True, only export the main trajectory (default: False)
    """
    root = load_rollout_tree(path)
    mgid = root.id

    main_path, branch_paths = get_rollout_tree_paths(root)

    outdir.mkdir(parents=True, exist_ok=True)

    # Generate HTML for the main path
    chat_turns = gather_all_chat_turns_for_path(main_path)
    html_content = html_from_chat_turns(chat_turns)
    output_file = outdir / f"mgid:{mgid}_main_html_render.render.html"
    with open(output_file, "w", encoding="utf-8") as f:
        f.write(html_content)

    # Nothing more to do when only the main trajectory was requested or no
    # branches exist.
    # BUG FIX: previously the branch loop below ran even when main_only=True,
    # in which case branches_dir was never defined and a NameError was raised.
    if main_only or not branch_paths:
        return

    # Branch renders live in a dedicated subdirectory.
    branches_dir = outdir / f"mgid:{mgid}_branches_html_renders"
    branches_dir.mkdir(parents=True, exist_ok=True)

    # Generate HTML for each branch path
    for path_obj in branch_paths:
        chat_turns = gather_all_chat_turns_for_path(path_obj)
        html_content = html_from_chat_turns(chat_turns)

        path_id: str = path_obj.id
        output_filename = f"{path_id}_html_render.render.html"
        output_file = branches_dir / output_filename

        with open(output_file, "w", encoding="utf-8") as f:
            f.write(html_content)
|
src_code_for_reproducibility/utils/rollout_tree_gather_utils.py
ADDED
|
@@ -0,0 +1,314 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import csv
|
| 4 |
+
import os
|
| 5 |
+
import pickle
|
| 6 |
+
import re
|
| 7 |
+
from collections import defaultdict
|
| 8 |
+
from dataclasses import dataclass
|
| 9 |
+
from pathlib import Path
|
| 10 |
+
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple
|
| 11 |
+
|
| 12 |
+
from mllm.markov_games.rollout_tree import *
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def load_rollout_tree(path: Path) -> RolloutTreeRootNode:
    """Deserialize a pickled rollout-tree dict and validate it into a root node."""
    with path.open("rb") as handle:
        raw = pickle.load(handle)
    return RolloutTreeRootNode.model_validate(raw)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
@dataclass
class RolloutNodeList:
    """A linear sequence of rollout-tree nodes identified by a path id."""

    # Path identifier, e.g. "mgid:<id>_type:main" or a branch id.
    id: str
    # Nodes along the path, ordered from root towards the leaf.
    nodes: List[RolloutTreeNode]
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def get_rollout_tree_paths(
    root: RolloutTreeRootNode, mgid: Optional[str] = None
) -> Tuple[RolloutNodeList, List[RolloutNodeList]]:
    """
    Extract the main trajectory and all branch trajectories from a rollout tree.

    Args:
        root: Rollout tree root to walk.
        mgid: Optional id used in generated path ids; falls back to str(root.id).

    Returns:
        main_path: The main path from the root to the end of the tree.
        branch_paths: A list of all branch paths from the root to the end of the tree.
            Each branch path contains a list of nodes that are part of the branch, including the nodes from the main path before the branch was taken.
    """
    branch_paths = []

    def collect_path_nodes(current) -> List[RolloutTreeNode]:
        """Recursively collect all nodes in a path starting from current node."""
        if current is None:
            return []

        if isinstance(current, RolloutTreeNode):
            return [current] + collect_path_nodes(current.child)

        elif isinstance(current, RolloutTreeBranchNode):
            # For branch nodes, we only follow the main_child for path collection
            if current.main_child:
                return [current.main_child] + collect_path_nodes(
                    current.main_child.child
                )
            else:
                return []
        # NOTE(review): falls through to an implicit None for any other node
        # type — callers assume only the two types above occur; confirm.

    def traverse_for_branches(
        current,
        main_path_prefix: List[RolloutTreeNode],
        path_id: str,
        current_time_step: Optional[int] = 0,
    ):
        """Traverse tree to collect all branch paths.

        Appends to the enclosing `branch_paths` list as a side effect.
        NOTE(review): `path_id` is threaded through recursion but never read.
        """
        if current is None:
            return

        if isinstance(current, RolloutTreeNode):
            # Continue traversing with this node added to the main path prefix
            new_prefix = main_path_prefix + [current]
            traverse_for_branches(current.child, new_prefix, path_id, current.time_step)

        elif isinstance(current, RolloutTreeBranchNode):
            # Collect all branch paths
            if current.branches:
                for agent_id, branch_node_list in current.branches.items():
                    if branch_node_list:
                        # Start with the main path prefix, then recursively collect all nodes in this branch
                        branch_path_nodes = main_path_prefix.copy()
                        for branch_node in branch_node_list:
                            branch_path_nodes.extend(collect_path_nodes(branch_node))

                        # Create proper branch path ID with mgid, agent_id, and time_step
                        mgid_str = mgid or str(root.id)
                        branch_path_id = f"mgid:{mgid_str}_type:branch_agent:{agent_id}_time_step:{current_time_step}"
                        branch_paths.append(
                            RolloutNodeList(id=branch_path_id, nodes=branch_path_nodes)
                        )

            # Process the main child and add to prefix
            new_prefix = main_path_prefix
            if current.main_child:
                new_prefix = main_path_prefix + [current.main_child]

            # Continue traversing the main path
            if current.main_child:
                traverse_for_branches(
                    current.main_child.child,
                    new_prefix,
                    path_id,
                    current.main_child.time_step,
                )

    # Collect the main path nodes
    main_path_nodes = collect_path_nodes(root.child)

    # Traverse to collect all branch paths
    traverse_for_branches(root.child, [], "")

    # Create the main path with proper mgid format
    mgid_str = mgid or str(root.id)
    main_path = RolloutNodeList(id=f"mgid:{mgid_str}_type:main", nodes=main_path_nodes)

    return main_path, branch_paths
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
class ChatTurnLog(BaseModel):
    """A single chat message extracted from a rollout node, tagged with context."""

    # Environment time step of the node this turn was taken from.
    time_step: int
    # Agent whose action log produced this turn.
    agent_id: str
    # Chat role, e.g. "user" or "assistant".
    role: str
    # Message text.
    content: str
    # Optional reasoning/chain-of-thought text when the source turn carries one.
    reasoning_content: Optional[str] = None
    # Flag copied verbatim from the source chat turn (semantics defined upstream).
    is_state_end: bool
    # Reward recorded for the agent at this node (0 when absent).
    reward: float
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def gather_agent_chat_turns_for_path(
    agent_id: str, path: RolloutNodeList
) -> List[ChatTurnLog]:
    """Iterate through all chat turns for a specific agent in a path sorted by time step.

    Args:
        agent_id: Agent whose action logs are extracted.
        path: Linear sequence of rollout nodes to scan.

    Returns:
        One ChatTurnLog per recorded chat turn for the agent, in node order.
    """
    turns = []
    for node in path.nodes:
        # Use None (not []) as the miss default: the previous `[]` default had
        # no `.chat_turns` attribute and only avoided an AttributeError because
        # an empty list is falsy. None matches gather_all_chat_turns_for_path.
        action_log = node.step_log.action_logs.get(agent_id)
        if action_log:
            for chat_turn in action_log.chat_turns or []:
                turns.append(
                    ChatTurnLog(
                        time_step=node.time_step,
                        agent_id=agent_id,
                        role=chat_turn.role,
                        content=chat_turn.content,
                        reasoning_content=getattr(chat_turn, "reasoning_content", None),
                        is_state_end=chat_turn.is_state_end,
                        # Missing reward entries default to 0.
                        reward=node.step_log.simulation_step_log.rewards.get(
                            agent_id, 0
                        ),
                    )
                )
    return turns
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
def gather_all_chat_turns_for_path(path: RolloutNodeList) -> List[ChatTurnLog]:
    """Iterate through all chat turns for all agents in a path sorted by time step.

    Within each time step, turns are grouped into (user[, assistant]) pairs per
    agent and then interleaved round-robin across agents (A1, B1, A2, B2, ...),
    so each agent's exchange units alternate rather than appearing in one block.
    """
    turns = []

    # Collect turns from all agents, but interleave them per timestep by (user, assistant) pairs
    for node in path.nodes:
        # Build (user[, assistant]) pairs for each agent at this timestep
        agent_ids = sorted(list(node.step_log.action_logs.keys()))
        per_agent_pairs: Dict[str, List[List[ChatTurnLog]]] = {}

        for agent_id in agent_ids:
            action_log = node.step_log.action_logs.get(agent_id)
            pairs: List[List[ChatTurnLog]] = []
            # An "open" pair: a user turn waiting for its assistant reply.
            current_pair: List[ChatTurnLog] = []

            if action_log and action_log.chat_turns:
                for chat_turn in action_log.chat_turns:
                    turn_log = ChatTurnLog(
                        time_step=node.time_step,
                        agent_id=agent_id,
                        role=chat_turn.role,
                        content=chat_turn.content,
                        reasoning_content=getattr(chat_turn, "reasoning_content", None),
                        is_state_end=chat_turn.is_state_end,
                        # Missing reward entries default to 0.
                        reward=node.step_log.simulation_step_log.rewards.get(
                            agent_id, 0
                        ),
                    )

                    if chat_turn.role == "user":
                        # If a previous pair is open, close it and start a new one
                        if current_pair:
                            pairs.append(current_pair)
                            current_pair = []
                        current_pair = [turn_log]
                    else:
                        # assistant: attach to an open user message if present; otherwise stand alone
                        if (
                            current_pair
                            and len(current_pair) == 1
                            and current_pair[0].role == "user"
                        ):
                            current_pair.append(turn_log)
                            pairs.append(current_pair)
                            current_pair = []
                        else:
                            # No preceding user or already paired; treat as its own unit
                            pairs.append([turn_log])

            if current_pair:
                # Unpaired trailing user message
                pairs.append(current_pair)

            per_agent_pairs[agent_id] = pairs

        # Interleave pairs across agents: A1, B1, A2, B2, ...
        index = 0
        while True:
            added_any = False
            for agent_id in agent_ids:
                agent_pairs = per_agent_pairs.get(agent_id, [])
                if index < len(agent_pairs):
                    for tl in agent_pairs[index]:
                        turns.append(tl)
                    added_any = True
            # Stop once every agent has exhausted its pairs for this node.
            if not added_any:
                break
            index += 1

    return turns
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
def chat_turns_to_dict(chat_turns: Iterator[ChatTurnLog]) -> Iterator[Dict[str, Any]]:
    """Render all chat turns for a path as structured data for JSON."""
    yield from (turn.model_dump() for turn in chat_turns)
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
def get_all_agents(root: RolloutTreeRootNode) -> List[str]:
    """Return the sorted list of all agent IDs that appear in the tree."""
    node = root.child
    if node is None:
        return []

    # A branch wrapper may sit at the front; step into its main child.
    if isinstance(node, RolloutTreeBranchNode):
        node = node.main_child

    if node is None:
        return []

    # Every agent is expected to appear in the first step's logs, either in
    # the action logs or in the reward mapping.
    step_log = node.step_log
    agent_ids = set(step_log.action_logs.keys()) | set(
        step_log.simulation_step_log.rewards.keys()
    )
    return sorted(agent_ids)
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
def gather_agent_main_rewards(agent_id: str, path: RolloutNodeList) -> List[float]:
    """Gather main rewards for a specific agent in a path.

    Raises KeyError if a node has no reward entry for the agent.
    """
    return [
        node.step_log.simulation_step_log.rewards[agent_id] for node in path.nodes
    ]
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
def gather_all_rewards(path: RolloutNodeList) -> List[Dict[AgentId, float]]:
    """Gather per-agent reward mappings from the main trajectory in a path.

    Each element is a shallow copy, so mutating it does not touch the tree.
    """
    return [dict(node.step_log.simulation_step_log.rewards) for node in path.nodes]
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
def gather_simulation_stats(
    path: RolloutNodeList,
    filter: Callable[[SimulationStepLog], bool],
    stat_func: Callable[[SimulationStepLog], Any],
) -> List[Any]:
    """Gather stats from the main trajectory in a path.

    Applies `stat_func` to each simulation step log for which `filter`
    returns a truthy value, preserving node order.
    """
    return [
        stat_func(node.step_log.simulation_step_log)
        for node in path.nodes
        if filter(node.step_log.simulation_step_log)
    ]
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
def gather_simulation_step_logs(path: RolloutNodeList) -> List[SimulationStepLog]:
    """Gather the simulation step log of every node along a path, in order."""
    return [node.step_log.simulation_step_log for node in path.nodes]
|
| 288 |
+
|
| 289 |
+
|
| 290 |
+
def export_chat_logs(path: Path, outdir: Path):
    """Process a rollout tree PKL file and generate a JSONL of chat turns as dicts.

    Each line contains an object with path_id and chat_turns for a single path
    (the main path first, then every branch path).
    """
    import json

    tree_root = load_rollout_tree(path)
    mgid = tree_root.id

    main_path, branch_paths = get_rollout_tree_paths(tree_root)

    outdir.mkdir(parents=True, exist_ok=True)
    output_file = outdir / f"mgid:{mgid}_plucked_chats.render.jsonl"

    with open(output_file, "w", encoding="utf-8") as sink:
        for current_path in [main_path] + branch_paths:
            record = {
                "path_id": str(current_path.id),
                "chat_turns": list(
                    chat_turns_to_dict(iter(gather_all_chat_turns_for_path(current_path)))
                ),
            }
            sink.write(json.dumps(record, ensure_ascii=False) + "\n")
|
| 313 |
+
|
| 314 |
+
|
src_code_for_reproducibility/utils/rollout_tree_stats.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Callable, List, Tuple
|
| 2 |
+
|
| 3 |
+
from mllm.markov_games.rollout_tree import RolloutTreeRootNode
|
| 4 |
+
from mllm.markov_games.simulation import SimulationStepLog
|
| 5 |
+
from mllm.utils.rollout_tree_gather_utils import (
|
| 6 |
+
gather_simulation_step_logs,
|
| 7 |
+
get_rollout_tree_paths,
|
| 8 |
+
)
|
| 9 |
+
from mllm.utils.stat_pack import StatPack
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def get_rollout_tree_stat_tally(
    rollout_tree: RolloutTreeRootNode,
    metrics: List[Callable[[SimulationStepLog], List[Tuple[str, float]]]],
) -> StatPack:
    """Accumulate every metric over the main-path simulation step logs.

    Each metric returns (key, value) pairs (or None to skip); every pair is
    added to the returned StatPack.
    """
    tally = StatPack()
    main_path = get_rollout_tree_paths(rollout_tree)[0]
    for step_log in gather_simulation_step_logs(main_path):
        for metric_fn in metrics:
            pairs = metric_fn(step_log)
            if pairs is None:
                continue
            for key, value in pairs:
                tally.add_stat(key, value)
    return tally
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def get_rollout_tree_mean_stats(
    rollout_tree: RolloutTreeRootNode, metrics: List[Callable[[SimulationStepLog], Any]]
) -> StatPack:
    """Get the mean stats for a rollout tree."""
    return get_rollout_tree_stat_tally(rollout_tree, metrics).mean()
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def get_mean_rollout_tree_stats(
    rollout_trees: List[RolloutTreeRootNode],
    metrics: List[Callable[[SimulationStepLog], Any]],
) -> StatPack:
    """Get the mean stats for a list of rollout trees.

    Averages each tree's per-key means, then averages those across trees.
    """
    # TODO complete this
    combined = StatPack()
    for tree in rollout_trees:
        combined.add_stats(get_rollout_tree_mean_stats(tree, metrics))
    return combined.mean()
|
src_code_for_reproducibility/utils/short_id_gen.py
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import uuid
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def generate_short_id() -> int:
    """
    Generates a short unique ID for tracking adapter versions.

    Returns:
        int: An 8-digit integer ID (the first eight decimal digits of a
        random UUID's integer form).
    """
    digits = str(uuid.uuid4().int)
    return int(digits[:8])
|
src_code_for_reproducibility/utils/stat_pack.py
ADDED
|
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import csv
|
| 2 |
+
import json
|
| 3 |
+
import os
|
| 4 |
+
import pickle
|
| 5 |
+
from collections import Counter
|
| 6 |
+
from copy import deepcopy
|
| 7 |
+
from locale import strcoll
|
| 8 |
+
from statistics import mean
|
| 9 |
+
from typing import Any, Dict, Iterator, List, Optional, Tuple, TypedDict
|
| 10 |
+
|
| 11 |
+
import matplotlib.pyplot as plt
|
| 12 |
+
import numpy as np
|
| 13 |
+
from torch.utils.tensorboard import SummaryWriter
|
| 14 |
+
|
| 15 |
+
# Applying a remote matplotlib style requires network access at import time.
# Fall back to the default style instead of failing the entire module import
# when the stylesheet cannot be fetched (offline runs, upstream outages).
try:
    plt.style.use(
        "https://raw.githubusercontent.com/dereckpiche/DedeStyle/refs/heads/main/dedestyle.mplstyle"
    )
except Exception:
    pass
|
| 18 |
+
|
| 19 |
+
import wandb
|
| 20 |
+
|
| 21 |
+
from . import wandb_utils
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class StatPack:
|
| 25 |
+
def __init__(self):
|
| 26 |
+
self.data = {}
|
| 27 |
+
|
| 28 |
+
def add_stat(self, key: str, value: float | int | None):
|
| 29 |
+
assert (
|
| 30 |
+
isinstance(value, float) or isinstance(value, int) or value is None
|
| 31 |
+
), f"Value {value} is not a valid type"
|
| 32 |
+
if key not in self.data:
|
| 33 |
+
self.data[key] = []
|
| 34 |
+
self.data[key].append(value)
|
| 35 |
+
|
| 36 |
+
def add_stats(self, other: "StatPack"):
|
| 37 |
+
for key in other.keys():
|
| 38 |
+
self.add_stat(key, other[key])
|
| 39 |
+
|
| 40 |
+
def __getitem__(self, key: str):
|
| 41 |
+
return self.data[key]
|
| 42 |
+
|
| 43 |
+
def __setitem__(self, key: str, value: Any):
|
| 44 |
+
self.data[key] = value
|
| 45 |
+
|
| 46 |
+
def __contains__(self, key: str):
|
| 47 |
+
return key in self.data
|
| 48 |
+
|
| 49 |
+
def __len__(self):
|
| 50 |
+
return len(self.data)
|
| 51 |
+
|
| 52 |
+
def __iter__(self):
|
| 53 |
+
return iter(self.data)
|
| 54 |
+
|
| 55 |
+
def keys(self):
|
| 56 |
+
return self.data.keys()
|
| 57 |
+
|
| 58 |
+
def values(self):
|
| 59 |
+
return self.data.values()
|
| 60 |
+
|
| 61 |
+
def items(self):
|
| 62 |
+
return self.data.items()
|
| 63 |
+
|
| 64 |
+
def mean(self):
|
| 65 |
+
mean_st = StatPack()
|
| 66 |
+
for key in self.keys():
|
| 67 |
+
if isinstance(self[key], list):
|
| 68 |
+
# TODO: exclude None values
|
| 69 |
+
non_none_values = [v for v in self[key] if v is not None]
|
| 70 |
+
if non_none_values:
|
| 71 |
+
mean_st[key] = np.mean(np.array(non_none_values))
|
| 72 |
+
else:
|
| 73 |
+
mean_st[key] = None
|
| 74 |
+
return mean_st
|
| 75 |
+
|
| 76 |
+
def store_plots(self, folder: str):
|
| 77 |
+
os.makedirs(folder, exist_ok=True)
|
| 78 |
+
for key in self.keys():
|
| 79 |
+
plt.figure(figsize=(10, 5))
|
| 80 |
+
plt.plot(self[key])
|
| 81 |
+
plt.title(key)
|
| 82 |
+
plt.savefig(os.path.join(folder, f"{key}.pdf"))
|
| 83 |
+
plt.close()
|
| 84 |
+
|
| 85 |
+
def store_numpy(self, folder: str):
|
| 86 |
+
os.makedirs(folder, exist_ok=True)
|
| 87 |
+
for key in self.keys():
|
| 88 |
+
# Sanitize filename components (avoid slashes, spaces, etc.)
|
| 89 |
+
safe_key = str(key).replace(os.sep, "_").replace("/", "_").replace(" ", "_")
|
| 90 |
+
values = self[key]
|
| 91 |
+
# Convert None to NaN for numpy compatibility
|
| 92 |
+
arr = np.array(
|
| 93 |
+
[(np.nan if (v is None) else v) for v in values], dtype=float
|
| 94 |
+
)
|
| 95 |
+
np.save(os.path.join(folder, f"{safe_key}.npy"), arr)
|
| 96 |
+
|
| 97 |
+
def store_json(self, folder: str, filename: str = "stats.json"):
|
| 98 |
+
os.makedirs(folder, exist_ok=True)
|
| 99 |
+
with open(os.path.join(folder, filename), "w") as f:
|
| 100 |
+
json.dump(self.data, f, indent=4)
|
| 101 |
+
|
| 102 |
+
def store_csv(self, folder: str):
|
| 103 |
+
os.makedirs(folder, exist_ok=True)
|
| 104 |
+
for key in self.keys():
|
| 105 |
+
with open(os.path.join(folder, f"stats.csv"), "w") as f:
|
| 106 |
+
writer = csv.writer(f)
|
| 107 |
+
writer.writerow([key] + self[key])
|
| 108 |
+
|
| 109 |
+
def store_pickle(self, folder: str):
|
| 110 |
+
os.makedirs(folder, exist_ok=True)
|
| 111 |
+
for key in self.keys():
|
| 112 |
+
with open(os.path.join(folder, f"stats.pkl"), "wb") as f:
|
| 113 |
+
pickle.dump(self[key], f)
|
src_code_for_reproducibility/utils/update_start_epoch.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
|
| 3 |
+
# During run, set hydra.run.dir=./outputs/{folder}
|
| 4 |
+
def update_start_epoch(cfg, output_directory):
    """Set ``cfg["experiment"]["start_epoch"]`` to the latest saved iteration.

    Scans ``output_directory`` for entries named ``iteration_<n>`` and picks
    the highest ``<n>`` (0 when none exist). No-op when the config does not
    request resuming an experiment.

    Args:
        cfg: Mutable config mapping with an "experiment" section containing
            "resume_experiment" and "start_epoch".
        output_directory: Hydra run directory of the experiment to resume.

    Returns:
        None; the config is updated in place.
    """
    if not cfg["experiment"]["resume_experiment"]:
        return None
    iterations = [0]  # default when no iteration folders exist yet
    for entry in os.listdir(output_directory):
        if not entry.startswith("iteration_"):
            continue
        suffix = entry.split("_", 1)[1]
        # Robustness fix: skip stray entries such as "iteration_backup"
        # whose suffix is not a plain integer (old code crashed on int()).
        if suffix.isdigit():
            iterations.append(int(suffix))
    cfg["experiment"]["start_epoch"] = max(iterations)
    return None
|
src_code_for_reproducibility/utils/wandb_utils.py
ADDED
|
@@ -0,0 +1,164 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from typing import Any, Dict, Optional
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
# True once `wandb` has been imported successfully (cached by _try_import_wandb).
_WANDB_AVAILABLE = False
# Active wandb run handle set by init(); None while W&B logging is inactive.
_WANDB_RUN = None
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def _try_import_wandb():
    """Attempt to import ``wandb`` and report availability as a bool.

    A successful import is cached in the module-level ``_WANDB_AVAILABLE``
    flag so subsequent calls skip the import machinery.
    """
    global _WANDB_AVAILABLE
    if _WANDB_AVAILABLE:
        return True
    try:
        import wandb  # type: ignore  # noqa: F401 -- probe only
    except Exception:
        _WANDB_AVAILABLE = False
        return False
    _WANDB_AVAILABLE = True
    return True
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def _safe_get(cfg: Dict[str, Any], path: list[str], default: Any = None) -> Any:
|
| 24 |
+
cur: Any = cfg
|
| 25 |
+
for key in path:
|
| 26 |
+
if not isinstance(cur, dict) or key not in cur:
|
| 27 |
+
return default
|
| 28 |
+
cur = cur[key]
|
| 29 |
+
return cur
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def is_enabled(cfg: Dict[str, Any]) -> bool:
    """Return True when the config turns on W&B logging (logging.wandb.enabled)."""
    enabled = _safe_get(cfg, ["logging", "wandb", "enabled"], False)
    return bool(enabled)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def init(cfg: Dict[str, Any], run_dir: str, run_name: Optional[str] = None) -> None:
    """
    Initialize Weights & Biases if enabled in config. No-op if disabled or wandb not installed.

    Args:
        cfg: Config mapping; W&B options are read from cfg["logging"]["wandb"].
        run_dir: Directory where W&B files are written (the hydra run dir).
        run_name: Fallback run name when cfg does not provide one.

    Side effects: creates run_dir, sets the WANDB_DIR environment variable
    (if unset), and stores the created run in the module-level _WANDB_RUN.
    """
    global _WANDB_RUN
    if not is_enabled(cfg):
        return
    if not _try_import_wandb():
        return

    import wandb  # type: ignore

    # Read each W&B setting from the config, with a sensible default when absent.
    project = _safe_get(cfg, ["logging", "wandb", "project"], "llm-negotiation")
    entity = _safe_get(cfg, ["logging", "wandb", "entity"], None)
    mode = _safe_get(cfg, ["logging", "wandb", "mode"], "online")
    tags = _safe_get(cfg, ["logging", "wandb", "tags"], []) or []
    notes = _safe_get(cfg, ["logging", "wandb", "notes"], None)
    group = _safe_get(cfg, ["logging", "wandb", "group"], None)
    # Explicit name in cfg wins; otherwise fall back to the caller-supplied run_name.
    name = _safe_get(cfg, ["logging", "wandb", "name"], run_name)

    # Ensure files are written into the hydra run directory
    os.makedirs(run_dir, exist_ok=True)
    # setdefault: respect a WANDB_DIR the user already exported.
    os.environ.setdefault("WANDB_DIR", run_dir)

    # Convert cfg to plain types for W&B config; fallback to minimal dictionary
    try:
        from omegaconf import OmegaConf  # type: ignore

        cfg_container = OmegaConf.to_container(cfg, resolve=True)  # type: ignore
    except Exception:
        # cfg is already a plain dict (or OmegaConf unavailable) -- log it as-is.
        cfg_container = cfg

    _WANDB_RUN = wandb.init(
        project=project,
        entity=entity,
        mode=mode,
        name=name,
        group=group,
        tags=tags,
        notes=notes,
        config=cfg_container,
        dir=run_dir,
        reinit=True,  # allow repeated init() calls within one process
    )
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def log(metrics: Dict[str, Any], step: Optional[int] = None) -> None:
    """Log a flat dictionary of metrics to W&B if active.

    Args:
        metrics: Flat mapping of metric name -> value.
        step: Optional global step for W&B's x-axis.

    Bug fix: the previous version injected the step into the metrics dict as
    a metric literally named "step" (``dict(metrics, step=step)``) instead of
    passing it to ``wandb.log``'s ``step`` keyword, so W&B never used it for
    step alignment. ``wandb.log(metrics, step=None)`` is valid, so a single
    call covers both cases.
    """
    if not _WANDB_AVAILABLE or _WANDB_RUN is None:
        return
    try:
        import wandb  # type: ignore

        wandb.log(metrics, step=step)
    except Exception:
        # Logging must never crash training; drop the datapoint silently.
        pass
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def _flatten(prefix: str, data: Dict[str, Any], out: Dict[str, Any]) -> None:
|
| 95 |
+
for k, v in data.items():
|
| 96 |
+
key = f"{prefix}.{k}" if prefix else k
|
| 97 |
+
if isinstance(v, dict):
|
| 98 |
+
_flatten(key, v, out)
|
| 99 |
+
else:
|
| 100 |
+
out[key] = v
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def _summarize_value(value: Any) -> Dict[str, Any]:
|
| 104 |
+
import numpy as np # local import to avoid hard dependency during disabled mode
|
| 105 |
+
|
| 106 |
+
if value is None:
|
| 107 |
+
return {"none": 1}
|
| 108 |
+
# Scalars
|
| 109 |
+
if isinstance(value, (int, float)):
|
| 110 |
+
return {"value": float(value)}
|
| 111 |
+
# Lists or arrays
|
| 112 |
+
try:
|
| 113 |
+
arr = np.asarray(value)
|
| 114 |
+
if arr.size == 0:
|
| 115 |
+
return {"size": 0}
|
| 116 |
+
return {
|
| 117 |
+
"mean": float(np.nanmean(arr)),
|
| 118 |
+
"min": float(np.nanmin(arr)),
|
| 119 |
+
"max": float(np.nanmax(arr)),
|
| 120 |
+
"last": float(arr.reshape(-1)[-1]),
|
| 121 |
+
"size": int(arr.size),
|
| 122 |
+
}
|
| 123 |
+
except Exception:
|
| 124 |
+
# Fallback: string repr
|
| 125 |
+
return {"text": str(value)}
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def log_tally(array_tally: Dict[str, Any], prefix: str = "", step: Optional[int] = None) -> None:
    """
    Flatten and summarize Tally.array_tally and log to WandB.
    Each leaf list/array is summarized with mean/min/max/last/size.
    """
    if not _WANDB_AVAILABLE or _WANDB_RUN is None:
        return
    summarized: Dict[str, Any] = {}
    base = [prefix] if prefix else []

    def _visit(node: Any, path: list[str]) -> None:
        # Dict nodes recurse; any other node is a leaf series to summarize.
        if isinstance(node, dict):
            for child_key, child in node.items():
                _visit(child, path + [child_key])
            return
        leaf_key = ".".join(base + path)
        try:
            for stat_name, stat_value in _summarize_value(node).items():
                summarized[f"{leaf_key}.{stat_name}"] = stat_value
        except Exception:
            summarized[f"{leaf_key}.error"] = 1

    _visit(array_tally, [])
    if summarized:
        log(summarized, step=step)
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def log_flat_stats(stats: Dict[str, Any], prefix: str = "", step: Optional[int] = None) -> None:
    """Flatten ``stats`` (nested dicts allowed) and forward the result to W&B."""
    if not _WANDB_AVAILABLE or _WANDB_RUN is None:
        return
    flattened: Dict[str, Any] = {}
    _flatten(prefix, stats, flattened)
    if flattened:
        log(flattened, step=step)
|
| 163 |
+
|
| 164 |
+
|