Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .hydra/config.yaml +183 -0
- .hydra/hydra.yaml +154 -0
- .hydra/overrides.yaml +1 -0
- run.log +0 -0
- seed_0/Qwen/Qwen2.5-7B-Instruct/adapters/README.md +207 -0
- seed_0/Qwen/Qwen2.5-7B-Instruct/adapters/agent_adapter/adapter_config.json +42 -0
- seed_0/Qwen/Qwen2.5-7B-Instruct/adapters/critic_adapter/adapter_config.json +42 -0
- seed_0/Qwen/Qwen2.5-7B-Instruct/adapters/fixed_ad_align_adapter/adapter_config.json +42 -0
- src_code_for_reproducibility/__pycache__/__init__.cpython-312.pyc +0 -0
- src_code_for_reproducibility/chat_utils/chat_turn.py +27 -0
- src_code_for_reproducibility/docs/Makefile +19 -0
- src_code_for_reproducibility/docs/make.bat +35 -0
- src_code_for_reproducibility/docs/source/src.environments.dond.dond_agent.rst +7 -0
- src_code_for_reproducibility/docs/source/src.training.ppo_train.rst +7 -0
- src_code_for_reproducibility/docs/source/src.utils.common_imports.rst +7 -0
- src_code_for_reproducibility/docs/source/src.utils.parallel_shuffle.rst +7 -0
- src_code_for_reproducibility/markov_games/__init__.py +0 -0
- src_code_for_reproducibility/markov_games/__pycache__/__init__.cpython-312.pyc +0 -0
- src_code_for_reproducibility/markov_games/__pycache__/agent.cpython-312.pyc +0 -0
- src_code_for_reproducibility/markov_games/__pycache__/alternative_actions_runner.cpython-312.pyc +0 -0
- src_code_for_reproducibility/markov_games/__pycache__/gather_and_export_utils.cpython-312.pyc +0 -0
- src_code_for_reproducibility/markov_games/__pycache__/linear_runner.cpython-312.pyc +0 -0
- src_code_for_reproducibility/markov_games/__pycache__/markov_game.cpython-312.pyc +0 -0
- src_code_for_reproducibility/markov_games/__pycache__/mg_utils.cpython-312.pyc +0 -0
- src_code_for_reproducibility/markov_games/__pycache__/rollout_tree.cpython-312.pyc +0 -0
- src_code_for_reproducibility/markov_games/agent.py +76 -0
- src_code_for_reproducibility/markov_games/alternative_actions_runner.py +138 -0
- src_code_for_reproducibility/markov_games/diplomacy/diplomacy_env.py +230 -0
- src_code_for_reproducibility/markov_games/group_timesteps.py +150 -0
- src_code_for_reproducibility/markov_games/linear_runner.py +30 -0
- src_code_for_reproducibility/markov_games/mg_utils.py +89 -0
- src_code_for_reproducibility/markov_games/negotiation/README.md +40 -0
- src_code_for_reproducibility/markov_games/negotiation/dond_agent.py +61 -0
- src_code_for_reproducibility/markov_games/negotiation/dond_simulation.py +153 -0
- src_code_for_reproducibility/markov_games/rollout_tree.py +86 -0
- src_code_for_reproducibility/markov_games/run_markov_games.py +24 -0
- src_code_for_reproducibility/markov_games/simulation.py +87 -0
- src_code_for_reproducibility/markov_games/statistics_runner.py +405 -0
- src_code_for_reproducibility/markov_games/vine_ppo.py +10 -0
- src_code_for_reproducibility/models/__init__.py +0 -0
- src_code_for_reproducibility/models/__pycache__/__init__.cpython-312.pyc +0 -0
- src_code_for_reproducibility/models/__pycache__/adapter_training_wrapper.cpython-312.pyc +0 -0
- src_code_for_reproducibility/models/__pycache__/human_policy.cpython-312.pyc +0 -0
- src_code_for_reproducibility/models/__pycache__/inference_backend.cpython-312.pyc +0 -0
- src_code_for_reproducibility/models/__pycache__/inference_backend_dummy.cpython-312.pyc +0 -0
- src_code_for_reproducibility/models/__pycache__/inference_backend_sglang.cpython-312.pyc +0 -0
- src_code_for_reproducibility/models/__pycache__/inference_backend_vllm.cpython-312.pyc +0 -0
- src_code_for_reproducibility/models/__pycache__/large_language_model_api.cpython-312.pyc +0 -0
- src_code_for_reproducibility/models/__pycache__/large_language_model_local.cpython-312.pyc +0 -0
- src_code_for_reproducibility/models/adapter_training_wrapper.py +98 -0
.hydra/config.yaml
ADDED
|
@@ -0,0 +1,183 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
experiment:
|
| 2 |
+
wandb_enabled: true
|
| 3 |
+
nb_epochs: 3000
|
| 4 |
+
nb_matches_per_iteration: 64
|
| 5 |
+
reinit_matches_each_it: true
|
| 6 |
+
checkpoint_every_n_iterations: 50
|
| 7 |
+
start_epoch: 0
|
| 8 |
+
resume_experiment: true
|
| 9 |
+
base_seed: 0
|
| 10 |
+
seed_group_size: 8
|
| 11 |
+
train: true
|
| 12 |
+
stat_methods_for_live_wandb: mllm.markov_games.negotiation.negotiation_statistics
|
| 13 |
+
name: naive_vs_fixed_ad_align_seed42
|
| 14 |
+
agent_buffer: false
|
| 15 |
+
keep_agent_buffer_count: ${lora_count}
|
| 16 |
+
agent_buffer_recent_k: -1
|
| 17 |
+
description: Trust-and-Split Rock Paper Scissors negotiation game
|
| 18 |
+
logging:
|
| 19 |
+
wandb:
|
| 20 |
+
enabled: false
|
| 21 |
+
project: llm-negotiation
|
| 22 |
+
entity: null
|
| 23 |
+
mode: online
|
| 24 |
+
name: null
|
| 25 |
+
group: null
|
| 26 |
+
tags: []
|
| 27 |
+
notes: null
|
| 28 |
+
temperature: 1.0
|
| 29 |
+
markov_games:
|
| 30 |
+
runner_method_name: LinearRunner
|
| 31 |
+
runner_kwargs: {}
|
| 32 |
+
group_by_round: true
|
| 33 |
+
simulation_class_name: TrustAndSplitRPSSimulation
|
| 34 |
+
simulation_init_args:
|
| 35 |
+
nb_of_rounds: 10
|
| 36 |
+
quota_messages_per_agent_per_round: 1
|
| 37 |
+
alternating_hands: false
|
| 38 |
+
agents:
|
| 39 |
+
0:
|
| 40 |
+
agent_id: ${agent_0_id}
|
| 41 |
+
agent_name: Alice
|
| 42 |
+
agent_class_name: TrustAndSplitRPSAgent
|
| 43 |
+
policy_id: base_llm/agent_adapter
|
| 44 |
+
init_kwargs:
|
| 45 |
+
goal: Maximize your total points over the whole game.
|
| 46 |
+
num_message_chars: 500
|
| 47 |
+
message_start_end_format: true
|
| 48 |
+
proposal_start_end_format: true
|
| 49 |
+
1:
|
| 50 |
+
agent_id: ${agent_1_id}
|
| 51 |
+
agent_name: Bob
|
| 52 |
+
agent_class_name: TrustAndSplitRPSAgent
|
| 53 |
+
policy_id: base_llm/fixed_ad_align_adapter
|
| 54 |
+
init_kwargs:
|
| 55 |
+
goal: Maximize your total points over the whole game.
|
| 56 |
+
num_message_chars: 500
|
| 57 |
+
message_start_end_format: true
|
| 58 |
+
proposal_start_end_format: true
|
| 59 |
+
models:
|
| 60 |
+
base_llm:
|
| 61 |
+
class: LeanLocalLLM
|
| 62 |
+
init_args:
|
| 63 |
+
llm_id: base_llm
|
| 64 |
+
model_name: Qwen/Qwen2.5-7B-Instruct
|
| 65 |
+
inference_backend: vllm
|
| 66 |
+
hf_kwargs:
|
| 67 |
+
device_map: auto
|
| 68 |
+
torch_dtype: bfloat16
|
| 69 |
+
max_memory:
|
| 70 |
+
0: 20GiB
|
| 71 |
+
attn_implementation: flash_attention_2
|
| 72 |
+
inference_backend_init_kwargs:
|
| 73 |
+
enable_lora: true
|
| 74 |
+
seed: ${experiment.base_seed}
|
| 75 |
+
enable_prefix_caching: true
|
| 76 |
+
max_model_len: 10000.0
|
| 77 |
+
gpu_memory_utilization: 0.5
|
| 78 |
+
dtype: bfloat16
|
| 79 |
+
trust_remote_code: true
|
| 80 |
+
max_lora_rank: 32
|
| 81 |
+
enforce_eager: false
|
| 82 |
+
max_loras: ${lora_count}
|
| 83 |
+
max_cpu_loras: ${lora_count}
|
| 84 |
+
enable_sleep_mode: true
|
| 85 |
+
inference_backend_sampling_params:
|
| 86 |
+
temperature: ${temperature}
|
| 87 |
+
top_p: 1.0
|
| 88 |
+
max_tokens: 400
|
| 89 |
+
top_k: -1
|
| 90 |
+
logprobs: 0
|
| 91 |
+
adapter_configs:
|
| 92 |
+
agent_adapter:
|
| 93 |
+
task_type: CAUSAL_LM
|
| 94 |
+
r: 32
|
| 95 |
+
lora_alpha: 64
|
| 96 |
+
lora_dropout: 0.0
|
| 97 |
+
target_modules: all-linear
|
| 98 |
+
critic_adapter:
|
| 99 |
+
task_type: CAUSAL_LM
|
| 100 |
+
r: 32
|
| 101 |
+
lora_alpha: 64
|
| 102 |
+
lora_dropout: 0.0
|
| 103 |
+
target_modules: all-linear
|
| 104 |
+
fixed_ad_align_adapter:
|
| 105 |
+
task_type: CAUSAL_LM
|
| 106 |
+
r: 32
|
| 107 |
+
lora_alpha: 64
|
| 108 |
+
lora_dropout: 0.0
|
| 109 |
+
target_modules: all-linear
|
| 110 |
+
enable_thinking: null
|
| 111 |
+
regex_max_attempts: 1
|
| 112 |
+
initial_adapter_paths:
|
| 113 |
+
fixed_ad_align_adapter: ${fixed_ad_align_adapter_path}
|
| 114 |
+
critics:
|
| 115 |
+
agent_critic:
|
| 116 |
+
module_pointer:
|
| 117 |
+
- base_llm
|
| 118 |
+
- critic_adapter
|
| 119 |
+
optimizers:
|
| 120 |
+
agent_optimizer:
|
| 121 |
+
module_pointer:
|
| 122 |
+
- base_llm
|
| 123 |
+
- agent_adapter
|
| 124 |
+
optimizer_class_name: torch.optim.Adam
|
| 125 |
+
init_args:
|
| 126 |
+
lr: 3.0e-06
|
| 127 |
+
weight_decay: 0.0
|
| 128 |
+
critic_optimizer:
|
| 129 |
+
module_pointer: agent_critic
|
| 130 |
+
optimizer_class_name: torch.optim.Adam
|
| 131 |
+
init_args:
|
| 132 |
+
lr: 3.0e-06
|
| 133 |
+
weight_decay: 0.0
|
| 134 |
+
trainers:
|
| 135 |
+
agent_trainer:
|
| 136 |
+
class: TrainerNaive
|
| 137 |
+
module_pointers:
|
| 138 |
+
policy:
|
| 139 |
+
- base_llm
|
| 140 |
+
- agent_adapter
|
| 141 |
+
policy_optimizer: agent_optimizer
|
| 142 |
+
critic: agent_critic
|
| 143 |
+
critic_optimizer: critic_optimizer
|
| 144 |
+
kwargs:
|
| 145 |
+
entropy_coeff: 0.0
|
| 146 |
+
entropy_topk: null
|
| 147 |
+
entropy_mask_regex: null
|
| 148 |
+
kl_coeff: 0.001
|
| 149 |
+
gradient_clipping: 1.0
|
| 150 |
+
restrict_tokens: null
|
| 151 |
+
mini_batch_size: 1
|
| 152 |
+
use_gradient_checkpointing: true
|
| 153 |
+
temperature: ${temperature}
|
| 154 |
+
device: cuda:0
|
| 155 |
+
use_gae: false
|
| 156 |
+
whiten_advantages: false
|
| 157 |
+
whiten_advantages_time_step_wise: false
|
| 158 |
+
skip_discounted_state_visitation: true
|
| 159 |
+
use_gae_lambda_annealing: false
|
| 160 |
+
gae_lambda_annealing_method: None
|
| 161 |
+
gae_lambda_annealing_method_params: None
|
| 162 |
+
gae_lambda_annealing_limit: 0.95
|
| 163 |
+
discount_factor: 0.96
|
| 164 |
+
use_rloo: true
|
| 165 |
+
enable_tokenwise_logging: false
|
| 166 |
+
pg_loss_normalization: nb_tokens
|
| 167 |
+
truncated_importance_sampling_ratio_cap: 2.0
|
| 168 |
+
reward_normalizing_constant: 100.0
|
| 169 |
+
train_on_which_data:
|
| 170 |
+
agent_trainer:
|
| 171 |
+
- Alice
|
| 172 |
+
lora_count: 30
|
| 173 |
+
common_agent_kwargs:
|
| 174 |
+
goal: Maximize your total points over the whole game.
|
| 175 |
+
num_message_chars: 500
|
| 176 |
+
message_start_end_format: true
|
| 177 |
+
proposal_start_end_format: true
|
| 178 |
+
agent_0_id: Alice
|
| 179 |
+
agent_1_id: Bob
|
| 180 |
+
agent_ids:
|
| 181 |
+
- Alice
|
| 182 |
+
- Bob
|
| 183 |
+
fixed_ad_align_adapter_path: /home/muqeeth/scratch/llm_negotiation/2025_11/tas_rps_startend_ad_align_nocurrtimestep_seed42_beta2/seed_42/Qwen/Qwen2.5-7B-Instruct/adapters/agent_adapter
|
.hydra/hydra.yaml
ADDED
|
@@ -0,0 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
hydra:
|
| 2 |
+
run:
|
| 3 |
+
dir: ${oc.env:SCRATCH}/llm_negotiation/${now:%Y_%m}/${experiment.name}
|
| 4 |
+
sweep:
|
| 5 |
+
dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
|
| 6 |
+
subdir: ${hydra.job.num}
|
| 7 |
+
launcher:
|
| 8 |
+
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
|
| 9 |
+
sweeper:
|
| 10 |
+
_target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
|
| 11 |
+
max_batch_size: null
|
| 12 |
+
params: null
|
| 13 |
+
help:
|
| 14 |
+
app_name: ${hydra.job.name}
|
| 15 |
+
header: '${hydra.help.app_name} is powered by Hydra.
|
| 16 |
+
|
| 17 |
+
'
|
| 18 |
+
footer: 'Powered by Hydra (https://hydra.cc)
|
| 19 |
+
|
| 20 |
+
Use --hydra-help to view Hydra specific help
|
| 21 |
+
|
| 22 |
+
'
|
| 23 |
+
template: '${hydra.help.header}
|
| 24 |
+
|
| 25 |
+
== Configuration groups ==
|
| 26 |
+
|
| 27 |
+
Compose your configuration from those groups (group=option)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
$APP_CONFIG_GROUPS
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
== Config ==
|
| 34 |
+
|
| 35 |
+
Override anything in the config (foo.bar=value)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
$CONFIG
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
${hydra.help.footer}
|
| 42 |
+
|
| 43 |
+
'
|
| 44 |
+
hydra_help:
|
| 45 |
+
template: 'Hydra (${hydra.runtime.version})
|
| 46 |
+
|
| 47 |
+
See https://hydra.cc for more info.
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
== Flags ==
|
| 51 |
+
|
| 52 |
+
$FLAGS_HELP
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
== Configuration groups ==
|
| 56 |
+
|
| 57 |
+
Compose your configuration from those groups (For example, append hydra/job_logging=disabled
|
| 58 |
+
to command line)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
$HYDRA_CONFIG_GROUPS
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
Use ''--cfg hydra'' to Show the Hydra config.
|
| 65 |
+
|
| 66 |
+
'
|
| 67 |
+
hydra_help: ???
|
| 68 |
+
hydra_logging:
|
| 69 |
+
version: 1
|
| 70 |
+
formatters:
|
| 71 |
+
simple:
|
| 72 |
+
format: '[%(asctime)s][HYDRA] %(message)s'
|
| 73 |
+
handlers:
|
| 74 |
+
console:
|
| 75 |
+
class: logging.StreamHandler
|
| 76 |
+
formatter: simple
|
| 77 |
+
stream: ext://sys.stdout
|
| 78 |
+
root:
|
| 79 |
+
level: INFO
|
| 80 |
+
handlers:
|
| 81 |
+
- console
|
| 82 |
+
loggers:
|
| 83 |
+
logging_example:
|
| 84 |
+
level: DEBUG
|
| 85 |
+
disable_existing_loggers: false
|
| 86 |
+
job_logging:
|
| 87 |
+
version: 1
|
| 88 |
+
formatters:
|
| 89 |
+
simple:
|
| 90 |
+
format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
|
| 91 |
+
handlers:
|
| 92 |
+
console:
|
| 93 |
+
class: logging.StreamHandler
|
| 94 |
+
formatter: simple
|
| 95 |
+
stream: ext://sys.stdout
|
| 96 |
+
file:
|
| 97 |
+
class: logging.FileHandler
|
| 98 |
+
formatter: simple
|
| 99 |
+
filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
|
| 100 |
+
root:
|
| 101 |
+
level: INFO
|
| 102 |
+
handlers:
|
| 103 |
+
- console
|
| 104 |
+
- file
|
| 105 |
+
disable_existing_loggers: false
|
| 106 |
+
env: {}
|
| 107 |
+
mode: RUN
|
| 108 |
+
searchpath: []
|
| 109 |
+
callbacks: {}
|
| 110 |
+
output_subdir: .hydra
|
| 111 |
+
overrides:
|
| 112 |
+
hydra:
|
| 113 |
+
- hydra.mode=RUN
|
| 114 |
+
task: []
|
| 115 |
+
job:
|
| 116 |
+
name: run
|
| 117 |
+
chdir: false
|
| 118 |
+
override_dirname: ''
|
| 119 |
+
id: ???
|
| 120 |
+
num: ???
|
| 121 |
+
config_name: naive_vs_fixed_ad_align_seed42.yaml
|
| 122 |
+
env_set: {}
|
| 123 |
+
env_copy: []
|
| 124 |
+
config:
|
| 125 |
+
override_dirname:
|
| 126 |
+
kv_sep: '='
|
| 127 |
+
item_sep: ','
|
| 128 |
+
exclude_keys: []
|
| 129 |
+
runtime:
|
| 130 |
+
version: 1.3.2
|
| 131 |
+
version_base: '1.1'
|
| 132 |
+
cwd: /scratch/muqeeth/llm_negotiation
|
| 133 |
+
config_sources:
|
| 134 |
+
- path: hydra.conf
|
| 135 |
+
schema: pkg
|
| 136 |
+
provider: hydra
|
| 137 |
+
- path: /scratch/muqeeth/llm_negotiation/configs
|
| 138 |
+
schema: file
|
| 139 |
+
provider: main
|
| 140 |
+
- path: ''
|
| 141 |
+
schema: structured
|
| 142 |
+
provider: schema
|
| 143 |
+
output_dir: /scratch/muqeeth/llm_negotiation/2025_11/naive_vs_fixed_ad_align_seed42
|
| 144 |
+
choices:
|
| 145 |
+
hydra/env: default
|
| 146 |
+
hydra/callbacks: null
|
| 147 |
+
hydra/job_logging: default
|
| 148 |
+
hydra/hydra_logging: default
|
| 149 |
+
hydra/hydra_help: default
|
| 150 |
+
hydra/help: default
|
| 151 |
+
hydra/sweeper: basic
|
| 152 |
+
hydra/launcher: basic
|
| 153 |
+
hydra/output: default
|
| 154 |
+
verbose: false
|
.hydra/overrides.yaml
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
[]
|
run.log
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
seed_0/Qwen/Qwen2.5-7B-Instruct/adapters/README.md
ADDED
|
@@ -0,0 +1,207 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
base_model: Qwen/Qwen2.5-7B-Instruct
|
| 3 |
+
library_name: peft
|
| 4 |
+
pipeline_tag: text-generation
|
| 5 |
+
tags:
|
| 6 |
+
- base_model:adapter:Qwen/Qwen2.5-7B-Instruct
|
| 7 |
+
- lora
|
| 8 |
+
- transformers
|
| 9 |
+
---
|
| 10 |
+
|
| 11 |
+
# Model Card for Model ID
|
| 12 |
+
|
| 13 |
+
<!-- Provide a quick summary of what the model is/does. -->
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
## Model Details
|
| 18 |
+
|
| 19 |
+
### Model Description
|
| 20 |
+
|
| 21 |
+
<!-- Provide a longer summary of what this model is. -->
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
- **Developed by:** [More Information Needed]
|
| 26 |
+
- **Funded by [optional]:** [More Information Needed]
|
| 27 |
+
- **Shared by [optional]:** [More Information Needed]
|
| 28 |
+
- **Model type:** [More Information Needed]
|
| 29 |
+
- **Language(s) (NLP):** [More Information Needed]
|
| 30 |
+
- **License:** [More Information Needed]
|
| 31 |
+
- **Finetuned from model [optional]:** [More Information Needed]
|
| 32 |
+
|
| 33 |
+
### Model Sources [optional]
|
| 34 |
+
|
| 35 |
+
<!-- Provide the basic links for the model. -->
|
| 36 |
+
|
| 37 |
+
- **Repository:** [More Information Needed]
|
| 38 |
+
- **Paper [optional]:** [More Information Needed]
|
| 39 |
+
- **Demo [optional]:** [More Information Needed]
|
| 40 |
+
|
| 41 |
+
## Uses
|
| 42 |
+
|
| 43 |
+
<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
|
| 44 |
+
|
| 45 |
+
### Direct Use
|
| 46 |
+
|
| 47 |
+
<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
|
| 48 |
+
|
| 49 |
+
[More Information Needed]
|
| 50 |
+
|
| 51 |
+
### Downstream Use [optional]
|
| 52 |
+
|
| 53 |
+
<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
|
| 54 |
+
|
| 55 |
+
[More Information Needed]
|
| 56 |
+
|
| 57 |
+
### Out-of-Scope Use
|
| 58 |
+
|
| 59 |
+
<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
|
| 60 |
+
|
| 61 |
+
[More Information Needed]
|
| 62 |
+
|
| 63 |
+
## Bias, Risks, and Limitations
|
| 64 |
+
|
| 65 |
+
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
|
| 66 |
+
|
| 67 |
+
[More Information Needed]
|
| 68 |
+
|
| 69 |
+
### Recommendations
|
| 70 |
+
|
| 71 |
+
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
|
| 72 |
+
|
| 73 |
+
Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
|
| 74 |
+
|
| 75 |
+
## How to Get Started with the Model
|
| 76 |
+
|
| 77 |
+
Use the code below to get started with the model.
|
| 78 |
+
|
| 79 |
+
[More Information Needed]
|
| 80 |
+
|
| 81 |
+
## Training Details
|
| 82 |
+
|
| 83 |
+
### Training Data
|
| 84 |
+
|
| 85 |
+
<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
|
| 86 |
+
|
| 87 |
+
[More Information Needed]
|
| 88 |
+
|
| 89 |
+
### Training Procedure
|
| 90 |
+
|
| 91 |
+
<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
|
| 92 |
+
|
| 93 |
+
#### Preprocessing [optional]
|
| 94 |
+
|
| 95 |
+
[More Information Needed]
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
#### Training Hyperparameters
|
| 99 |
+
|
| 100 |
+
- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
|
| 101 |
+
|
| 102 |
+
#### Speeds, Sizes, Times [optional]
|
| 103 |
+
|
| 104 |
+
<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
|
| 105 |
+
|
| 106 |
+
[More Information Needed]
|
| 107 |
+
|
| 108 |
+
## Evaluation
|
| 109 |
+
|
| 110 |
+
<!-- This section describes the evaluation protocols and provides the results. -->
|
| 111 |
+
|
| 112 |
+
### Testing Data, Factors & Metrics
|
| 113 |
+
|
| 114 |
+
#### Testing Data
|
| 115 |
+
|
| 116 |
+
<!-- This should link to a Dataset Card if possible. -->
|
| 117 |
+
|
| 118 |
+
[More Information Needed]
|
| 119 |
+
|
| 120 |
+
#### Factors
|
| 121 |
+
|
| 122 |
+
<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
|
| 123 |
+
|
| 124 |
+
[More Information Needed]
|
| 125 |
+
|
| 126 |
+
#### Metrics
|
| 127 |
+
|
| 128 |
+
<!-- These are the evaluation metrics being used, ideally with a description of why. -->
|
| 129 |
+
|
| 130 |
+
[More Information Needed]
|
| 131 |
+
|
| 132 |
+
### Results
|
| 133 |
+
|
| 134 |
+
[More Information Needed]
|
| 135 |
+
|
| 136 |
+
#### Summary
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
## Model Examination [optional]
|
| 141 |
+
|
| 142 |
+
<!-- Relevant interpretability work for the model goes here -->
|
| 143 |
+
|
| 144 |
+
[More Information Needed]
|
| 145 |
+
|
| 146 |
+
## Environmental Impact
|
| 147 |
+
|
| 148 |
+
<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
|
| 149 |
+
|
| 150 |
+
Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
|
| 151 |
+
|
| 152 |
+
- **Hardware Type:** [More Information Needed]
|
| 153 |
+
- **Hours used:** [More Information Needed]
|
| 154 |
+
- **Cloud Provider:** [More Information Needed]
|
| 155 |
+
- **Compute Region:** [More Information Needed]
|
| 156 |
+
- **Carbon Emitted:** [More Information Needed]
|
| 157 |
+
|
| 158 |
+
## Technical Specifications [optional]
|
| 159 |
+
|
| 160 |
+
### Model Architecture and Objective
|
| 161 |
+
|
| 162 |
+
[More Information Needed]
|
| 163 |
+
|
| 164 |
+
### Compute Infrastructure
|
| 165 |
+
|
| 166 |
+
[More Information Needed]
|
| 167 |
+
|
| 168 |
+
#### Hardware
|
| 169 |
+
|
| 170 |
+
[More Information Needed]
|
| 171 |
+
|
| 172 |
+
#### Software
|
| 173 |
+
|
| 174 |
+
[More Information Needed]
|
| 175 |
+
|
| 176 |
+
## Citation [optional]
|
| 177 |
+
|
| 178 |
+
<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
|
| 179 |
+
|
| 180 |
+
**BibTeX:**
|
| 181 |
+
|
| 182 |
+
[More Information Needed]
|
| 183 |
+
|
| 184 |
+
**APA:**
|
| 185 |
+
|
| 186 |
+
[More Information Needed]
|
| 187 |
+
|
| 188 |
+
## Glossary [optional]
|
| 189 |
+
|
| 190 |
+
<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
|
| 191 |
+
|
| 192 |
+
[More Information Needed]
|
| 193 |
+
|
| 194 |
+
## More Information [optional]
|
| 195 |
+
|
| 196 |
+
[More Information Needed]
|
| 197 |
+
|
| 198 |
+
## Model Card Authors [optional]
|
| 199 |
+
|
| 200 |
+
[More Information Needed]
|
| 201 |
+
|
| 202 |
+
## Model Card Contact
|
| 203 |
+
|
| 204 |
+
[More Information Needed]
|
| 205 |
+
### Framework versions
|
| 206 |
+
|
| 207 |
+
- PEFT 0.17.1
|
seed_0/Qwen/Qwen2.5-7B-Instruct/adapters/agent_adapter/adapter_config.json
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"alpha_pattern": {},
|
| 3 |
+
"auto_mapping": null,
|
| 4 |
+
"base_model_name_or_path": "Qwen/Qwen2.5-7B-Instruct",
|
| 5 |
+
"bias": "none",
|
| 6 |
+
"corda_config": null,
|
| 7 |
+
"eva_config": null,
|
| 8 |
+
"exclude_modules": null,
|
| 9 |
+
"fan_in_fan_out": false,
|
| 10 |
+
"inference_mode": true,
|
| 11 |
+
"init_lora_weights": true,
|
| 12 |
+
"layer_replication": null,
|
| 13 |
+
"layers_pattern": null,
|
| 14 |
+
"layers_to_transform": null,
|
| 15 |
+
"loftq_config": {},
|
| 16 |
+
"lora_alpha": 64,
|
| 17 |
+
"lora_bias": false,
|
| 18 |
+
"lora_dropout": 0.0,
|
| 19 |
+
"megatron_config": null,
|
| 20 |
+
"megatron_core": "megatron.core",
|
| 21 |
+
"modules_to_save": null,
|
| 22 |
+
"peft_type": "LORA",
|
| 23 |
+
"qalora_group_size": 16,
|
| 24 |
+
"r": 32,
|
| 25 |
+
"rank_pattern": {},
|
| 26 |
+
"revision": null,
|
| 27 |
+
"target_modules": [
|
| 28 |
+
"o_proj",
|
| 29 |
+
"k_proj",
|
| 30 |
+
"v_proj",
|
| 31 |
+
"down_proj",
|
| 32 |
+
"up_proj",
|
| 33 |
+
"q_proj",
|
| 34 |
+
"gate_proj"
|
| 35 |
+
],
|
| 36 |
+
"target_parameters": null,
|
| 37 |
+
"task_type": "CAUSAL_LM",
|
| 38 |
+
"trainable_token_indices": null,
|
| 39 |
+
"use_dora": false,
|
| 40 |
+
"use_qalora": false,
|
| 41 |
+
"use_rslora": false
|
| 42 |
+
}
|
seed_0/Qwen/Qwen2.5-7B-Instruct/adapters/critic_adapter/adapter_config.json
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"alpha_pattern": {},
|
| 3 |
+
"auto_mapping": null,
|
| 4 |
+
"base_model_name_or_path": "Qwen/Qwen2.5-7B-Instruct",
|
| 5 |
+
"bias": "none",
|
| 6 |
+
"corda_config": null,
|
| 7 |
+
"eva_config": null,
|
| 8 |
+
"exclude_modules": null,
|
| 9 |
+
"fan_in_fan_out": false,
|
| 10 |
+
"inference_mode": true,
|
| 11 |
+
"init_lora_weights": true,
|
| 12 |
+
"layer_replication": null,
|
| 13 |
+
"layers_pattern": null,
|
| 14 |
+
"layers_to_transform": null,
|
| 15 |
+
"loftq_config": {},
|
| 16 |
+
"lora_alpha": 64,
|
| 17 |
+
"lora_bias": false,
|
| 18 |
+
"lora_dropout": 0.0,
|
| 19 |
+
"megatron_config": null,
|
| 20 |
+
"megatron_core": "megatron.core",
|
| 21 |
+
"modules_to_save": null,
|
| 22 |
+
"peft_type": "LORA",
|
| 23 |
+
"qalora_group_size": 16,
|
| 24 |
+
"r": 32,
|
| 25 |
+
"rank_pattern": {},
|
| 26 |
+
"revision": null,
|
| 27 |
+
"target_modules": [
|
| 28 |
+
"o_proj",
|
| 29 |
+
"k_proj",
|
| 30 |
+
"v_proj",
|
| 31 |
+
"down_proj",
|
| 32 |
+
"up_proj",
|
| 33 |
+
"q_proj",
|
| 34 |
+
"gate_proj"
|
| 35 |
+
],
|
| 36 |
+
"target_parameters": null,
|
| 37 |
+
"task_type": "CAUSAL_LM",
|
| 38 |
+
"trainable_token_indices": null,
|
| 39 |
+
"use_dora": false,
|
| 40 |
+
"use_qalora": false,
|
| 41 |
+
"use_rslora": false
|
| 42 |
+
}
|
seed_0/Qwen/Qwen2.5-7B-Instruct/adapters/fixed_ad_align_adapter/adapter_config.json
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"alpha_pattern": {},
|
| 3 |
+
"auto_mapping": null,
|
| 4 |
+
"base_model_name_or_path": "Qwen/Qwen2.5-7B-Instruct",
|
| 5 |
+
"bias": "none",
|
| 6 |
+
"corda_config": null,
|
| 7 |
+
"eva_config": null,
|
| 8 |
+
"exclude_modules": null,
|
| 9 |
+
"fan_in_fan_out": false,
|
| 10 |
+
"inference_mode": true,
|
| 11 |
+
"init_lora_weights": true,
|
| 12 |
+
"layer_replication": null,
|
| 13 |
+
"layers_pattern": null,
|
| 14 |
+
"layers_to_transform": null,
|
| 15 |
+
"loftq_config": {},
|
| 16 |
+
"lora_alpha": 64,
|
| 17 |
+
"lora_bias": false,
|
| 18 |
+
"lora_dropout": 0.0,
|
| 19 |
+
"megatron_config": null,
|
| 20 |
+
"megatron_core": "megatron.core",
|
| 21 |
+
"modules_to_save": null,
|
| 22 |
+
"peft_type": "LORA",
|
| 23 |
+
"qalora_group_size": 16,
|
| 24 |
+
"r": 32,
|
| 25 |
+
"rank_pattern": {},
|
| 26 |
+
"revision": null,
|
| 27 |
+
"target_modules": [
|
| 28 |
+
"o_proj",
|
| 29 |
+
"k_proj",
|
| 30 |
+
"v_proj",
|
| 31 |
+
"down_proj",
|
| 32 |
+
"up_proj",
|
| 33 |
+
"q_proj",
|
| 34 |
+
"gate_proj"
|
| 35 |
+
],
|
| 36 |
+
"target_parameters": null,
|
| 37 |
+
"task_type": "CAUSAL_LM",
|
| 38 |
+
"trainable_token_indices": null,
|
| 39 |
+
"use_dora": false,
|
| 40 |
+
"use_qalora": false,
|
| 41 |
+
"use_rslora": false
|
| 42 |
+
}
|
src_code_for_reproducibility/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (146 Bytes). View file
|
|
|
src_code_for_reproducibility/chat_utils/chat_turn.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import json
|
| 4 |
+
from dataclasses import dataclass
|
| 5 |
+
from pathlib import Path
|
| 6 |
+
from typing import Any, List, Literal, Optional, Tuple
|
| 7 |
+
|
| 8 |
+
import jsonschema
|
| 9 |
+
import torch
|
| 10 |
+
from pydantic import BaseModel, ConfigDict, Field, model_validator
|
| 11 |
+
|
| 12 |
+
AgentId = str
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class ChatTurn(BaseModel):
|
| 16 |
+
model_config = ConfigDict(arbitrary_types_allowed=True) # needed for torch tensors
|
| 17 |
+
|
| 18 |
+
role: str = Field(pattern="^(user|assistant)$")
|
| 19 |
+
agent_id: AgentId # ID of the agent with which the chat occured
|
| 20 |
+
content: str
|
| 21 |
+
reasoning_content: str | None = None
|
| 22 |
+
chat_template_token_ids: torch.LongTensor | None = None # Token ids of chat template format. For example, token ids of "<assistant>{content}</assistant>""
|
| 23 |
+
out_token_ids: torch.LongTensor | None = (
|
| 24 |
+
None # tokens generated from inference engine
|
| 25 |
+
)
|
| 26 |
+
log_probs: torch.FloatTensor | None = None
|
| 27 |
+
is_state_end: bool = False # indicates whether this chat turn marks the end of a state in the trajectory
|
src_code_for_reproducibility/docs/Makefile
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Minimal makefile for Sphinx documentation

# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS    ?=
SPHINXBUILD   ?= sphinx-build
SOURCEDIR     = source
BUILDDIR      = build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(SPHINXFLAGS)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(SPHINXFLAGS) is meant for extra command-line flags
# (this template uses SPHINXFLAGS where the upstream template uses $(O)).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(SPHINXFLAGS)
|
src_code_for_reproducibility/docs/make.bat
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
@ECHO OFF

pushd %~dp0

REM Command file for Sphinx documentation

REM Default the Sphinx builder executable when the caller did not set one.
if "%SPHINXBUILD%" == "" (
	set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=source
set BUILDDIR=build

REM Probe that sphinx-build is runnable (errorlevel 9009 = command not found).
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
	echo.
	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
	echo.installed, then set the SPHINXBUILD environment variable to point
	echo.to the full path of the 'sphinx-build' executable. Alternatively you
	echo.may add the Sphinx directory to PATH.
	echo.
	echo.If you don't have Sphinx installed, grab it from
	echo.https://www.sphinx-doc.org/
	exit /b 1
)

if "%1" == "" goto help

REM Forward the requested target to Sphinx "make mode"; %O% is a shortcut
REM for extra SPHINXOPTS.
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
goto end

:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%

:end
popd
|
src_code_for_reproducibility/docs/source/src.environments.dond.dond_agent.rst
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
src.environments.dond.dond\_agent module
|
| 2 |
+
========================================
|
| 3 |
+
|
| 4 |
+
.. automodule:: src.environments.dond.dond_agent
|
| 5 |
+
:members:
|
| 6 |
+
:undoc-members:
|
| 7 |
+
:show-inheritance:
|
src_code_for_reproducibility/docs/source/src.training.ppo_train.rst
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
src.training.ppo\_train module
|
| 2 |
+
==============================
|
| 3 |
+
|
| 4 |
+
.. automodule:: src.training.ppo_train
|
| 5 |
+
:members:
|
| 6 |
+
:undoc-members:
|
| 7 |
+
:show-inheritance:
|
src_code_for_reproducibility/docs/source/src.utils.common_imports.rst
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
src.utils.common\_imports module
|
| 2 |
+
================================
|
| 3 |
+
|
| 4 |
+
.. automodule:: src.utils.common_imports
|
| 5 |
+
:members:
|
| 6 |
+
:undoc-members:
|
| 7 |
+
:show-inheritance:
|
src_code_for_reproducibility/docs/source/src.utils.parallel_shuffle.rst
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
src.utils.parallel\_shuffle module
|
| 2 |
+
==================================
|
| 3 |
+
|
| 4 |
+
.. automodule:: src.utils.parallel_shuffle
|
| 5 |
+
:members:
|
| 6 |
+
:undoc-members:
|
| 7 |
+
:show-inheritance:
|
src_code_for_reproducibility/markov_games/__init__.py
ADDED
|
File without changes
|
src_code_for_reproducibility/markov_games/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (159 Bytes). View file
|
|
|
src_code_for_reproducibility/markov_games/__pycache__/agent.cpython-312.pyc
ADDED
|
Binary file (3.2 kB). View file
|
|
|
src_code_for_reproducibility/markov_games/__pycache__/alternative_actions_runner.cpython-312.pyc
ADDED
|
Binary file (4.95 kB). View file
|
|
|
src_code_for_reproducibility/markov_games/__pycache__/gather_and_export_utils.cpython-312.pyc
ADDED
|
Binary file (46.5 kB). View file
|
|
|
src_code_for_reproducibility/markov_games/__pycache__/linear_runner.cpython-312.pyc
ADDED
|
Binary file (1.25 kB). View file
|
|
|
src_code_for_reproducibility/markov_games/__pycache__/markov_game.cpython-312.pyc
ADDED
|
Binary file (9.72 kB). View file
|
|
|
src_code_for_reproducibility/markov_games/__pycache__/mg_utils.cpython-312.pyc
ADDED
|
Binary file (3.98 kB). View file
|
|
|
src_code_for_reproducibility/markov_games/__pycache__/rollout_tree.cpython-312.pyc
ADDED
|
Binary file (3.67 kB). View file
|
|
|
src_code_for_reproducibility/markov_games/agent.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
In simple RL paradise, where the action dimensions are constant and well defined,
|
| 3 |
+
Agent classes are not necessary. But in MARL, with LLM's, there isn't always
|
| 4 |
+
a direct path from policy to action. For instance, from the observation of the environment,
|
| 5 |
+
a prompt must be created. Then, the outputs of the policy might be incorrect, so a second
|
| 6 |
+
request to the LLM must be sent before the action is well defined. This is why this Agent class exists.
|
| 7 |
+
It acts as a mini environment, bridging the gap between the core simulation and
|
| 8 |
+
the LLM policies.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from abc import ABC, abstractmethod
|
| 12 |
+
from collections.abc import Callable
|
| 13 |
+
from typing import Any, Tuple
|
| 14 |
+
|
| 15 |
+
from numpy.random import default_rng
|
| 16 |
+
|
| 17 |
+
from mllm.markov_games.rollout_tree import AgentActLog
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class Agent(ABC):
    """
    Abstract bridge between the core simulation and an LLM policy.

    Subclasses turn observations into prompts, query the policy (possibly
    several times, e.g. to retry invalid outputs), and map policy outputs
    back to well-defined actions.
    """

    @abstractmethod
    def __init__(
        self,
        seed: int,
        agent_id: str,
        agent_name: str,
        agent_policy: Callable[[list[dict]], str],
        *args,
        **kwargs,
    ):
        """
        Initialize the agent state.

        Args:
            seed: Seed for this agent's private RNG.
            agent_id: Unique identifier of the agent.
            agent_name: Human-readable name of the agent.
            agent_policy: Callable mapping a chat (list of message dicts) to
                a completion string.
        """
        # Template initialization for subclasses; the method stays abstract
        # and ends in NotImplementedError on purpose.
        self.seed = seed
        self.agent_id = agent_id
        self.agent_name = agent_name
        # Bug fix: the original assigned the undefined name `policy`, so any
        # subclass calling super().__init__ raised NameError before ever
        # reaching the NotImplementedError below.
        self.policy = agent_policy
        self.rng = default_rng(self.seed)
        raise NotImplementedError

    async def act(self, observation) -> Tuple[Any, AgentActLog]:
        """
        Query (possibly multiple times) a policy (or possibly a pool of policies) to
        obtain the action of the agent.

        Example:
            action = None
            prompt = self.observation_to_prompt(observation)
            while not self.valid(action):
                output = await self.policy.generate(prompt)
                action = self.policy_output_to_action(output)
            return action

        Returns:
            action: The agent's chosen action.
            step_info: An AgentActLog describing how the action was produced.
        """
        raise NotImplementedError

    def get_safe_copy(self):
        """
        Return copy of the agent object that is decorrelated from the original object.
        """
        raise NotImplementedError

    def reset(self):
        """Reset the agent's internal state between episodes."""
        raise NotImplementedError

    def render(self):
        """Render or display the agent's current state."""
        raise NotImplementedError

    def close(self):
        """Release any resources held by the agent."""
        raise NotImplementedError

    def get_agent_info(self):
        """Return metadata about the agent (e.g. for logging)."""
        raise NotImplementedError
|
src_code_for_reproducibility/markov_games/alternative_actions_runner.py
ADDED
|
@@ -0,0 +1,138 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
import copy
|
| 3 |
+
import json
|
| 4 |
+
import os.path
|
| 5 |
+
from typing import Any, Tuple
|
| 6 |
+
|
| 7 |
+
from mllm.markov_games.markov_game import AgentAndActionSafeCopy, MarkovGame
|
| 8 |
+
from mllm.markov_games.rollout_tree import (
|
| 9 |
+
AgentActLog,
|
| 10 |
+
RolloutTreeBranchNode,
|
| 11 |
+
RolloutTreeNode,
|
| 12 |
+
RolloutTreeRootNode,
|
| 13 |
+
StepLog,
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
AgentId = str
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
async def run_with_unilateral_alt_action(
    markov_game: MarkovGame,
    agent_id: AgentId,
    time_step: int,
    branch_node: RolloutTreeBranchNode,
    max_depth: int,
):
    """
    Generate a new alternative-action branch for a given agent.

    Re-samples the given agent's action in `markov_game` (a safe copy of the
    game state), rolls the game forward for up to `max_depth` extra steps, and
    attaches the resulting chain of nodes to `branch_node.branches[agent_id]`.

    Args:
        markov_game: A safe copy of the game, positioned just before the step
            to branch on, with the other agent's action already fixed.
        agent_id: The agent whose action is re-sampled unilaterally.
        time_step: Time step index of the branching point.
        branch_node: Branch node of the main trajectory to attach this branch to.
        max_depth: Maximum number of steps to roll out after the branch point.
    """
    # Generate the alternative action and take the branching step.
    await markov_game.set_action_of_agent(agent_id)
    terminated: bool = markov_game.take_simulation_step()
    step_log = markov_game.get_step_log()
    first_alternative_node = RolloutTreeNode(
        step_log=step_log,
        time_step=time_step,
    )

    # Roll out the rest of the trajectory up to max_depth.
    time_step += 1
    counter = 1
    previous_node = first_alternative_node
    while not terminated and counter <= max_depth:
        terminated, step_log = await markov_game.step()
        current_node = RolloutTreeNode(step_log=step_log, time_step=time_step)
        previous_node.child = current_node
        previous_node = current_node
        counter += 1
        time_step += 1

    # Attach the branch. Fixed: use `is None` (PEP 8) instead of `== None`,
    # and collapse the get/append/assign dance into a single setdefault.
    if branch_node.branches is None:
        branch_node.branches = {}
    branch_node.branches.setdefault(agent_id, []).append(first_alternative_node)
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
async def AlternativeActionsRunner(
    markov_game: MarkovGame,
    output_folder: str,
    nb_alternative_actions: int,
    max_depth: int,
    branch_only_on_new_round: bool = False,
):
    """
    This method generates a trajectory with partially completed branches,
    where the branching comes from taking unilaterally different actions.
    The resulting data is used to estimate the updated advantage alignment policy gradient terms.
    Let k := nb_sub_steps. Then the number of steps generated is O(Tk), where T is
    the maximum trajectory length.

    Returns:
        The RolloutTreeRootNode of the generated (branching) rollout tree.

    NOTE(review): `output_folder` and `branch_only_on_new_round` are never
    read in this body — confirm whether they are planned features or dead
    parameters.
    """

    tasks = []
    time_step = 0
    terminated = False
    root = RolloutTreeRootNode(
        id=markov_game.get_id(),
        crn_id=markov_game.get_crn_id()
    )
    previous_node = root

    while not terminated:
        # Snapshot of the game state before actions, used to spawn branches.
        mg_before_action = markov_game.get_safe_copy()

        # Get safe copies for main branch
        agent_action_safe_copies: dict[
            AgentId, AgentAndActionSafeCopy
        ] = await markov_game.get_actions_of_agents_without_side_effects()

        # Advance the main trajectory one step with those actions.
        markov_game.set_actions_of_agents_manually(agent_action_safe_copies)
        terminated = markov_game.take_simulation_step()
        main_node = RolloutTreeNode(
            step_log=markov_game.get_step_log(), time_step=time_step
        )
        branch_node = RolloutTreeBranchNode(main_child=main_node)
        previous_node.child = branch_node
        previous_node = main_node

        # Get alternative branches by generating new unilateral actions
        for agent_id in markov_game.agent_ids:
            for _ in range(nb_alternative_actions):
                # Get safe copies for branches. The comprehension's `agent_id`
                # only shadows the outer loop variable inside the
                # comprehension's own scope (Python 3), so the outer
                # `agent_id` is unaffected.
                branch_agent_action_safe_copies: dict[
                    AgentId, AgentAndActionSafeCopy
                ] = {
                    agent_id: AgentAndActionSafeCopy(
                        action=copy.deepcopy(agent_action_safe_copy.action),
                        action_info=copy.deepcopy(agent_action_safe_copy.action_info),
                        agent_after_action=agent_action_safe_copy.agent_after_action.get_safe_copy(),
                    )
                    for agent_id, agent_action_safe_copy in agent_action_safe_copies.items()
                }
                mg_branch: MarkovGame = mg_before_action.get_safe_copy()
                # Assumes a two-player game: exactly one "other" agent whose
                # action is pinned to the main-trajectory action — TODO confirm.
                other_agent_id = [id for id in mg_branch.agent_ids if id != agent_id][0]
                mg_branch.set_action_and_agent_after_action_manually(
                    agent_id=other_agent_id,
                    agent_action_safe_copy=branch_agent_action_safe_copies[
                        other_agent_id
                    ],
                )
                # Branch rollouts run concurrently; gathered after the main
                # trajectory finishes.
                task = asyncio.create_task(
                    run_with_unilateral_alt_action(
                        markov_game=mg_branch,
                        time_step=time_step,
                        agent_id=agent_id,
                        branch_node=branch_node,
                        max_depth=max_depth,
                    )
                )
                tasks.append(task)
        time_step += 1

    # wait for all branches to complete
    await asyncio.gather(*tasks)

    return root
|
src_code_for_reproducibility/markov_games/diplomacy/diplomacy_env.py
ADDED
|
@@ -0,0 +1,230 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict, List, Tuple, Optional, Any
|
| 2 |
+
from diplomacy import Game
|
| 3 |
+
import random
|
| 4 |
+
|
| 5 |
+
class DiplomacyEnv:
    """Multi-Agent Reinforcement Learning environment for Diplomacy.

    This class wraps the Diplomacy game engine to provide an interface
    compliant with the MARL standard.
    """

    def __init__(self, random_seed=None, map_name="standard", game_id=None, rules=None, max_steps=50):
        """Initialize the Diplomacy environment.

        Args:
            random_seed: Optional seed stored on the env.
                NOTE(review): currently never read by this class — confirm intended.
            map_name: The name of the map to use (default: "standard")
            game_id: Optional game ID
            rules: Optional rules to apply to the game
            max_steps: Maximum number of steps before forcing game end (default: 50)
        """
        self.random_seed = random_seed
        self.map_name = map_name
        self.game_id = game_id
        self.rules = rules or []
        self.game = None  # created lazily in reset()
        self.active_powers = []  # names of powers not yet eliminated
        self.render_mode = None
        self.max_steps = max_steps
        self.current_steps = 0  # steps taken since the last reset()

    def reset(self):
        """Reset the environment to an initial state and return the initial observation.

        Returns:
            observation: A dictionary where keys are agent identifiers and values are observations.
        """
        # Initialize a new game
        self.game = Game(game_id=self.game_id, map_name=self.map_name)

        # Apply rules
        for rule in self.rules:
            self.game.add_rule(rule)

        # Determine active powers (not eliminated)
        self.active_powers = [name for name, power in self.game.powers.items()
                              if not power.is_eliminated()]

        # Reset step counter
        self.current_steps = 0

        # Create initial observations for all powers
        observations = {}
        for power_name in self.active_powers:
            observations[power_name] = self._create_observation(power_name)

        return observations

    def step(self, actions):
        """Take a step in the environment using the provided actions.

        Args:
            actions: A dictionary where keys are agent identifiers and values are actions.
                Each action is a dict with optional keys "orders" (list) and "wait" (bool).

        Returns:
            observations: A dictionary where keys are agent identifiers and values are observations.
            done: Whether the episode has ended.
            info: Additional information about the environment.
        """
        # NOTE(review): leftover debug print — consider replacing with logging.
        print(f"stepping {self.current_steps}")
        self.current_steps += 1
        # Apply actions (orders) for each power
        for power_name, action in actions.items():
            if power_name in self.active_powers:
                orders = action.get("orders", [])
                wait = action.get("wait", True)

                # Set orders for the power
                if orders:
                    self.game.set_orders(power_name, orders)

                # Set wait flag
                self.game.set_wait(power_name, wait)

        # Check if all active powers are ready to proceed
        if self.game.does_not_wait():
            # Process the current phase
            self.game.process()

        # Update active powers list after processing
        self.active_powers = [name for name, power in self.game.powers.items()
                              if not power.is_eliminated()]

        # Create observations for all active powers
        observations = {}
        for power_name in self.active_powers:
            observations[power_name] = self._create_observation(power_name)

        # Check if the game is done (either naturally or due to max steps)
        done = self.game.is_game_done or self.current_steps >= self.max_steps

        # Create info dict
        info = {
            "phase": self.game.get_current_phase(),
            "active_powers": self.active_powers,
            "centers": self.game.get_centers(),
            "units": self.game.get_units(),
            "current_steps": self.current_steps,
            "max_steps_reached": self.current_steps >= self.max_steps
        }

        return observations, done, info

    def _create_observation(self, power_name):
        """Create observation for a specific power.

        Args:
            power_name: The name of the power

        Returns:
            An observation dictionary
        """
        observation = {
            "phase": self.game.get_current_phase(),
            "units": self.game.get_units(),
            "centers": self.game.get_centers(),
            "orderable_locations": self.game.get_orderable_locations(power_name),
            "order_status": self.game.get_order_status(power_name),
            "possible_orders": self._get_possible_orders_for_power(power_name)
        }
        return observation

    def _get_possible_orders_for_power(self, power_name):
        """Get all possible orders for a power's units.

        Args:
            power_name: The name of the power

        Returns:
            A dictionary mapping units to their possible orders
        """
        all_possible_orders = self.game.get_all_possible_orders()

        # Filter for only the locations where this power has units.
        # Units look like "A PAR"/"F BRE", so unit[2:] strips the type prefix.
        power_units = self.game.get_units(power_name)
        power_unit_locations = [unit[2:] for unit in power_units]

        # For retreat phases, include retreating units
        if self.game.phase_type == 'R':
            power = self.game.get_power(power_name)
            power_unit_locations.extend([unit[2:] for unit in power.retreats])

        # For adjustment phases, include buildable locations
        elif self.game.phase_type == 'A':
            power = self.game.get_power(power_name)
            # If we have more centers than units, we can build
            if len(power.centers) > len(power.units):
                # NOTE(review): _build_sites is a private engine API — confirm stability.
                buildable_sites = self.game._build_sites(power)
                power_unit_locations.extend(buildable_sites)
            # If we have more units than centers, we need to remove
            elif len(power.units) > len(power.centers):
                # All units are candidates for removal
                pass

        # Filter the possible orders to only those for this power's units/locations
        power_possible_orders = {}
        for loc, orders in all_possible_orders.items():
            if loc[:3] in power_unit_locations:
                power_possible_orders[loc] = orders

        return power_possible_orders

    def get_log_info(self):
        """Get additional information about the environment for logging.

        Returns:
            log_info: Information about the environment required to log the game.
                Empty dict when reset() has not been called yet.
        """
        if not self.game:
            return {}

        return {
            "game_id": self.game.game_id,
            "phase": self.game.get_current_phase(),
            "map_name": self.game.map_name,
            "centers": self.game.get_centers(),
            "units": self.game.get_units(),
            "powers": {name: {
                "units": power.units,
                "centers": power.centers,
                "is_eliminated": power.is_eliminated(),
                "order_status": self.game.get_order_status(name)
            } for name, power in self.game.powers.items()},
            "orders": self.game.get_orders(),
            "active_powers": self.active_powers,
            "is_game_done": self.game.is_game_done,
            "outcome": self.game.outcome if self.game.is_game_done else None
        }

    def render(self, mode='human'):
        """Render the current state of the environment.

        Args:
            mode: The rendering mode ('human', 'svg', etc.)

        Returns:
            The rendered image if applicable (SVG string for mode='svg',
            None for 'human' or when no game exists)
        """
        self.render_mode = mode
        if self.game:
            if mode == 'human':
                # Just print basic game state
                print(f"Game: {self.game.game_id}")
                print(f"Phase: {self.game.get_current_phase()}")
                print(f"Active Powers: {self.active_powers}")
                print("Supply Centers:")
                for power_name, centers in self.game.get_centers().items():
                    print(f"  {power_name}: {centers}")
                print("Units:")
                for power_name, units in self.game.get_units().items():
                    print(f"  {power_name}: {units}")
                return None
            elif mode == 'svg':
                # Return SVG representation
                return self.game.render(output_format='svg')
        return None

    def close(self):
        """Perform any necessary cleanup."""
        self.game = None
|
src_code_for_reproducibility/markov_games/group_timesteps.py
ADDED
|
@@ -0,0 +1,150 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
This module contains the logic for grouping time steps.
|
| 3 |
+
"""
|
| 4 |
+
import copy
|
| 5 |
+
from typing import Callable
|
| 6 |
+
|
| 7 |
+
from mllm.markov_games.markov_game import MarkovGame
|
| 8 |
+
from mllm.markov_games.rollout_tree import (
|
| 9 |
+
AgentActLog,
|
| 10 |
+
RolloutTreeBranchNode,
|
| 11 |
+
RolloutTreeNode,
|
| 12 |
+
RolloutTreeRootNode,
|
| 13 |
+
StepLog,
|
| 14 |
+
)
|
| 15 |
+
from mllm.markov_games.simulation import SimulationStepLog
|
| 16 |
+
|
| 17 |
+
AgentId = str
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def group_time_steps(
    rollout_tree: RolloutTreeRootNode,
    accumulation_stop_condition: Callable[[StepLog], bool],
) -> RolloutTreeRootNode:
    """
    During generation, we create rollout trees according to the real time steps.
    However, during training, we might want to treat groups of time steps as a single time step.
    As a concrete example, take Trust-and-Split. At each round, say we have X time steps of communication and then one time step for the split.
    Then the communication actions will not get any reward, and the split action will get the reward. During REINFORCE training, with discounting, this
    can cause training instability. We could instead treat every action in the round as being part of a single action, and give it the reward of the split action.
    This method helps to do this sort of grouping.
    It accumulates actions until the accumulation_stop_condition is met, and then creates a new node with the accumulated actions.
    It then recursively calls itself on the child node.
    Details:
        - The reward for the group is the reward of the last time step in the group.
        - The simulation log for the group is the simulation log of the last time step in the group.
        - The state end for the group becomes the first state end in the group.
        - The agent info for the group is the agent info of the last time step in the group.

    NOTE(review): trailing steps accumulated after the final stop condition
    are silently dropped (see group_time_steps_rec) — confirm intended.
    """

    def group_step_logs(step_logs: list[StepLog]) -> StepLog:
        """
        Concatenate per-agent chat turns across steps; keep only the first is_state_end.
        """
        # The grouped step inherits the last step's simulation log (and reward).
        last_sim_log = step_logs[-1].simulation_step_log
        agent_ids = {aid for s in step_logs for aid in s.action_logs.keys()}
        grouped_logs: dict[AgentId, AgentActLog] = {}
        for aid in agent_ids:
            turns = []
            for s in step_logs:
                act = s.action_logs.get(aid)
                if act and act.chat_turns:
                    # Deep-copied so mutating is_state_end below cannot leak
                    # back into the original tree.
                    turns.extend(copy.deepcopy(act.chat_turns))
            disable_is_state_end = False
            # Only the first state_end should be True, the rest should be False
            for t in turns:
                if t.is_state_end:
                    if disable_is_state_end:
                        t.is_state_end = False
                    else:
                        disable_is_state_end = True
                        continue
            grouped_logs[aid] = AgentActLog(
                chat_turns=turns, info=step_logs[-1].action_logs[aid].info
            )
        return StepLog(action_logs=grouped_logs, simulation_step_log=last_sim_log)

    def group_time_steps_rec(
        current_node: RolloutTreeNode | RolloutTreeBranchNode,
        group_time_step: int,
        accumulation_step_logs: list[StepLog],
    ) -> RolloutTreeNode | RolloutTreeBranchNode:
        """
        Groups time steps. Recursion is used to handle branches.
        (Branch handling is currently disabled; see the raise below.)
        """
        assert isinstance(current_node, RolloutTreeNode) or isinstance(
            current_node, RolloutTreeBranchNode
        ), "Current node must be a tree node or a branch node. Is of type: " + str(
            type(current_node)
        )
        first_group_node = None
        current_group_node = None
        while current_node is not None:
            if isinstance(current_node, RolloutTreeBranchNode):
                raise Exception(
                    "Grouping timesteps by round is not supported for branching trajectories yet."
                )
            # Special recursive case for branches (kept for future work):
            # if isinstance(current_node, RolloutTreeBranchNode):
            #     branches = {}
            #     for agent_id, branch_nodes in current_node.branches.items():
            #         branch_group_nodes = []
            #         for branch_node in branch_nodes:
            #             branch_group_node = group_time_steps_rec(
            #                 current_node=branch_node,
            #                 group_time_step=group_time_step,
            #                 accumulation_step_logs=copy.deepcopy(accumulation_step_logs))
            #             branch_group_nodes.append(branch_group_node)
            #         branches[agent_id] = branch_group_nodes

            #     main_child_group_node = group_time_steps_rec(
            #         current_node=current_node.main_child,
            #         group_time_step=group_time_step,
            #         accumulation_step_logs=copy.deepcopy(accumulation_step_logs))

            #     return RolloutTreeBranchNode(main_child=main_child_group_node, branches=branches)

            # Accumulate
            accumulation_step_logs.append(current_node.step_log)
            if accumulation_stop_condition(current_node.step_log):
                # Close the current group and append it to the grouped chain.
                grouped_step_logs = group_step_logs(accumulation_step_logs)
                accumulation_step_logs = []
                new_group_node = RolloutTreeNode(
                    step_log=grouped_step_logs, time_step=group_time_step, child=None
                )
                # NOTE(review): `== None` here vs `is not None` below — PEP 8
                # prefers `is None`; behavior is unchanged for these objects.
                if first_group_node == None:
                    first_group_node = new_group_node
                group_time_step += 1
                if current_group_node is not None:
                    current_group_node.child = new_group_node
                current_group_node = new_group_node
            current_node = current_node.child
        return first_group_node

    node = group_time_steps_rec(
        current_node=rollout_tree.child, group_time_step=0, accumulation_step_logs=[]
    )
    # Re-root the grouped chain under a fresh root with the same identifiers.
    return RolloutTreeRootNode(
        id=rollout_tree.id,
        crn_id=rollout_tree.crn_id,
        child=node,
        agent_ids=rollout_tree.agent_ids,
    )
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def stop_when_round_ends(step_log: StepLog) -> bool:
    """
    Minimal stop condition: True when this step log marks the last time
    step of a round.

    Raises an AssertionError when the simulation step log's info does not
    carry the "is_last_timestep_in_round" flag.
    """
    info = step_log.simulation_step_log.info
    assert (
        "is_last_timestep_in_round" in info
    ), "To group by round, is_last_timestep_in_round must be set in the info of your simulation step log at each time step."
    return info["is_last_timestep_in_round"]
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
def group_by_round(rollout_tree: RolloutTreeRootNode) -> RolloutTreeRootNode:
    """
    Collapse a rollout tree so each node spans one full round.

    Thin wrapper around ``group_time_steps`` with the
    ``stop_when_round_ends`` stop condition.
    """
    return group_time_steps(rollout_tree, stop_when_round_ends)
|
src_code_for_reproducibility/markov_games/linear_runner.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
import json
|
| 3 |
+
import os.path
|
| 4 |
+
|
| 5 |
+
from mllm.markov_games.markov_game import MarkovGame
|
| 6 |
+
from mllm.markov_games.rollout_tree import RolloutTreeNode, RolloutTreeRootNode
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
async def LinearRunner(
    markov_game: MarkovGame, output_folder: str
) -> RolloutTreeRootNode:
    """
    Roll out a single branch-free trajectory of the given markov game.

    Steps the game until it reports termination, chaining one
    RolloutTreeNode per time step under a freshly built root node.

    NOTE(review): ``output_folder`` is accepted for runner-interface
    compatibility but is not used by this runner.
    """
    root = RolloutTreeRootNode(
        id=markov_game.get_id(),
        crn_id=markov_game.get_crn_id(),
        agent_ids=markov_game.get_agent_ids(),
    )
    tail = root
    step_idx = 0
    done = False
    while not done:
        done, step_log = await markov_game.step()
        node = RolloutTreeNode(step_log=step_log, time_step=step_idx)
        tail.child = node
        tail = node
        step_idx += 1
    return root
|
src_code_for_reproducibility/markov_games/mg_utils.py
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
import copy
|
| 3 |
+
from collections.abc import Callable
|
| 4 |
+
from dataclasses import dataclass
|
| 5 |
+
|
| 6 |
+
from mllm.markov_games.ipd.ipd_agent import IPDAgent
|
| 7 |
+
from mllm.markov_games.ipd.ipd_simulation import IPD
|
| 8 |
+
from mllm.markov_games.markov_game import MarkovGame
|
| 9 |
+
from mllm.markov_games.negotiation.dond_agent import DealNoDealAgent
|
| 10 |
+
from mllm.markov_games.negotiation.dond_simulation import DealNoDealSimulation
|
| 11 |
+
from mllm.markov_games.negotiation.nego_hard_coded_policies import (
|
| 12 |
+
HardCodedNegoGreedyPolicy,
|
| 13 |
+
HardCodedNegoWelfareMaximizingPolicy,
|
| 14 |
+
)
|
| 15 |
+
from mllm.markov_games.ipd.Ipd_hard_coded_agents import AlwaysCooperateIPDAgent, AlwaysDefectIPDAgent
|
| 16 |
+
from mllm.markov_games.negotiation.no_press_nego_agent import NoPressAgent
|
| 17 |
+
from mllm.markov_games.negotiation.no_press_nego_simulation import NoPressSimulation
|
| 18 |
+
from mllm.markov_games.negotiation.tas_agent import TrustAndSplitAgent
|
| 19 |
+
from mllm.markov_games.negotiation.tas_rps_agent import TrustAndSplitRPSAgent
|
| 20 |
+
from mllm.markov_games.negotiation.tas_rps_simulation import TrustAndSplitRPSSimulation
|
| 21 |
+
from mllm.markov_games.negotiation.tas_simple_agent import TrustAndSplitSimpleAgent
|
| 22 |
+
from mllm.markov_games.negotiation.tas_simple_simulation import (
|
| 23 |
+
TrustAndSplitSimpleSimulation,
|
| 24 |
+
)
|
| 25 |
+
from mllm.markov_games.negotiation.tas_simulation import TrustAndSplitSimulation
|
| 26 |
+
from mllm.markov_games.rollout_tree import (
|
| 27 |
+
AgentActLog,
|
| 28 |
+
RolloutTreeBranchNode,
|
| 29 |
+
RolloutTreeNode,
|
| 30 |
+
RolloutTreeRootNode,
|
| 31 |
+
StepLog,
|
| 32 |
+
)
|
| 33 |
+
from mllm.markov_games.simulation import SimulationStepLog
|
| 34 |
+
|
| 35 |
+
AgentId = str
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
@dataclass
class AgentConfig:
    """Configuration for one agent of a markov game."""

    agent_id: str  # unique identifier of the agent within the game
    agent_name: str  # human-readable name forwarded to the simulation
    agent_class_name: str  # name of the Agent subclass to instantiate (resolved dynamically)
    policy_id: str  # key into the shared policies dict
    init_kwargs: dict  # extra keyword arguments forwarded to the agent constructor
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
@dataclass
class MarkovGameConfig:
    """Everything needed to build one MarkovGame instance."""

    id: int  # identifier of this markov game
    seed: int  # rng seed shared by the simulation and agents (also used as crn_id)
    simulation_class_name: str  # name of the Simulation subclass to instantiate (resolved dynamically)
    simulation_init_args: dict  # extra keyword arguments forwarded to the simulation constructor
    agent_configs: list[AgentConfig]  # one config per participating agent
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def init_markov_game_components(
    config: MarkovGameConfig, policies: dict[str, Callable[[list[dict]], str]]
):
    """
    Build a MarkovGame from its config.

    Instantiates every agent listed in ``config.agent_configs`` (binding
    the policy referenced by its ``policy_id``), then the simulation, and
    wires both into a MarkovGame that shares the same seed.
    """
    agents = {}
    agent_names = []
    for agent_config in config.agent_configs:
        agent_id = agent_config.agent_id
        agent_name = agent_config.agent_name
        # NOTE(review): class names from config are resolved with eval();
        # this is only safe for trusted configs — consider an explicit
        # name -> class registry instead.
        agent_class = eval(agent_config.agent_class_name)
        agent = agent_class(
            seed=config.seed,
            agent_id=agent_id,
            agent_name=agent_name,
            policy=policies[agent_config.policy_id],
            **agent_config.init_kwargs,
        )
        agents[agent_id] = agent
        agent_names.append(agent_name)
    # Simulation class is resolved the same way as the agent classes.
    simulation = eval(config.simulation_class_name)(
        seed=config.seed,
        agent_ids=list(agents.keys()),
        agent_names=agent_names,
        **config.simulation_init_args,
    )
    markov_game = MarkovGame(
        id=config.id,
        crn_id=config.seed,  # the seed doubles as the common-random-number id
        agents=agents,
        simulation=simulation,
    )
    return markov_game
|
src_code_for_reproducibility/markov_games/negotiation/README.md
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
## Negotiation Games: core mechanics and variants
|
| 2 |
+
|
| 3 |
+
This family of games features two agents who, in each round, may briefly communicate and then simultaneously propose how to split a fixed resource (most commonly 10 coins). Rewards are the amount kept multiplied by an agent’s per-unit value. The starting speaker alternates deterministically across rounds.
|
| 4 |
+
|
| 5 |
+
Communication is optional and variant-dependent: some settings encourage rich messaging to share private information, while others remove messaging entirely to focus on allocation behavior.
|
| 6 |
+
|
| 7 |
+
Proportional splitting is used when the two proposals exceed the available total: allocations are scaled proportionally rather than discarded. This preserves a useful learning signal even when agents over-claim.
|
| 8 |
+
|
| 9 |
+
### Variants (in increasing difficulty)
|
| 10 |
+
|
| 11 |
+
- No‑Press Split
|
| 12 |
+
- Single item type (coins)
|
| 13 |
+
- No communication; agents go straight to making split proposals, with the starting player alternating deterministically.
|
| 14 |
+
- Motivation: mirrors no‑communication setups (e.g., Advantage Alignment) while keeping the split decision nontrivial.
|
| 15 |
+
- Deterministic Mode: values are fixed and public: one agent values coins at 10, the other at 1 (alternates each round).
|
| 16 |
+
- Stochastic Mode: values are random and uncorrelated.
|
| 17 |
+
|
| 18 |
+
- Trust-and-Split RPS (TAS-RPS)
|
| 19 |
+
- Single item type (coins)
|
| 20 |
+
- Each round, a rock–paper–scissors hand draw creates a strong asymmetry: the winner’s per-coin value is 10, the loser’s is 1.
|
| 21 |
+
- Each agent initially sees only their own hand and must communicate to coordinate an optimal split.
|
| 22 |
+
- Motivation: enforce large value disparity so one’s own value reveals little about the other’s (avoiding ceiling effects) and incentivize meaningful communication.
|
| 23 |
+
|
| 24 |
+
- Trust-and-Split (TAS)
|
| 25 |
+
- Single item type (coins); each round, each agent’s per-coin value is independently sampled in a broad range (e.g., 1–20).
|
| 26 |
+
- Each agent observes only their own value; they may use short messages to share and negotiate.
|
| 27 |
+
- Motivation: a simple blend that tests whether agents learn to exchange private information and coordinate proportional, value-aware splits.
|
| 28 |
+
|
| 29 |
+
- Deal-or-No-Deal (DOND)
|
| 30 |
+
- Introduced in [Deal or No Deal? End-to-End Learning for Negotiation Dialogues](https://arxiv.org/pdf/1706.05125)
|
| 31 |
+
- Multiple item types (typically "books", "hats" and "balls") with limited stocks; each agent has its own per-type values.
|
| 32 |
+
- A deal pays out only if both proposals exactly agree and respect the stock; otherwise no deal (zero reward) that round.
|
| 33 |
+
- Motivation: a known benchmark closer to real-world bargaining, where both parties must explicitly agree.
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
|
src_code_for_reproducibility/markov_games/negotiation/dond_agent.py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import copy
|
| 2 |
+
import re
|
| 3 |
+
from collections.abc import Callable
|
| 4 |
+
from dataclasses import dataclass
|
| 5 |
+
from typing import Any, Dict, List, Tuple
|
| 6 |
+
|
| 7 |
+
from mllm.markov_games.agent import Agent
|
| 8 |
+
from mllm.markov_games.rollout_tree import AgentActLog, ChatTurn
|
| 9 |
+
from mllm.markov_games.negotiation.dond_simulation import (
|
| 10 |
+
DealNoDealObs,
|
| 11 |
+
)
|
| 12 |
+
from mllm.markov_games.negotiation.nego_simulation import Split
|
| 13 |
+
from mllm.markov_games.negotiation.nego_agent import NegotiationAgent, NegotiationAgentState
|
| 14 |
+
|
| 15 |
+
class DealNoDealAgent(NegotiationAgent):
    """
    Negotiation agent for the Deal-or-No-Deal (DOND) game.

    Supplies the DOND-specific prompt templates and the regexes/parsers
    used to validate and extract message and split actions from the
    policy's raw text output.
    """

    def __init__(
        self,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        # Prompt templates; the {placeholders} are filled in elsewhere
        # (presumably by the NegotiationAgent base class — confirm).
        self.intro_prompt = (
            "You are {agent_id}. You are playing an iterated game. "
            "At each round, you and other agent will try to distribute among yourselves items of types {item_types}. "
            "You only know how much you value each item type, but not the other agent's values. "
            "You can communicate with the other agent by sending up to {quota_messages_per_agent_per_round} short messages per round. "
            "Each round, after exchanging messages, you and the other agent will submit a private proposal. "
            "A deal is accepted only if both proposals match exactly and are within stock; otherwise no deal (0 points for both at that round). "
            "The values of the items of the other agent at the previous round are revealed to you after each round. "
            "Your goal is: {goal}."
        )
        self.new_round_prompt = ("New round {round_nb}. Items: {stock}. Your values: {values}. ")
        self.last_round_prompt = ("Last round, other agent's values: {previous_values_coagent}. ")
        self.send_split_prompt = ("Respond with <split>...</split> where you propose how many items of each type you want to keep.")

    def get_message_regex(self, observation: DealNoDealObs) -> str:
        """Regex for a free-form message of at most 400 characters."""
        return r"<message>[\s\S]{0,400}</message>"

    def get_split_regex(self, observation: DealNoDealObs) -> str:
        """
        Build a regex that only accepts per-type quantities within stock.

        Each item type gets a ``<type>(0|1|...|stock)</type>`` group, all
        wrapped in a single ``<split>...</split>`` block.
        """
        parts = []
        for t in observation.item_types:
            s = int(observation.quantities.get(t, 0))
            allowed = "|".join(str(k) for k in range(0, s + 1))
            rng = f"({allowed})"
            parts.append(fr"<{t}>{rng}</{t}>")
        items_block = "".join(parts)
        return fr"(<split>{items_block}</split>)"

    def get_split_action(self, policy_output: str, observation: DealNoDealObs) -> Split:
        """
        Parse the ``<split>`` tags out of the policy output.

        Item types that are missing from the output default to 0.
        """
        # Use the module-level `re` import; the previous function-local
        # `import re as _re` was redundant.
        allocations: Dict[str, int] = {}
        for t in observation.item_types:
            m = re.search(fr"<{t}>([0-9]+)</{t}>", policy_output)
            allocations[t] = int(m.group(1)) if m else 0
        return Split(items_given_to_self=allocations)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
|
src_code_for_reproducibility/markov_games/negotiation/dond_simulation.py
ADDED
|
@@ -0,0 +1,153 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import copy
|
| 2 |
+
from dataclasses import dataclass
|
| 3 |
+
from typing import Any, Dict, List, Tuple
|
| 4 |
+
|
| 5 |
+
from numpy.random import default_rng
|
| 6 |
+
|
| 7 |
+
from mllm.markov_games.rollout_tree import SimulationStepLog
|
| 8 |
+
from mllm.markov_games.negotiation.nego_simulation import Split, NegotiationState, NegotiationObs, NegotiationSimulation
|
| 9 |
+
from mllm.utils.get_coagent_id import get_coagent_id
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
AgentId = str
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@dataclass
class DealNoDealState(NegotiationState):
    # Item types in play for this game (e.g. ["books", "hats", "balls"]).
    item_types: List[str]
    # Private per-agent valuation of each item type.
    values: Dict[AgentId, Dict[str, int]]
|
| 19 |
+
|
| 20 |
+
@dataclass
class DealNoDealObs(NegotiationObs):
    # This agent's own item valuations (private information).
    my_values: Dict[str, int]
    # Item types in play for this game.
    item_types: List[str]
    # Coagent's values as revealed after a round; None when not yet revealed.
    previous_values_coagent: Dict[str, int] | None
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def random_partition_integer(rng, total: int, parts: int) -> List[int]:
    """
    Randomly partition ``total`` into ``parts`` non-negative integers.

    Draws ``parts - 1`` cut points uniformly in [0, total] and returns the
    segment sizes between consecutive cut points; the sizes sum to
    ``total``. ``parts <= 0`` yields an empty list; ``total <= 0`` yields
    all zeros.
    """
    if parts <= 0:
        return []
    if total <= 0:
        return [0] * parts
    cuts = sorted(rng.integers(0, total + 1, size=parts - 1).tolist())
    bounds = [0] + cuts + [total]
    return [hi - lo for lo, hi in zip(bounds, bounds[1:])]
|
| 39 |
+
|
| 40 |
+
class DealNoDealSimulation(NegotiationSimulation):
    """
    Deal-or-No-Deal (DOND) negotiation simulation.

    Each round both agents privately value a small stock of items and
    submit split proposals; proposals that do not exactly partition the
    stock yield zero reward for both agents.
    """

    def __init__(
        self,
        item_types: List[str] | None = None,
        *args,
        **kwargs,
    ):
        """
        :param item_types: item categories in play; defaults to
            ["books", "hats", "balls"].
        """
        # Fix: avoid a mutable list literal as the default argument
        # (shared across calls); use a None sentinel instead.
        if item_types is None:
            item_types = ["books", "hats", "balls"]
        super().__init__(item_types=item_types, *args, **kwargs)
        self.reset()

    def _other(self, agent_id: AgentId) -> AgentId:
        """Return the id of the other agent."""
        return get_coagent_id(self.agent_ids, agent_id)

    def _sample_stock(self) -> Dict[str, int]:
        """Sample per-type item counts (total between 5 and 7)."""
        # total items between 5 and 7
        total_items = int(self.rng.integers(5, 8))
        # nonnegative per-type counts summing to total_items
        parts = random_partition_integer(self.rng, total_items, len(self.item_types))
        # allow zeros per type
        return {t: int(c) for t, c in zip(self.item_types, parts)}

    def _sample_values_pair(self) -> Dict[AgentId, Dict[str, int]]:
        """Rejection-sample both agents' valuations (each summing to 10)."""
        # Each agent has integer non-negative values that sum to 10
        # Each item type valued by at least one agent
        # Some item type valued by both agents
        while True:
            vals_a = random_partition_integer(self.rng, 10, len(self.item_types))
            vals_b = random_partition_integer(self.rng, 10, len(self.item_types))
            a = {t: int(v) for t, v in zip(self.item_types, vals_a)}
            b = {t: int(v) for t, v in zip(self.item_types, vals_b)}
            # each item valued by at least one
            ok1 = all((a[t] > 0) or (b[t] > 0) for t in self.item_types)
            # some item valued by both
            ok2 = any((a[t] > 0) and (b[t] > 0) for t in self.item_types)
            if ok1 and ok2:
                return {self.agent_ids[0]: a, self.agent_ids[1]: b}

    def _is_valid_allocation(self, allocation: Dict[str, int], stock: Dict[str, int]) -> bool:
        """True iff the allocation covers every item type with an int within stock."""
        for t in self.item_types:
            v = allocation.get(t)
            if v is None:
                return False
            if not isinstance(v, int):
                return False
            if v < 0 or v > int(stock.get(t, 0)):
                return False
        return True

    def set_new_round_of_variant(self):
        """DOND round transition: resample the stock only."""
        # Keep same values, resample stock
        self.state.quantities = self._sample_stock()

    def get_info_of_variant(self, state: NegotiationState, actions: Dict[AgentId, Any]) -> Dict[str, Any]:
        """Deep-copied state snapshot stored in the simulation step log."""
        return {
            "quantities": copy.deepcopy(state.quantities),
            "values": copy.deepcopy(state.values),
            'splits': copy.deepcopy(state.splits),
        }

    def get_rewards(self, splits: Dict[AgentId, Split]) -> Dict[AgentId, float]:
        """
        Returns the rewards for each agent.

        DOND rule: a deal pays out only when the two proposals exactly
        partition the stock for every item type; otherwise both agents
        receive 0 for the round.
        """
        split_a = splits[self.agent_ids[0]].items_given_to_self
        split_b = splits[self.agent_ids[1]].items_given_to_self
        rewards = {self.agent_ids[0]: 0, self.agent_ids[1]: 0}
        for t in self.item_types:
            # If not complementary, return 0!
            if not split_a[t] + split_b[t] == self.state.quantities[t]:
                return {self.agent_ids[0]: 0, self.agent_ids[1]: 0}
            rewards[self.agent_ids[0]] += split_a[t] * self.state.values[self.agent_ids[0]][t]
            rewards[self.agent_ids[1]] += split_b[t] * self.state.values[self.agent_ids[1]][t]
        return rewards

    def get_obs(self):
        """One observation per agent, keyed by agent id."""
        return {agent_id: self.get_obs_agent(agent_id) for agent_id in self.agent_ids}

    def get_obs_agent(self, agent_id):
        """Build the private observation for ``agent_id``."""
        other_id = self._other(agent_id)
        obs = DealNoDealObs(
            round_nb=self.state.round_nb,
            last_message=self.state.last_message,
            current_agent=self.state.current_agent,
            quantities=copy.deepcopy(self.state.quantities),
            value=0.0,  # unused in DOND
            other_agent_split=None,  # not meaningful until split
            split_phase=self.state.split_phase,
            quota_messages_per_agent_per_round=self.quota_messages_per_agent_per_round,
            my_values=copy.deepcopy(self.state.values[agent_id]),
            item_types=list(self.item_types),
            # NOTE(review): exposes the coagent's *current* values, which
            # equal the previous round's only because values persist across
            # rounds here — confirm intent if values ever get resampled.
            previous_values_coagent=copy.deepcopy(self.state.values.get(other_id, {})),
        )
        return obs

    def reset(self):
        """Sample a fresh stock/value pair and reset the round state."""
        start_agent = self.agent_ids[self._starting_agent_index]
        stock = self._sample_stock()
        values = self._sample_values_pair()
        self.state = DealNoDealState(
            round_nb=0,
            last_message="",
            current_agent=start_agent,
            quantities=stock,
            values=values,
            previous_values=None,
            splits={aid: None for aid in self.agent_ids},
            nb_messages_sent={aid: 0 for aid in self.agent_ids},
            split_phase=False,
            item_types=list(self.item_types),
        )
        return self.get_obs()
|
| 152 |
+
|
| 153 |
+
|
src_code_for_reproducibility/markov_games/rollout_tree.py
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
TODO: add parent to nodes so that some verification can be done. For instance, to ensure that node reward keys match the parent node.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from __future__ import annotations
|
| 6 |
+
|
| 7 |
+
import json
|
| 8 |
+
from dataclasses import dataclass
|
| 9 |
+
from pathlib import Path
|
| 10 |
+
from typing import Any, List, Literal, Optional, Tuple
|
| 11 |
+
|
| 12 |
+
import jsonschema
|
| 13 |
+
from pydantic import BaseModel, Field, model_validator
|
| 14 |
+
|
| 15 |
+
from mllm.chat_utils.chat_turn import ChatTurn
|
| 16 |
+
|
| 17 |
+
AgentId = str
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class SimulationStepLog(BaseModel):
    """Per-step log emitted by a Simulation: rewards plus free-form info."""

    # Reward obtained by each agent at this time step.
    rewards: dict[AgentId, float]
    # Arbitrary simulation-specific metadata (e.g. round-boundary markers).
    info: Any = None
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class AgentActLog(BaseModel):
    """Log of one agent's act: the produced chat turns plus free-form info."""

    # Chat turns generated by the agent; None when no turns were recorded.
    chat_turns: list[ChatTurn] | None
    info: Any = None

    @model_validator(mode="after")
    def _exactly_one_state_end(self):
        """
        This method is used to enforce that for each AgentActLog, there is exactly one ChatTurn which is a state end.

        Fix: the previous check ``self.chat_turns != []`` was also True for
        ``None`` (allowed by the field type) and then iterated ``None``,
        raising TypeError. A truthiness guard skips both None and [].
        """
        if self.chat_turns:
            n = sum(1 for t in self.chat_turns if t.is_state_end)
            if n != 1:
                raise ValueError(
                    f"AgentActLog must have exactly one ChatTurn with is_state_end=True; got {self.chat_turns}."
                )
        return self
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
class StepLog(BaseModel):
    """Combined log for one time step: per-agent action logs plus the simulation log."""

    # One action log per agent that acted at this step.
    action_logs: dict[AgentId, AgentActLog]
    # Rewards and info emitted by the simulation at this step.
    simulation_step_log: SimulationStepLog
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
# BranchType = Literal["unilateral_deviation", "common_deviation"] # might not be necessary
|
| 51 |
+
# class BranchNodeInfo(BaseModel):
|
| 52 |
+
# branch_id: str
|
| 53 |
+
# branch_for: AgentId
|
| 54 |
+
# branch_type: BranchType
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
class RolloutTreeNode(BaseModel):
    """Linked node of a rollout trajectory: one step log plus its successor."""

    step_log: StepLog
    time_step: int
    # Next node on the trajectory; None marks the end of the rollout.
    child: RolloutTreeNode | RolloutTreeBranchNode | None = None
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
class RolloutTreeBranchNode(BaseModel):
    """
    First item of the tuple indicates which agent "called" for an alternative branch.
    """

    # Continuation of the main (factual) trajectory.
    main_child: RolloutTreeNode
    # Alternative continuations, keyed by the agent that deviated.
    branches: dict[AgentId, list[RolloutTreeNode]] | None = None
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
class RolloutTreeRootNode(BaseModel):
    """Root of a rollout tree; identifies the game and its participants."""

    id: int
    crn_id: int  # ID of the rng used to generate this rollout tree
    # First step of the rollout; None for an empty tree.
    child: RolloutTreeNode | RolloutTreeBranchNode | None = None
    agent_ids: List[AgentId] = Field(min_length=1)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
# class RolloutTreeLeafNode(BaseModel):
|
| 80 |
+
# step_log: StepLog
|
| 81 |
+
# time_step: int
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
# Necessary for self-referential stuff in pydantic
|
| 85 |
+
RolloutTreeBranchNode.model_rebuild()
|
| 86 |
+
RolloutTreeNode.model_rebuild()
|
src_code_for_reproducibility/markov_games/run_markov_games.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
from collections.abc import Callable
|
| 3 |
+
from dataclasses import dataclass
|
| 4 |
+
|
| 5 |
+
from torch._C import ClassType
|
| 6 |
+
|
| 7 |
+
from mllm.markov_games.markov_game import MarkovGame
|
| 8 |
+
from mllm.markov_games.rollout_tree import RolloutTreeRootNode
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
async def run_markov_games(
    runner: Callable[[MarkovGame], RolloutTreeRootNode],
    runner_kwargs: dict,
    output_folder: str,
    markov_games: list[MarkovGame],
) -> list[RolloutTreeRootNode]:
    """
    Run every markov game concurrently with the given runner.

    Each game is scheduled as its own asyncio task; the gathered rollout
    trees are returned in the same order as ``markov_games``.
    """
    tasks = [
        asyncio.create_task(
            runner(markov_game=game, output_folder=output_folder, **runner_kwargs)
        )
        for game in markov_games
    ]
    return await asyncio.gather(*tasks)
|
src_code_for_reproducibility/markov_games/simulation.py
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
A Simulation is the environment of a Markov Game.
|
| 3 |
+
The Simulation is not responsible for properly checking / formatting the responses of LLM's.
|
| 4 |
+
This is the job of the `Agent` class.
|
| 5 |
+
Simulations expect clean actions, and are defined similarly to `gymnasium` environments, except that they are adapted for the Multi-agent setting.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from abc import ABC, abstractmethod
|
| 9 |
+
from typing import Any, Tuple
|
| 10 |
+
|
| 11 |
+
from numpy.random import default_rng
|
| 12 |
+
|
| 13 |
+
from mllm.markov_games.rollout_tree import SimulationStepLog
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class Simulation(ABC):
    """
    Abstract multi-agent environment interface (gymnasium-like).

    Subclasses must implement ``__init__`` and ``step``; the remaining
    methods are optional hooks that raise NotImplementedError by default.
    """

    @abstractmethod
    def __init__(self, seed: int, *args, **kwargs):
        # Seeded rng so rollouts are reproducible for a given seed.
        self.seed = seed
        self.rng = default_rng(self.seed)

    @abstractmethod
    def step(self, actions: Any) -> Tuple[bool, SimulationStepLog]:
        """
        Returns terminated, info
        """
        raise NotImplementedError

    def get_obs(self):
        """Returns all agent observations in dict

        Returns:
            observations
        """
        raise NotImplementedError

    def get_obs_agent(self, agent_id):
        """Returns observation for agent_id"""
        raise NotImplementedError

    def get_obs_size(self):
        """Returns the shape of the observation"""
        raise NotImplementedError

    def get_state(self):
        # Full (privileged) environment state; optional hook.
        raise NotImplementedError

    def get_state_size(self):
        """Returns the shape of the state"""
        raise NotImplementedError

    def get_avail_actions(self):
        # Available actions for all agents; optional hook.
        raise NotImplementedError

    def get_avail_agent_actions(self, agent_id):
        """Returns the available actions for agent_id"""
        raise NotImplementedError

    def get_total_actions(self):
        """Returns the total number of actions an agent could ever take"""
        # TODO: This is only suitable for a discrete 1 dimensional action space for each agent
        raise NotImplementedError

    def get_safe_copy(self):
        """
        Return copy of the agent object that is decorrelated from the original object.
        """
        raise NotImplementedError

    def reset(self):
        """Returns initial observations and states"""
        raise NotImplementedError

    def render(self):
        # Optional visualization hook.
        raise NotImplementedError

    def close(self):
        # Optional resource cleanup hook.
        raise NotImplementedError

    # def seed(self):
    #     raise NotImplementedError

    def save_replay(self):
        # Optional replay-persistence hook.
        raise NotImplementedError

    def get_simulation_info(self):
        # Optional metadata hook.
        raise NotImplementedError
|
src_code_for_reproducibility/markov_games/statistics_runner.py
ADDED
|
@@ -0,0 +1,405 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import gc
|
| 4 |
+
import json
|
| 5 |
+
import pickle
|
| 6 |
+
from dataclasses import dataclass
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional
|
| 9 |
+
|
| 10 |
+
from basic_render import find_iteration_folders
|
| 11 |
+
|
| 12 |
+
from mllm.markov_games.rollout_tree import (
|
| 13 |
+
RolloutTreeBranchNode,
|
| 14 |
+
RolloutTreeNode,
|
| 15 |
+
RolloutTreeRootNode,
|
| 16 |
+
SimulationStepLog,
|
| 17 |
+
)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def _iterate_main_nodes(root: RolloutTreeRootNode) -> Iterator[RolloutTreeNode]:
    """
    Lazily walk the main trajectory of a rollout tree, yielding its nodes.

    Branch nodes are not yielded; at a branch point the walk follows
    ``main_child`` to stay on the main trajectory. Any node of an
    unrecognized type terminates the walk.
    """
    node = root.child
    while node is not None:
        # Check order matters: a regular node is yielded before we ever
        # consider the branch case, exactly as the original traversal did.
        if isinstance(node, RolloutTreeNode):
            yield node
            node = node.child
            continue
        if isinstance(node, RolloutTreeBranchNode):
            # Stay on the main trajectory at branch points.
            node = node.main_child
            continue
        # Unknown node type: stop rather than guess.
        break
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def iterate_main_simulation_logs(
    root: RolloutTreeRootNode,
) -> Iterator[SimulationStepLog]:
    """Yield the simulation step log of each node on the main trajectory."""
    yield from (
        node.step_log.simulation_step_log for node in _iterate_main_nodes(root)
    )
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def stream_rollout_files(iteration_folder: Path) -> Iterator[Path]:
    """Yield every rollout-tree pickle (``*.rt.pkl``) found under *iteration_folder*."""
    yield from (
        candidate
        for candidate in iteration_folder.rglob("*.rt.pkl")
        if candidate.is_file()
    )
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def load_root(path: Path) -> RolloutTreeRootNode:
    """
    Unpickle a rollout file and validate it into a ``RolloutTreeRootNode``.

    NOTE: pickle deserialization is only safe on trusted, locally produced
    rollout files — never call this on untrusted input.
    """
    payload = pickle.loads(Path(path).read_bytes())
    return RolloutTreeRootNode.model_validate(payload)
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
@dataclass
class StatRecord:
    """Aggregated statistic values for a single rollout."""

    mgid: int  # markov game id of the rollout (from the rollout tree root)
    crn_id: Optional[int]  # common-random-numbers id, if the root carries one
    iteration: str  # name of the iteration folder the rollout came from
    values: Dict[str, Any]  # stat name -> aggregated value (may be nested per-agent)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
class StatComputer:
    """
    Stateful stat computer that consumes SimulationStepLog instances
    and produces final aggregated values for one rollout (mgid).

    A fresh instance is created per rollout: ``update`` is called once per
    step on the main trajectory, then ``finalize`` reports the aggregates.
    """

    def update(self, sl: SimulationStepLog) -> None:  # pragma: no cover - interface
        """Consume one simulation step log; subclasses accumulate state here."""
        raise NotImplementedError

    def finalize(self) -> Dict[str, Any]:  # pragma: no cover - interface
        """Return the aggregated stat values (stat name -> value) for the rollout."""
        raise NotImplementedError
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def run_stats(
    data_root: Path,
    game_name: str,
    make_computers: Callable[[], List[StatComputer]],
    output_filename: Optional[str] = None,
    output_format: str = "json",  # "json" (dict of lists) or "jsonl"
) -> Path:
    """
    Compute stats across all iteration_* folders under ``data_root``.

    For every rollout (``*.rt.pkl`` file) a fresh set of StatComputers is
    created, fed each SimulationStepLog on the main trajectory, and finalized
    into one record. Output goes to ``data_root/statistics/``:

      * ``output_format == "jsonl"``: one JSON record per rollout, streamed.
      * ``output_format == "json"`` (default): a single dict-of-lists
        document (easier to plot), with dict-valued stats kept nested
        per agent key.

    Returns the path of the written file.
    """
    data_root = Path(data_root)
    outdir = data_root / "statistics"
    outdir.mkdir(parents=True, exist_ok=True)
    # Choose the extension by format unless an explicit filename is given.
    default_name = (
        f"{game_name}.stats.json"
        if output_format == "json"
        else f"{game_name}.stats.jsonl"
    )
    outfile = outdir / (
        output_filename if output_filename is not None else default_name
    )

    # Rewrite file each run to keep it clean and small.
    if outfile.exists():
        outfile.unlink()

    iteration_folders = find_iteration_folders(str(data_root))

    def _rollout_values(root: RolloutTreeRootNode) -> Dict[str, Any]:
        # Run a fresh set of computers over one rollout's main trajectory.
        # Best-effort: a failing computer is skipped rather than aborting
        # the whole statistics run.
        computers = make_computers()
        for sl in iterate_main_simulation_logs(root):
            for comp in computers:
                try:
                    comp.update(sl)
                except Exception:
                    continue
        values: Dict[str, Any] = {}
        for comp in computers:
            try:
                values.update(comp.finalize())
            except Exception:
                continue
        return values

    def _iter_records() -> Iterator[Dict[str, Any]]:
        # One record per rollout file, in deterministic folder order.
        for iteration_folder in iteration_folders:
            iteration_name = Path(iteration_folder).name
            for pkl_path in stream_rollout_files(Path(iteration_folder)):
                root = load_root(pkl_path)
                yield {
                    "mgid": getattr(root, "id", None),
                    "crn_id": getattr(root, "crn_id", None),
                    "iteration": iteration_name,
                    "stats": _rollout_values(root),
                }
                # Rollout trees can be large; free eagerly between files.
                del root
                gc.collect()

    def _records_to_columns(records: List[Dict[str, Any]]) -> Dict[str, Any]:
        # Build dict-of-lists with nested (per-agent) stats preserved.
        # First pass: collect the union of stat keys and nested agent keys.
        stat_keys: set[str] = set()
        nested_agent_keys: Dict[str, set[str]] = {}
        for r in records:
            stats = r.get("stats", {}) or {}
            for k, v in stats.items():
                stat_keys.add(k)
                if isinstance(v, dict):
                    nested_agent_keys.setdefault(k, set()).update(
                        str(ak) for ak in v
                    )

        # Initialize the column structure.
        stats_out: Dict[str, Any] = {}
        for k in stat_keys:
            if k in nested_agent_keys:
                stats_out[k] = {ak: [] for ak in sorted(nested_agent_keys[k])}
            else:
                stats_out[k] = []

        # Second pass: fill the lists; missing values become None so every
        # column stays aligned with mgid/crn_id/iteration.
        mgids: List[Any] = []
        crn_ids: List[Any] = []
        iterations_out: List[str] = []
        for r in records:
            mgids.append(r.get("mgid"))
            crn_ids.append(r.get("crn_id"))
            iterations_out.append(r.get("iteration"))
            stats = r.get("stats", {}) or {}
            for k in stat_keys:
                val = stats.get(k)
                if isinstance(stats_out[k], dict):
                    # per-agent dict
                    agent_dict = val if isinstance(val, dict) else {}
                    for ak in stats_out[k]:
                        stats_out[k][ak].append(agent_dict.get(ak))
                else:
                    stats_out[k].append(val)

        return {
            "mgid": mgids,
            "crn_id": crn_ids,
            "iteration": iterations_out,
            "stats": stats_out,
        }

    if output_format == "jsonl":
        # Stream directly: one JSON line per rollout, no accumulation.
        with open(outfile, "w", encoding="utf-8") as w:
            for rec in _iter_records():
                w.write(json.dumps(rec, ensure_ascii=False) + "\n")
    else:
        # Aggregate to dict-of-lists for easier plotting.
        records = list(_iter_records())
        with open(outfile, "w", encoding="utf-8") as w:
            json.dump(_records_to_columns(records), w, ensure_ascii=False)

    return outfile
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
def run_stats_functional(
    data_root: Path,
    game_name: str,
    metrics: Dict[str, Callable[[SimulationStepLog], Optional[Dict[str, float]]]],
    output_filename: Optional[str] = None,
    output_format: str = "json",
) -> Path:
    """
    Functional variant of ``run_stats``: ``metrics`` maps a metric name to
    ``f(SimulationStepLog) -> {agent_id: value}``.

    Per rollout, each metric is averaged over the steps where it produced a
    value; agents with no recorded values get ``None`` so record alignment
    is kept. Writes a single consolidated file in ``data_root/statistics/``
    ("jsonl": one record per rollout; "json": dict of lists) and returns
    its path.
    """
    data_root = Path(data_root)
    outdir = data_root / "statistics"
    outdir.mkdir(parents=True, exist_ok=True)
    default_name = (
        f"{game_name}.stats.json"
        if output_format == "json"
        else f"{game_name}.stats.jsonl"
    )
    outfile = outdir / (
        output_filename if output_filename is not None else default_name
    )

    # Rewrite file each run to keep it clean and small.
    if outfile.exists():
        outfile.unlink()

    iteration_folders = find_iteration_folders(str(data_root))

    def _rollout_values(
        root: RolloutTreeRootNode,
    ) -> Dict[str, Dict[str, Optional[float]]]:
        # aggregator structure: metric -> agent_id -> list of per-step values
        agg: Dict[str, Dict[str, List[float]]] = {m: {} for m in metrics}
        for sl in iterate_main_simulation_logs(root):
            for mname, fn in metrics.items():
                # Best-effort: a failing or value-less metric is skipped
                # for this step rather than aborting the run.
                try:
                    vals = fn(sl)
                except Exception:
                    vals = None
                if not vals:
                    continue
                for aid, v in vals.items():
                    if v is None:
                        continue
                    lst = agg[mname].setdefault(str(aid), [])
                    try:
                        lst.append(float(v))
                    except Exception:
                        continue
        # Average per metric per agent.  An empty list maps to None
        # (fix: the original annotated this as float despite storing None).
        return {
            mname: {
                aid: (sum(vals) / len(vals) if vals else None)
                for aid, vals in agent_values.items()
            }
            for mname, agent_values in agg.items()
        }

    def _iter_records() -> Iterator[Dict[str, Any]]:
        # One record per rollout file, in deterministic folder order.
        for iteration_folder in iteration_folders:
            iteration_name = Path(iteration_folder).name
            for pkl_path in stream_rollout_files(Path(iteration_folder)):
                root = load_root(pkl_path)
                yield {
                    "mgid": getattr(root, "id", None),
                    "crn_id": getattr(root, "crn_id", None),
                    "iteration": iteration_name,
                    "stats": _rollout_values(root),
                }
                # Rollout trees can be large; free eagerly between files.
                del root
                gc.collect()

    def _records_to_columns(records: List[Dict[str, Any]]) -> Dict[str, Any]:
        # Build dict-of-lists output with per-agent stats kept nested.
        stat_keys: set[str] = set()
        nested_agent_keys: Dict[str, set[str]] = {}
        for r in records:
            stats = r.get("stats", {}) or {}
            for k, v in stats.items():
                stat_keys.add(k)
                if isinstance(v, dict):
                    nested_agent_keys.setdefault(k, set()).update(
                        str(ak) for ak in v
                    )

        stats_out: Dict[str, Any] = {}
        for k in stat_keys:
            if k in nested_agent_keys:
                stats_out[k] = {ak: [] for ak in sorted(nested_agent_keys[k])}
            else:
                stats_out[k] = []

        mgids: List[Any] = []
        crn_ids: List[Any] = []
        iterations_out: List[str] = []
        for r in records:
            mgids.append(r.get("mgid"))
            crn_ids.append(r.get("crn_id"))
            iterations_out.append(r.get("iteration"))
            stats = r.get("stats", {}) or {}
            for k in stat_keys:
                val = stats.get(k)
                if isinstance(stats_out[k], dict):
                    agent_dict = val if isinstance(val, dict) else {}
                    for ak in stats_out[k]:
                        stats_out[k][ak].append(agent_dict.get(ak))
                else:
                    stats_out[k].append(val)

        return {
            "mgid": mgids,
            "crn_id": crn_ids,
            "iteration": iterations_out,
            "stats": stats_out,
        }

    if output_format == "jsonl":
        with open(outfile, "w", encoding="utf-8") as w:
            for rec in _iter_records():
                w.write(json.dumps(rec, ensure_ascii=False) + "\n")
    else:
        records = list(_iter_records())
        with open(outfile, "w", encoding="utf-8") as w:
            json.dump(_records_to_columns(records), w, ensure_ascii=False)

    return outfile
|
src_code_for_reproducibility/markov_games/vine_ppo.py
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from anytree import Node, RenderTree
|
| 2 |
+
from anytree.exporter import DotExporter
|
| 3 |
+
import os.path
|
| 4 |
+
import asyncio
|
| 5 |
+
from mllm.markov_games.markov_game import MarkovGame
|
| 6 |
+
|
| 7 |
+
async def VinePPORunner(
    markov_game: MarkovGame,
    **kwargs) -> None:
    """Run VinePPO-style rollouts for *markov_game*.

    NOTE(review): intentional stub — it currently does nothing and returns
    None.  TODO: implement the VinePPO rollout / advantage-estimation logic.
    """
    pass
|
src_code_for_reproducibility/models/__init__.py
ADDED
|
File without changes
|
src_code_for_reproducibility/models/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (153 Bytes). View file
|
|
|
src_code_for_reproducibility/models/__pycache__/adapter_training_wrapper.cpython-312.pyc
ADDED
|
Binary file (4.92 kB). View file
|
|
|
src_code_for_reproducibility/models/__pycache__/human_policy.cpython-312.pyc
ADDED
|
Binary file (11.9 kB). View file
|
|
|
src_code_for_reproducibility/models/__pycache__/inference_backend.cpython-312.pyc
ADDED
|
Binary file (2.24 kB). View file
|
|
|
src_code_for_reproducibility/models/__pycache__/inference_backend_dummy.cpython-312.pyc
ADDED
|
Binary file (2.34 kB). View file
|
|
|
src_code_for_reproducibility/models/__pycache__/inference_backend_sglang.cpython-312.pyc
ADDED
|
Binary file (3.67 kB). View file
|
|
|
src_code_for_reproducibility/models/__pycache__/inference_backend_vllm.cpython-312.pyc
ADDED
|
Binary file (4.98 kB). View file
|
|
|
src_code_for_reproducibility/models/__pycache__/large_language_model_api.cpython-312.pyc
ADDED
|
Binary file (6.94 kB). View file
|
|
|
src_code_for_reproducibility/models/__pycache__/large_language_model_local.cpython-312.pyc
ADDED
|
Binary file (16.7 kB). View file
|
|
|
src_code_for_reproducibility/models/adapter_training_wrapper.py
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import logging
|
| 4 |
+
from typing import Union
|
| 5 |
+
from peft import (
|
| 6 |
+
LoraConfig,
|
| 7 |
+
get_peft_model,
|
| 8 |
+
)
|
| 9 |
+
|
| 10 |
+
logger = logging.getLogger(__name__)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class AdapterWrapper(nn.Module):
    """
    A thin façade that
      • keeps a reference to a *shared* PEFT-wrapped model,
      • ensures `set_adapter(adapter)` is called on every forward,
      • exposes only the parameters that should be trained for that adapter.
    """

    def __init__(
        self,
        shared_llm: nn.Module,
        adapter_id: str,
        lora_config: dict,
        path: Union[str, None] = None,
    ):
        """
        Args:
            shared_llm: Base language model shared between adapter wrappers.
            adapter_id: Name under which this LoRA adapter is registered.
            lora_config: Keyword arguments for ``peft.LoraConfig``.
            path: Optional local path or HF Hub repo ID holding pre-trained
                adapter weights. Loading is best-effort: on failure the
                adapter keeps its fresh initialization and a warning is
                logged.
        """
        super().__init__()
        self.adapter_id = adapter_id
        # Local name avoids shadowing the `lora_config` dict parameter.
        peft_config = LoraConfig(**lora_config)
        # Register a LoRA adapter on the shared model.  (Fix: the original
        # stored `shared_llm` on self and immediately overwrote it — dead
        # store removed.)  NOTE(review): get_peft_model appears to mutate/
        # wrap the passed-in model so sibling wrappers sharing `shared_llm`
        # see the same adapters — confirm against peft version in use.
        self.shared_llm = get_peft_model(
            model=shared_llm,
            peft_config=peft_config,
            adapter_name=adapter_id,
        )
        self.shared_llm.train()

        # Load external adapter weights if provided.
        loaded_from: str | None = None
        if path:
            try:
                # Supports both local filesystem paths and HF Hub repo IDs
                self.shared_llm.load_adapter(
                    is_trainable=True,
                    model_id=path,
                    adapter_name=adapter_id,
                )
                loaded_from = path
            except Exception as exc:  # noqa: BLE001 - want to log any load failure context
                logger.warning(
                    f"Adapter '{adapter_id}': failed to load from '{path}': {exc}"
                )

        if loaded_from:
            logger.info(
                f"Adapter '{adapter_id}': loaded initial weights from '{loaded_from}'."
            )
        else:
            logger.info(
                f"Adapter '{adapter_id}': initialized with fresh weights (no initial weights found)."
            )

    def parameters(self, recurse: bool = True):
        """
        Return only the trainable parameters of this wrapper's adapter.

        ``recurse`` is accepted purely for PyTorch API compatibility.
        Side effect: activates this wrapper's adapter on the shared model.
        """
        self.shared_llm.set_adapter(self.adapter_id)
        return [p for p in self.shared_llm.parameters() if p.requires_grad]

    def get_base_model_logits(self, contexts):
        """
        Run the base model (without adapter) in inference mode, without
        tracking gradients.

        This is useful to get reference logits for KL-divergence computation.
        """
        with torch.no_grad():
            with self.shared_llm.disable_adapter():
                return self.shared_llm(input_ids=contexts)[0]

    def forward(self, *args, **kwargs):
        """Activate this wrapper's adapter, then forward through the shared model."""
        self.shared_llm.set_adapter(self.adapter_id)
        return self.shared_llm(*args, **kwargs)

    def save_pretrained(self, save_path):
        """Persist adapter weights via the underlying PEFT model."""
        self.shared_llm.save_pretrained(save_path)

    def gradient_checkpointing_enable(self, *args, **kwargs):
        """Delegate gradient checkpointing to the shared model."""
        self.shared_llm.gradient_checkpointing_enable(*args, **kwargs)

    @property
    def dtype(self):
        # Mirror the shared model so callers can treat the wrapper as a model.
        return self.shared_llm.dtype

    @property
    def device(self):
        return self.shared_llm.device
|