Muqeeth committed (verified)
Commit 166a3a2 · 1 Parent(s): 63f8a1e

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50):
  1. src_code_for_reproducibility/__pycache__/__init__.cpython-311.pyc +0 -0
  2. src_code_for_reproducibility/__pycache__/__init__.cpython-312.pyc +0 -0
  3. src_code_for_reproducibility/chat_utils/apply_template.py +84 -0
  4. src_code_for_reproducibility/chat_utils/chat_turn.py +27 -0
  5. src_code_for_reproducibility/chat_utils/template_specific.py +109 -0
  6. src_code_for_reproducibility/docs/Makefile +19 -0
  7. src_code_for_reproducibility/docs/generate_docs.py +249 -0
  8. src_code_for_reproducibility/docs/make.bat +35 -0
  9. src_code_for_reproducibility/markov_games/__init__.py +0 -0
  10. src_code_for_reproducibility/markov_games/__pycache__/__init__.cpython-311.pyc +0 -0
  11. src_code_for_reproducibility/markov_games/__pycache__/__init__.cpython-312.pyc +0 -0
  12. src_code_for_reproducibility/markov_games/__pycache__/agent.cpython-312.pyc +0 -0
  13. src_code_for_reproducibility/markov_games/__pycache__/alternative_actions_runner.cpython-312.pyc +0 -0
  14. src_code_for_reproducibility/markov_games/__pycache__/gather_and_export_utils.cpython-312.pyc +0 -0
  15. src_code_for_reproducibility/markov_games/__pycache__/group_timesteps.cpython-312.pyc +0 -0
  16. src_code_for_reproducibility/markov_games/__pycache__/linear_runner.cpython-312.pyc +0 -0
  17. src_code_for_reproducibility/markov_games/__pycache__/markov_game.cpython-312.pyc +0 -0
  18. src_code_for_reproducibility/markov_games/__pycache__/mg_utils.cpython-312.pyc +0 -0
  19. src_code_for_reproducibility/markov_games/__pycache__/rollout_tree.cpython-311.pyc +0 -0
  20. src_code_for_reproducibility/markov_games/__pycache__/rollout_tree.cpython-312.pyc +0 -0
  21. src_code_for_reproducibility/markov_games/__pycache__/run_markov_games.cpython-312.pyc +0 -0
  22. src_code_for_reproducibility/markov_games/__pycache__/simulation.cpython-312.pyc +0 -0
  23. src_code_for_reproducibility/markov_games/agent.py +76 -0
  24. src_code_for_reproducibility/markov_games/alternative_actions_runner.py +138 -0
  25. src_code_for_reproducibility/markov_games/group_timesteps.py +150 -0
  26. src_code_for_reproducibility/markov_games/linear_runner.py +30 -0
  27. src_code_for_reproducibility/markov_games/markov_game.py +208 -0
  28. src_code_for_reproducibility/markov_games/mg_utils.py +89 -0
  29. src_code_for_reproducibility/markov_games/negotiation/nego_agent.py +242 -0
  30. src_code_for_reproducibility/markov_games/negotiation/nego_simulation.py +241 -0
  31. src_code_for_reproducibility/markov_games/negotiation/negotiation_statistics.py +244 -0
  32. src_code_for_reproducibility/markov_games/negotiation/no_press_nego_simulation.py +168 -0
  33. src_code_for_reproducibility/markov_games/negotiation/tas_rps_simulation.py +248 -0
  34. src_code_for_reproducibility/markov_games/negotiation/tas_simulation.py +172 -0
  35. src_code_for_reproducibility/markov_games/rollout_tree.py +86 -0
  36. src_code_for_reproducibility/markov_games/run_markov_games.py +24 -0
  37. src_code_for_reproducibility/markov_games/simulation.py +87 -0
  38. src_code_for_reproducibility/markov_games/vine_ppo.py +10 -0
  39. src_code_for_reproducibility/models/__pycache__/__init__.cpython-312.pyc +0 -0
  40. src_code_for_reproducibility/models/__pycache__/adapter_training_wrapper.cpython-312.pyc +0 -0
  41. src_code_for_reproducibility/models/__pycache__/human_policy.cpython-312.pyc +0 -0
  42. src_code_for_reproducibility/models/__pycache__/inference_backend.cpython-312.pyc +0 -0
  43. src_code_for_reproducibility/models/__pycache__/inference_backend_dummy.cpython-312.pyc +0 -0
  44. src_code_for_reproducibility/models/__pycache__/inference_backend_sglang.cpython-312.pyc +0 -0
  45. src_code_for_reproducibility/models/__pycache__/inference_backend_vllm.cpython-312.pyc +0 -0
  46. src_code_for_reproducibility/models/__pycache__/large_language_model_api.cpython-312.pyc +0 -0
  47. src_code_for_reproducibility/models/__pycache__/large_language_model_local.cpython-312.pyc +0 -0
  48. src_code_for_reproducibility/models/__pycache__/scalar_critic.cpython-312.pyc +0 -0
  49. src_code_for_reproducibility/training/__pycache__/__init__.cpython-312.pyc +0 -0
  50. src_code_for_reproducibility/training/__pycache__/annealing_methods.cpython-312.pyc +0 -0
src_code_for_reproducibility/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (160 Bytes)

src_code_for_reproducibility/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (148 Bytes)
src_code_for_reproducibility/chat_utils/apply_template.py ADDED
@@ -0,0 +1,84 @@
+import torch
+
+from mllm.chat_utils.chat_turn import ChatTurn
+from mllm.chat_utils.template_specific import (
+    custom_gemma3_template,
+    custom_llama3_template,
+    custom_qwen2_template,
+    custom_qwen3_template,
+    gemma3_assistant_postfix,
+    qwen2_assistant_postfix,
+    qwen3_assistant_postfix,
+)
+
+
+def get_custom_chat_template(tokenizer) -> str:
+    """
+    Get the chat template for the tokenizer.
+    """
+    if "qwen2" in tokenizer.name_or_path.lower():
+        return custom_qwen2_template
+    elif "llama" in tokenizer.name_or_path.lower():
+        return custom_llama3_template
+    elif "qwen3" in tokenizer.name_or_path.lower():
+        return custom_qwen3_template
+    elif "gemma" in tokenizer.name_or_path.lower():
+        return custom_gemma3_template
+    else:
+        raise ValueError(f"Tokenizer {tokenizer.name_or_path} not supported")
+
+
+def get_custom_assistant_postfix(tokenizer) -> torch.Tensor:
+    """
+    Get the custom assistant postfix for the tokenizer.
+    """
+    if "qwen2" in tokenizer.name_or_path.lower():
+        return qwen2_assistant_postfix
+    elif "qwen3" in tokenizer.name_or_path.lower():
+        return qwen3_assistant_postfix
+    elif "gemma" in tokenizer.name_or_path.lower():
+        return gemma3_assistant_postfix
+    return torch.tensor([], dtype=torch.long)
+
+
+def tokenize_chats(chats: list[ChatTurn], tokenizer, enable_thinking) -> None:
+    """
+    Set the chat_template_token_ids for each chat turn.
+    # TODO: use engine tokens if available
+    """
+    custom_template = get_custom_chat_template(tokenizer)
+    custom_assistant_postfix: torch.Tensor = get_custom_assistant_postfix(tokenizer)
+    for i, chat in enumerate(chats):
+        if chat.chat_template_token_ids is None:
+            if chat.role == "user":
+                next_chat = chats[i + 1] if i + 1 < len(chats) else None
+                add_generation_prompt = True
+                if next_chat and next_chat.role == "user":
+                    add_generation_prompt = False
+                encoded_chat = tokenizer.apply_chat_template(
+                    [chat],
+                    return_tensors="pt",
+                    chat_template=custom_template,
+                    add_generation_prompt=add_generation_prompt,
+                    add_system_prompt=True if i == 0 else False,
+                    enable_thinking=enable_thinking,
+                ).flatten()
+                previous_chat = chats[i - 1] if i > 0 else None
+                if previous_chat and previous_chat.role == "assistant":
+                    encoded_chat = torch.cat([custom_assistant_postfix, encoded_chat])
+            elif chat.role == "assistant":
+                encoded_chat = chat.out_token_ids
+            chat.chat_template_token_ids = encoded_chat
+
+
+def chat_turns_to_token_ids(
+    chats: list[ChatTurn], tokenizer, enable_thinking
+) -> torch.Tensor:
+    """
+    Tokenize the chat turns and set the chat_template_token_ids for each chat turn.
+    """
+    tokenize_chats(chats=chats, tokenizer=tokenizer, enable_thinking=enable_thinking)
+    token_ids = []
+    for chat in chats:
+        token_ids.append(chat.chat_template_token_ids)
+    return torch.cat(token_ids)
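Note: a minimal usage sketch for chat_turns_to_token_ids (not part of the commit; the model name and messages are illustrative, and passing ChatTurn objects straight through apply_chat_template relies on Jinja attribute lookup, exactly as the functions above do):

    from transformers import AutoTokenizer

    from mllm.chat_utils.apply_template import chat_turns_to_token_ids
    from mllm.chat_utils.chat_turn import ChatTurn

    tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B")
    chats = [
        ChatTurn(role="user", agent_id="alice", content="Shall we split 50/50?"),
        # Two consecutive user turns exercise the add_generation_prompt=False path.
        ChatTurn(role="user", agent_id="alice", content="Any counter-offer?"),
    ]
    token_ids = chat_turns_to_token_ids(chats, tokenizer, enable_thinking=False)
    print(token_ids.shape)  # 1-D tensor of chat-template token ids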
src_code_for_reproducibility/chat_utils/chat_turn.py ADDED
@@ -0,0 +1,27 @@
+from __future__ import annotations
+
+import json
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Any, List, Literal, Optional, Tuple
+
+import jsonschema
+import torch
+from pydantic import BaseModel, ConfigDict, Field, model_validator
+
+AgentId = str
+
+
+class ChatTurn(BaseModel):
+    model_config = ConfigDict(arbitrary_types_allowed=True)  # needed for torch tensors
+
+    role: str = Field(pattern="^(user|assistant)$")
+    agent_id: AgentId  # ID of the agent with which the chat occurred
+    content: str
+    reasoning_content: str | None = None
+    chat_template_token_ids: torch.LongTensor | None = None  # Token ids in chat-template format, e.g. the ids of "<assistant>{content}</assistant>"
+    out_token_ids: torch.LongTensor | None = (
+        None  # tokens generated by the inference engine
+    )
+    log_probs: torch.FloatTensor | None = None
+    is_state_end: bool = False  # indicates whether this chat turn marks the end of a state in the trajectory
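Note: constructing a ChatTurn by hand (illustrative values, not part of the commit):

    import torch

    from mllm.chat_utils.chat_turn import ChatTurn

    turn = ChatTurn(
        role="assistant",
        agent_id="bob",
        content="I propose: 3 books for me, 2 hats for you.",
        out_token_ids=torch.tensor([101, 2054], dtype=torch.long),  # ids from the inference engine
    )
    assert turn.chat_template_token_ids is None  # filled in later by tokenize_chats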
src_code_for_reproducibility/chat_utils/template_specific.py ADDED
@@ -0,0 +1,109 @@
+import huggingface_hub
+import torch
+from transformers import AutoTokenizer
+
+custom_llama3_template = """
+{%- if add_system_prompt %}
+{{- '<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nCutting Knowledge Date: December 2023\nToday Date: 26 Jul 2024\n\n<|eot_id|>' }}
+{%- endif %}
+{%- for message in messages %}
+{{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' + message['content'] | trim + '<|eot_id|>' }}
+{%- endfor %}
+
+{%- if add_generation_prompt %}
+{{- '<|start_header_id|>' + 'assistant' + '<|end_header_id|>\n\n' }}
+{%- endif %}
+"""
+
+qwen2_assistant_postfix = (
+    AutoTokenizer.from_pretrained("Qwen/Qwen2.5-7B-Instruct")
+    .encode("\n", return_tensors="pt")
+    .flatten()
+)
+qwen3_assistant_postfix = (
+    AutoTokenizer.from_pretrained("Qwen/Qwen3-8B")
+    .encode("\n", return_tensors="pt")
+    .flatten()
+)
+gemma3_assistant_postfix = (
+    AutoTokenizer.from_pretrained("google/gemma-3-4b-it")
+    .encode("\n", return_tensors="pt")
+    .flatten()
+)
+custom_qwen2_template = """
+{%- if add_system_prompt %}
+{{- '<|im_start|>system\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\n' }}
+{%- endif %}
+{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
+{%- for message in messages %}
+{%- if message.content is string %}
+{%- set content = message.content %}
+{%- else %}
+{%- set content = '' %}
+{%- endif %}
+{%- if (message.role == "user") %}
+{{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
+{%- elif message.role == "assistant" %}
+{%- set reasoning_content = '' %}
+{%- if message.reasoning_content is string %}
+{%- set reasoning_content = message.reasoning_content %}
+{%- else %}
+{%- if '</think>' in content %}
+{%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
+{%- set content = content.split('</think>')[-1].lstrip('\n') %}
+{%- endif %}
+{%- endif %}
+{%- if loop.index0 > ns.last_query_index %}
+{%- if reasoning_content %}
+{{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
+{%- else %}
+{{- '<|im_start|>' + message.role + '\n' + content }}
+{%- endif %}
+{%- else %}
+{{- '<|im_start|>' + message.role + '\n' + content }}
+{%- endif %}
+{{- '<|im_end|>\n' }}
+{%- endif %}
+{%- endfor %}
+{%- if add_generation_prompt %}
+{{- '<|im_start|>assistant\n' }}
+{%- endif %}
+"""
+
+custom_qwen3_template = """
+{%- for message in messages %}
+{%- if message.content is string %}
+{%- set content = message.content %}
+{%- else %}
+{%- set content = '' %}
+{%- endif %}
+{%- if (message.role == "user") %}
+{{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
+{%- elif message.role == "assistant" %}
+{{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
+{%- endif %}
+{%- endfor %}
+{%- if add_generation_prompt %}
+{{- '<|im_start|>assistant\n' }}
+{%- if enable_thinking is defined and enable_thinking is false %}
+{{- '<think>\n\n</think>\n\n' }}
+{%- endif %}
+{%- endif %}
+"""
+
+custom_gemma3_template = """
+{%- if add_system_prompt %}
+{{- bos_token -}}
+{%- endif %}
+{%- for message in messages -%}
+{%- if message['role'] == 'assistant' -%}
+{%- set role = 'model' -%}
+{%- else -%}
+{%- set role = message['role'] -%}
+{%- endif -%}
+{{ '<start_of_turn>' + role + '\n' + message['content'] | trim + '<end_of_turn>\n' }}
+{%- endfor -%}
+{%- if add_generation_prompt -%}
+{{ '<start_of_turn>model\n' }}
+{%- endif -%}
+"""
src_code_for_reproducibility/docs/Makefile ADDED
@@ -0,0 +1,19 @@
+# Minimal makefile for Sphinx documentation
+
+# You can set these variables from the command line, and also
+# from the environment for the first two.
+SPHINXOPTS ?=
+SPHINXBUILD ?= sphinx-build
+SOURCEDIR = source
+BUILDDIR = build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(SPHINXFLAGS)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(SPHINXFLAGS)
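Note: with Sphinx installed, running `make html` (or `make.bat html` on Windows) from this directory builds the site into `build/html`; this is the step that `generate_docs.py` below automates.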
src_code_for_reproducibility/docs/generate_docs.py ADDED
@@ -0,0 +1,249 @@
+#!/usr/bin/env python3
+"""
+Script to automatically generate Sphinx documentation for all modules and build the HTML website.
+"""
+import importlib.util
+import os
+import subprocess
+import sys
+
+
+def check_and_install_dependencies():
+    """Check for required dependencies and install them if missing."""
+    required_packages = [
+        "sphinx",
+        "sphinx-rtd-theme",
+        "sphinxcontrib-napoleon",
+        "sphinxcontrib-mermaid",
+        "sphinx-autodoc-typehints",
+    ]
+
+    missing_packages = []
+
+    for package in required_packages:
+        # Convert package name to module name (replace - with _)
+        module_name = package.replace("-", "_")
+
+        # Check if the package is installed
+        if importlib.util.find_spec(module_name) is None:
+            missing_packages.append(package)
+
+    # Install missing packages
+    if missing_packages:
+        print(f"Installing missing dependencies: {', '.join(missing_packages)}")
+        subprocess.check_call(
+            [sys.executable, "-m", "pip", "install"] + missing_packages
+        )
+        print("Dependencies installed successfully")
+    else:
+        print("All required dependencies are already installed")
+
+
+def create_makefile(docs_dir):
+    """Create a Makefile for Sphinx documentation if it doesn't exist."""
+    makefile_path = os.path.join(docs_dir, "Makefile")
+
+    if os.path.exists(makefile_path):
+        print(f"Makefile already exists at {makefile_path}")
+        return
+
+    print(f"Creating Makefile at {makefile_path}")
+
+    makefile_content = """# Minimal makefile for Sphinx documentation
+
+# You can set these variables from the command line, and also
+# from the environment for the first two.
+SPHINXOPTS ?=
+SPHINXBUILD ?= sphinx-build
+SOURCEDIR = source
+BUILDDIR = build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(SPHINXFLAGS)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(SPHINXFLAGS)
+"""
+
+    with open(makefile_path, "w") as f:
+        f.write(makefile_content)
+
+    print("Makefile created successfully")
+
+
+def create_make_bat(docs_dir):
+    """Create a make.bat file for Windows if it doesn't exist."""
+    make_bat_path = os.path.join(docs_dir, "make.bat")
+
+    if os.path.exists(make_bat_path):
+        print(f"make.bat already exists at {make_bat_path}")
+        return
+
+    print(f"Creating make.bat at {make_bat_path}")
+
+    make_bat_content = """@ECHO OFF
+
+pushd %~dp0
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+	set SPHINXBUILD=sphinx-build
+)
+set SOURCEDIR=source
+set BUILDDIR=build
+
+%SPHINXBUILD% >NUL 2>NUL
+if errorlevel 9009 (
+	echo.
+	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+	echo.installed, then set the SPHINXBUILD environment variable to point
+	echo.to the full path of the 'sphinx-build' executable. Alternatively you
+	echo.may add the Sphinx directory to PATH.
+	echo.
+	echo.If you don't have Sphinx installed, grab it from
+	echo.https://www.sphinx-doc.org/
+	exit /b 1
+)
+
+if "%1" == "" goto help
+
+%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+goto end
+
+:help
+%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+
+:end
+popd
+"""
+
+    with open(make_bat_path, "w") as f:
+        f.write(make_bat_content)
+
+    print("make.bat created successfully")
+
+
+def main():
+    # Check and install required dependencies
+    print("=== Checking dependencies ===")
+    check_and_install_dependencies()
+
+    # Get the directory of this script
+    script_dir = os.path.dirname(os.path.abspath(__file__))
+
+    # Path to the project root
+    project_root = os.path.dirname(script_dir)
+
+    # Path to the source directory
+    source_dir = os.path.join(project_root, "src")
+
+    # Path to the docs source directory
+    docs_source_dir = os.path.join(script_dir, "source")
+
+    # Print paths for debugging
+    print(f"Script directory: {script_dir}")
+    print(f"Project root: {project_root}")
+    print(f"Source directory: {source_dir}")
+    print(f"Docs source directory: {docs_source_dir}")
+
+    # Make sure the source directory exists
+    if not os.path.exists(source_dir):
+        print(f"Error: Source directory {source_dir} does not exist!")
+        sys.exit(1)
+
+    # Make sure the docs source directory exists
+    if not os.path.exists(docs_source_dir):
+        print(f"Creating docs source directory: {docs_source_dir}")
+        os.makedirs(docs_source_dir)
+
+    # Step 1: Run sphinx-apidoc to generate .rst files for all modules
+    print("\n=== Generating API documentation ===")
+    cmd = [
+        "sphinx-apidoc",
+        "-f",  # Force overwriting of existing files
+        "-e",  # Put module documentation before submodule documentation
+        "-M",  # Put module documentation before subpackage documentation
+        "-o",
+        docs_source_dir,  # Output directory
+        source_dir,  # Source code directory
+    ]
+
+    print(f"Running command: {' '.join(cmd)}")
+    result = subprocess.run(cmd, capture_output=True, text=True)
+
+    # Print the output of the command
+    print("STDOUT:")
+    print(result.stdout)
+
+    print("STDERR:")
+    print(result.stderr)
+
+    if result.returncode != 0:
+        print(f"Error: sphinx-apidoc failed with return code {result.returncode}")
+        sys.exit(1)
+
+    # List the files in the docs source directory
+    print("\nFiles in docs/source directory:")
+    for file in sorted(os.listdir(docs_source_dir)):
+        print(f"  {file}")
+
+    print("\nDocumentation source files generated successfully!")
+
+    # Step 2: Create Makefile and make.bat if they don't exist
+    create_makefile(script_dir)
+    create_make_bat(script_dir)
+
+    # Step 3: Build the HTML documentation
+    print("\n=== Building HTML documentation ===")
+
+    # Determine the build command based on the platform
+    if os.name == "nt":  # Windows
+        build_cmd = ["make.bat", "html"]
+    else:  # Unix/Linux/Mac
+        build_cmd = ["make", "html"]
+
+    # Change to the docs directory to run the build command
+    os.chdir(script_dir)
+
+    print(f"Running command: {' '.join(build_cmd)}")
+    build_result = subprocess.run(build_cmd, capture_output=True, text=True)
+
+    # Print the output of the build command
+    print("STDOUT:")
+    print(build_result.stdout)
+
+    print("STDERR:")
+    print(build_result.stderr)
+
+    if build_result.returncode != 0:
+        print(f"Error: HTML build failed with return code {build_result.returncode}")
+        sys.exit(1)
+
+    # Get the path to the built HTML documentation
+    html_dir = os.path.join(script_dir, "build", "html")
+    index_path = os.path.join(html_dir, "index.html")
+
+    if os.path.exists(index_path):
+        print("\nHTML documentation built successfully!")
+        print(f"You can view it by opening: {index_path}")
+
+        # Try to open the documentation in a browser
+        try:
+            import webbrowser
+
+            print("\nAttempting to open documentation in your default browser...")
+            webbrowser.open(f"file://{index_path}")
+        except Exception as e:
+            print(f"Could not open browser automatically: {e}")
+    else:
+        print(f"\nWarning: HTML index file not found at {index_path}")
+
+
+if __name__ == "__main__":
+    main()
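Note: the script is self-contained; running `python generate_docs.py` installs any missing Sphinx packages, regenerates the `.rst` sources from the sibling `src/` directory, builds the HTML, and tries to open it in a browser.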
src_code_for_reproducibility/docs/make.bat ADDED
@@ -0,0 +1,35 @@
+@ECHO OFF
+
+pushd %~dp0
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+	set SPHINXBUILD=sphinx-build
+)
+set SOURCEDIR=source
+set BUILDDIR=build
+
+%SPHINXBUILD% >NUL 2>NUL
+if errorlevel 9009 (
+	echo.
+	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+	echo.installed, then set the SPHINXBUILD environment variable to point
+	echo.to the full path of the 'sphinx-build' executable. Alternatively you
+	echo.may add the Sphinx directory to PATH.
+	echo.
+	echo.If you don't have Sphinx installed, grab it from
+	echo.https://www.sphinx-doc.org/
+	exit /b 1
+)
+
+if "%1" == "" goto help
+
+%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+goto end
+
+:help
+%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+
+:end
+popd
src_code_for_reproducibility/markov_games/__init__.py ADDED
File without changes
src_code_for_reproducibility/markov_games/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (173 Bytes)

src_code_for_reproducibility/markov_games/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (161 Bytes)

src_code_for_reproducibility/markov_games/__pycache__/agent.cpython-312.pyc ADDED
Binary file (3.2 kB)

src_code_for_reproducibility/markov_games/__pycache__/alternative_actions_runner.cpython-312.pyc ADDED
Binary file (4.95 kB)

src_code_for_reproducibility/markov_games/__pycache__/gather_and_export_utils.cpython-312.pyc ADDED
Binary file (46.5 kB)

src_code_for_reproducibility/markov_games/__pycache__/group_timesteps.cpython-312.pyc ADDED
Binary file (6.17 kB)

src_code_for_reproducibility/markov_games/__pycache__/linear_runner.cpython-312.pyc ADDED
Binary file (1.25 kB)

src_code_for_reproducibility/markov_games/__pycache__/markov_game.cpython-312.pyc ADDED
Binary file (9.72 kB)

src_code_for_reproducibility/markov_games/__pycache__/mg_utils.cpython-312.pyc ADDED
Binary file (3.98 kB)

src_code_for_reproducibility/markov_games/__pycache__/rollout_tree.cpython-311.pyc ADDED
Binary file (4.75 kB)

src_code_for_reproducibility/markov_games/__pycache__/rollout_tree.cpython-312.pyc ADDED
Binary file (3.67 kB)

src_code_for_reproducibility/markov_games/__pycache__/run_markov_games.cpython-312.pyc ADDED
Binary file (1.14 kB)

src_code_for_reproducibility/markov_games/__pycache__/simulation.cpython-312.pyc ADDED
Binary file (3.9 kB)
src_code_for_reproducibility/markov_games/agent.py ADDED
@@ -0,0 +1,76 @@
+"""
+In simple RL paradise, where the action dimensions are constant and well defined,
+Agent classes are not necessary. But in MARL with LLMs, there isn't always
+a direct path from policy to action. For instance, a prompt must be created from the
+observation of the environment. Then, the outputs of the policy might be invalid, so a second
+request to the LLM must be sent before the action is well defined. This is why this Agent class exists.
+It acts as a mini environment, bridging the gap between the core simulation and
+the LLM policies.
+"""
+
+from abc import ABC, abstractmethod
+from collections.abc import Callable
+from typing import Any, Tuple
+
+from numpy.random import default_rng
+
+from mllm.markov_games.rollout_tree import AgentActLog
+
+
+class Agent(ABC):
+    @abstractmethod
+    def __init__(
+        self,
+        seed: int,
+        agent_id: str,
+        agent_name: str,
+        agent_policy: Callable[[list[dict]], str],
+        *args,
+        **kwargs,
+    ):
+        """
+        Initialize the agent state.
+        """
+        self.seed = seed
+        self.agent_id = agent_id
+        self.agent_name = agent_name
+        self.policy = agent_policy
+        self.rng = default_rng(self.seed)
+        raise NotImplementedError
+
+    async def act(self, observation) -> Tuple[Any, AgentActLog]:
+        """
+        Query (possibly multiple times) a policy (or possibly a pool of policies) to
+        obtain the action of the agent.
+
+        Example:
+            action = None
+            prompt = self.observation_to_prompt(observation)
+            while not self.valid(action):
+                output = await self.policy.generate(prompt)
+                action = self.policy_output_to_action(output)
+            return action
+
+        Returns:
+            action
+            step_info
+        """
+        raise NotImplementedError
+
+    def get_safe_copy(self):
+        """
+        Return a copy of the agent object that is decorrelated from the original object.
+        """
+        raise NotImplementedError
+
+    def reset(self):
+        raise NotImplementedError
+
+    def render(self):
+        raise NotImplementedError
+
+    def close(self):
+        raise NotImplementedError
+
+    def get_agent_info(self):
+        raise NotImplementedError
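Note: a minimal concrete subclass sketch to show the contract (the class and its behavior are illustrative, not part of the commit):

    from mllm.markov_games.agent import Agent
    from mllm.markov_games.rollout_tree import AgentActLog


    class EchoAgent(Agent):
        """Toy agent: no LLM round-trip, the observation itself is the action."""

        def __init__(self, seed, agent_id, agent_name, policy, **kwargs):
            self.seed = seed
            self.agent_id = agent_id
            self.agent_name = agent_name
            self.policy = policy

        async def act(self, observation):
            return observation, AgentActLog(chat_turns=[], info=None)

        def get_safe_copy(self):
            return EchoAgent(self.seed, self.agent_id, self.agent_name, self.policy)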
src_code_for_reproducibility/markov_games/alternative_actions_runner.py ADDED
@@ -0,0 +1,138 @@
+import asyncio
+import copy
+import json
+import os.path
+from typing import Any, Tuple
+
+from mllm.markov_games.markov_game import AgentAndActionSafeCopy, MarkovGame
+from mllm.markov_games.rollout_tree import (
+    AgentActLog,
+    RolloutTreeBranchNode,
+    RolloutTreeNode,
+    RolloutTreeRootNode,
+    StepLog,
+)
+
+AgentId = str
+
+
+async def run_with_unilateral_alt_action(
+    markov_game: MarkovGame,
+    agent_id: AgentId,
+    time_step: int,
+    branch_node: RolloutTreeBranchNode,
+    max_depth: int,
+):
+    """
+    Generate a new branch for a given agent by regenerating its action unilaterally.
+    """
+
+    # Generate alternative action and take a step
+    await markov_game.set_action_of_agent(agent_id)
+    terminated: bool = markov_game.take_simulation_step()
+    step_log = markov_game.get_step_log()
+    first_alternative_node = RolloutTreeNode(
+        step_log=step_log,
+        time_step=time_step,
+    )
+
+    # Generate the rest of the trajectory up to max depth
+    time_step += 1
+    counter = 1
+    previous_node = first_alternative_node
+    while not terminated and counter <= max_depth:
+        terminated, step_log = await markov_game.step()
+        current_node = RolloutTreeNode(step_log=step_log, time_step=time_step)
+        previous_node.child = current_node
+        previous_node = current_node
+        counter += 1
+        time_step += 1
+
+    if branch_node.branches is None:
+        branch_node.branches = {agent_id: [first_alternative_node]}
+    else:
+        agent_branches = branch_node.branches.get(agent_id, [])
+        agent_branches.append(first_alternative_node)
+        branch_node.branches[agent_id] = agent_branches
+
+
+async def AlternativeActionsRunner(
+    markov_game: MarkovGame,
+    output_folder: str,
+    nb_alternative_actions: int,
+    max_depth: int,
+    branch_only_on_new_round: bool = False,
+):
+    """
+    Generate a trajectory with partially completed branches, where the branching
+    comes from taking unilaterally different actions.
+    The resulting data is used to estimate the updated advantage alignment policy gradient terms.
+    Let k := nb_alternative_actions. Then the number of steps generated is O(Tk), where T is
+    the maximum trajectory length.
+    """
+
+    tasks = []
+    time_step = 0
+    terminated = False
+    root = RolloutTreeRootNode(
+        id=markov_game.get_id(),
+        crn_id=markov_game.get_crn_id(),
+        agent_ids=markov_game.get_agent_ids(),
+    )
+    previous_node = root
+
+    while not terminated:
+        mg_before_action = markov_game.get_safe_copy()
+
+        # Get safe copies for the main branch
+        agent_action_safe_copies: dict[
+            AgentId, AgentAndActionSafeCopy
+        ] = await markov_game.get_actions_of_agents_without_side_effects()
+
+        markov_game.set_actions_of_agents_manually(agent_action_safe_copies)
+        terminated = markov_game.take_simulation_step()
+        main_node = RolloutTreeNode(
+            step_log=markov_game.get_step_log(), time_step=time_step
+        )
+        branch_node = RolloutTreeBranchNode(main_child=main_node)
+        previous_node.child = branch_node
+        previous_node = main_node
+
+        # Get alternative branches by generating new unilateral actions
+        for agent_id in markov_game.agent_ids:
+            for _ in range(nb_alternative_actions):
+                # Get safe copies for branches
+                branch_agent_action_safe_copies: dict[
+                    AgentId, AgentAndActionSafeCopy
+                ] = {
+                    aid: AgentAndActionSafeCopy(
+                        action=copy.deepcopy(safe_copy.action),
+                        action_info=copy.deepcopy(safe_copy.action_info),
+                        agent_after_action=safe_copy.agent_after_action.get_safe_copy(),
+                    )
+                    for aid, safe_copy in agent_action_safe_copies.items()
+                }
+                mg_branch: MarkovGame = mg_before_action.get_safe_copy()
+                other_agent_id = [id for id in mg_branch.agent_ids if id != agent_id][0]
+                mg_branch.set_action_and_agent_after_action_manually(
+                    agent_id=other_agent_id,
+                    agent_action_safe_copy=branch_agent_action_safe_copies[
+                        other_agent_id
+                    ],
+                )
+                task = asyncio.create_task(
+                    run_with_unilateral_alt_action(
+                        markov_game=mg_branch,
+                        time_step=time_step,
+                        agent_id=agent_id,
+                        branch_node=branch_node,
+                        max_depth=max_depth,
+                    )
+                )
+                tasks.append(task)
+        time_step += 1
+
+    # Wait for all branches to complete
+    await asyncio.gather(*tasks)
+
+    return root
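Note: an invocation sketch (`game` stands in for a configured MarkovGame, e.g. one built by `init_markov_game_components` in `mg_utils.py`; the output folder is illustrative):

    import asyncio

    from mllm.markov_games.alternative_actions_runner import AlternativeActionsRunner

    # Branch twice per agent at every main-trajectory step; roll each branch out 3 steps.
    root = asyncio.run(
        AlternativeActionsRunner(
            markov_game=game,
            output_folder="runs/branching",
            nb_alternative_actions=2,
            max_depth=3,
        )
    )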
src_code_for_reproducibility/markov_games/group_timesteps.py ADDED
@@ -0,0 +1,150 @@
+"""
+This module contains the logic for grouping time steps.
+"""
+import copy
+from typing import Callable
+
+from mllm.markov_games.markov_game import MarkovGame
+from mllm.markov_games.rollout_tree import (
+    AgentActLog,
+    RolloutTreeBranchNode,
+    RolloutTreeNode,
+    RolloutTreeRootNode,
+    StepLog,
+)
+from mllm.markov_games.simulation import SimulationStepLog
+
+AgentId = str
+
+
+def group_time_steps(
+    rollout_tree: RolloutTreeRootNode,
+    accumulation_stop_condition: Callable[[StepLog], bool],
+) -> RolloutTreeRootNode:
+    """
+    During generation, we create rollout trees according to the real time steps.
+    However, during training, we might want to treat groups of time steps as a single time step.
+    As a concrete example, take Trust-and-Split. In each round, say we have X time steps of
+    communication and then one time step for the split. The communication actions then get no
+    reward, and the split action gets the reward. During REINFORCE training with discounting,
+    this can cause training instability. We could instead treat every action in the round as
+    part of a single action and give it the reward of the split action.
+    This method helps to do this sort of grouping.
+    It accumulates actions until accumulation_stop_condition is met, then creates a new node
+    with the accumulated actions, and recursively continues on the child node.
+    Details:
+    - The reward for the group is the reward of the last time step in the group.
+    - The simulation log for the group is the simulation log of the last time step in the group.
+    - The state end for the group becomes the first state end in the group.
+    - The agent info for the group is the agent info of the last time step in the group.
+    """
+
+    def group_step_logs(step_logs: list[StepLog]) -> StepLog:
+        """
+        Concatenate per-agent chat turns across steps; keep only the first is_state_end.
+        """
+        last_sim_log = step_logs[-1].simulation_step_log
+        agent_ids = {aid for s in step_logs for aid in s.action_logs.keys()}
+        grouped_logs: dict[AgentId, AgentActLog] = {}
+        for aid in agent_ids:
+            turns = []
+            for s in step_logs:
+                act = s.action_logs.get(aid)
+                if act and act.chat_turns:
+                    turns.extend(copy.deepcopy(act.chat_turns))
+            disable_is_state_end = False
+            # Only the first is_state_end should stay True; the rest become False
+            for t in turns:
+                if t.is_state_end:
+                    if disable_is_state_end:
+                        t.is_state_end = False
+                    else:
+                        disable_is_state_end = True
+                        continue
+            grouped_logs[aid] = AgentActLog(
+                chat_turns=turns, info=step_logs[-1].action_logs[aid].info
+            )
+        return StepLog(action_logs=grouped_logs, simulation_step_log=last_sim_log)
+
+    def group_time_steps_rec(
+        current_node: RolloutTreeNode | RolloutTreeBranchNode,
+        group_time_step: int,
+        accumulation_step_logs: list[StepLog],
+    ) -> RolloutTreeNode | RolloutTreeBranchNode:
+        """
+        Groups time steps. Recursion is used to handle branches.
+        """
+        assert isinstance(current_node, RolloutTreeNode) or isinstance(
+            current_node, RolloutTreeBranchNode
+        ), "Current node must be a tree node or a branch node. Is of type: " + str(
+            type(current_node)
+        )
+        first_group_node = None
+        current_group_node = None
+        while current_node is not None:
+            if isinstance(current_node, RolloutTreeBranchNode):
+                raise Exception(
+                    "Grouping timesteps by round is not supported for branching trajectories yet."
+                )
+            # Special recursive case for branches
+            # if isinstance(current_node, RolloutTreeBranchNode):
+            #     branches = {}
+            #     for agent_id, branch_nodes in current_node.branches.items():
+            #         branch_group_nodes = []
+            #         for branch_node in branch_nodes:
+            #             branch_group_node = group_time_steps_rec(
+            #                 current_node=branch_node,
+            #                 group_time_step=group_time_step,
+            #                 accumulation_step_logs=copy.deepcopy(accumulation_step_logs))
+            #             branch_group_nodes.append(branch_group_node)
+            #         branches[agent_id] = branch_group_nodes
+
+            #     main_child_group_node = group_time_steps_rec(
+            #         current_node=current_node.main_child,
+            #         group_time_step=group_time_step,
+            #         accumulation_step_logs=copy.deepcopy(accumulation_step_logs))
+
+            #     return RolloutTreeBranchNode(main_child=main_child_group_node, branches=branches)
+
+            # Accumulate
+            accumulation_step_logs.append(current_node.step_log)
+            if accumulation_stop_condition(current_node.step_log):
+                grouped_step_logs = group_step_logs(accumulation_step_logs)
+                accumulation_step_logs = []
+                new_group_node = RolloutTreeNode(
+                    step_log=grouped_step_logs, time_step=group_time_step, child=None
+                )
+                if first_group_node is None:
+                    first_group_node = new_group_node
+                group_time_step += 1
+                if current_group_node is not None:
+                    current_group_node.child = new_group_node
+                current_group_node = new_group_node
+            current_node = current_node.child
+        return first_group_node
+
+    node = group_time_steps_rec(
+        current_node=rollout_tree.child, group_time_step=0, accumulation_step_logs=[]
+    )
+    return RolloutTreeRootNode(
+        id=rollout_tree.id,
+        crn_id=rollout_tree.crn_id,
+        child=node,
+        agent_ids=rollout_tree.agent_ids,
+    )
+
+
+def stop_when_round_ends(step_log: StepLog) -> bool:
+    """
+    Simplest stop condition. Returns True if the step log is the last time step of a round.
+    Throws an error if this information is not available in the simulation info.
+    """
+    assert (
+        "is_last_timestep_in_round" in step_log.simulation_step_log.info.keys()
+    ), "To group by round, is_last_timestep_in_round must be set in the info of your simulation step log at each time step."
+    return step_log.simulation_step_log.info["is_last_timestep_in_round"]
+
+
+def group_by_round(rollout_tree: RolloutTreeRootNode) -> RolloutTreeRootNode:
+    """
+    Groups time steps by round.
+    """
+    return group_time_steps(rollout_tree, stop_when_round_ends)
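Note: a usage sketch (`rollout_tree` stands in for a root node returned by a runner):

    from mllm.markov_games.group_timesteps import group_by_round, group_time_steps

    # Standard grouping: requires "is_last_timestep_in_round" in each simulation step log's info.
    grouped = group_by_round(rollout_tree)

    # Equivalent form with an explicit stop condition over the same info dict:
    grouped_alt = group_time_steps(
        rollout_tree,
        accumulation_stop_condition=lambda step_log: step_log.simulation_step_log.info.get(
            "is_last_timestep_in_round", False
        ),
    )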
src_code_for_reproducibility/markov_games/linear_runner.py ADDED
@@ -0,0 +1,30 @@
+import asyncio
+import json
+import os.path
+
+from mllm.markov_games.markov_game import MarkovGame
+from mllm.markov_games.rollout_tree import RolloutTreeNode, RolloutTreeRootNode
+
+
+async def LinearRunner(
+    markov_game: MarkovGame, output_folder: str
+) -> RolloutTreeRootNode:
+    """
+    This method generates a trajectory without branching.
+    """
+    time_step = 0
+    terminated = False
+    root = RolloutTreeRootNode(
+        id=markov_game.get_id(),
+        crn_id=markov_game.get_crn_id(),
+        agent_ids=markov_game.get_agent_ids(),
+    )
+    previous_node = root
+    while not terminated:
+        terminated, step_log = await markov_game.step()
+        current_node = RolloutTreeNode(step_log=step_log, time_step=time_step)
+        previous_node.child = current_node
+        previous_node = current_node
+        time_step += 1
+
+    return root
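Note: a usage sketch, including a walk over the resulting linear chain (`game` is an assumed MarkovGame instance):

    import asyncio

    from mllm.markov_games.linear_runner import LinearRunner

    root = asyncio.run(LinearRunner(markov_game=game, output_folder="runs/linear"))
    node = root.child
    while node is not None:  # unbranched rollouts form a simple child chain
        print(node.time_step, node.step_log.simulation_step_log)
        node = node.child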
src_code_for_reproducibility/markov_games/markov_game.py ADDED
@@ -0,0 +1,208 @@
+"""
+This class unifies a simulation and the agents acting in it (see `simulation.py` & `agent.py`).
+In a MarkovGame step,
+1) each agent takes an action,
+2) the state transitions with respect to these actions,
+3) all relevant data of the step is appended to the historical data list.
+
+In order to perform 3), the agents and the simulation are expected, at each time step,
+to return a log of the state transition (from their perspective).
+For instance, the Simulation might send rewards and the agents might send prompting contexts to be used later to generate the training data.
+A different approach would be to simply have the agents keep their data private and log it upon completion of a trajectory.
+The approach we use here centralizes the data gathering aspect,
+making it easy to create sub-trajectories (in the runner modules) that
+only log information for step transitions occurring after the branching out.
+"""
+import asyncio
+import copy
+import json
+import os
+from dataclasses import dataclass
+from typing import Any, List, Literal, Optional, Tuple
+
+from mllm.markov_games.agent import Agent
+from mllm.markov_games.rollout_tree import AgentActLog, StepLog
+from mllm.markov_games.simulation import Simulation
+
+AgentId = str
+
+
+@dataclass
+class AgentAndActionSafeCopy:
+    action: Any
+    action_info: AgentActLog
+    agent_after_action: Agent
+
+
+class MarkovGame(object):
+    def __init__(
+        self,
+        id: int,
+        agents: dict[AgentId, Agent],
+        simulation: Simulation,
+        crn_id: int,
+    ):
+        """
+        Args:
+            id: Identifier of this Markov game.
+            agents: Mapping from agent id to Agent instance.
+            simulation: Simulation object. Example: IPDSimulation
+            crn_id: Identifier pairing games that share the same seed
+                (set to the config seed in `init_markov_game_components`).
+        """
+        self.agents = agents
+        self.agent_ids = self.agents.keys()
+        self.simulation = simulation
+        self.simulation_step_log = None
+        self.agent_step_logs = {agent_id: None for agent_id in self.agent_ids}
+        self.actions = {}
+        self.id = id
+        self.crn_id = crn_id
+
+    def get_id(self) -> int:
+        return self.id
+
+    def get_crn_id(self) -> int:
+        return self.crn_id
+
+    def get_agent_ids(self) -> List[AgentId]:
+        return list(self.agent_ids)
+
+    async def get_action_of_agent_without_side_effects(
+        self, agent_id: AgentId
+    ) -> AgentAndActionSafeCopy:
+        """
+        Safe function to get an action of an agent without modifying the agent or the simulation.
+        """
+        agent = self.agents[agent_id]
+        agent_before_action = agent.get_safe_copy()
+        obs = self.simulation.get_obs_agent(agent_id)
+        action, action_info = await agent.act(observation=obs)
+        self.agents[agent_id] = agent_before_action
+        agent_after_action = agent.get_safe_copy()
+        return AgentAndActionSafeCopy(action, action_info, agent_after_action)
+
+    async def get_actions_of_agents_without_side_effects(
+        self,
+    ) -> dict[AgentId, AgentAndActionSafeCopy]:
+        """
+        Safe function to get the actions of all agents without modifying the agents or the simulation.
+        """
+        tasks = []
+        for agent_id in self.agent_ids:
+            task = asyncio.create_task(
+                self.get_action_of_agent_without_side_effects(agent_id)
+            )
+            tasks.append(task)
+        agent_and_action_safe_copies: list[
+            AgentAndActionSafeCopy
+        ] = await asyncio.gather(*tasks)
+        return {
+            agent_id: agent_and_action_safe_copy
+            for agent_id, agent_and_action_safe_copy in zip(
+                self.agent_ids, agent_and_action_safe_copies
+            )
+        }
+
+    def set_action_and_agent_after_action_manually(
+        self,
+        agent_id: AgentId,
+        agent_action_safe_copy: AgentAndActionSafeCopy,
+    ):
+        """
+        Set the action and the post-action agent state manually.
+        """
+        self.actions[agent_id] = agent_action_safe_copy.action
+        self.agent_step_logs[agent_id] = agent_action_safe_copy.action_info
+        self.agents[agent_id] = agent_action_safe_copy.agent_after_action
+
+    def set_actions_of_agents_manually(
+        self, actions: dict[AgentId, AgentAndActionSafeCopy]
+    ):
+        """
+        Set the actions of all agents manually.
+        """
+        for agent_id, agent_action_safe_copy in actions.items():
+            self.set_action_and_agent_after_action_manually(
+                agent_id, agent_action_safe_copy
+            )
+
+    async def set_action_of_agent(self, agent_id: AgentId):
+        """
+        Query the agent for its action given the current observation and store it.
+        """
+        agent = self.agents[agent_id]
+        obs = self.simulation.get_obs_agent(agent_id)
+        action, action_info = await agent.act(observation=obs)
+        self.actions[agent_id] = action
+        self.agent_step_logs[agent_id] = action_info
+
+    async def set_actions(self):
+        """
+        Query all agents for their actions concurrently.
+        """
+        tasks = []
+        for agent_id in self.agent_ids:
+            task = asyncio.create_task(self.set_action_of_agent(agent_id))
+            tasks.append(task)
+        await asyncio.gather(*tasks)
+
+    def take_simulation_step(self):
+        """
+        Advance the simulation with the stored actions; returns whether the game terminated.
+        """
+        terminated, self.simulation_step_log = self.simulation.step(self.actions)
+        return terminated
+
+    def get_step_log(self) -> StepLog:
+        """
+        Return the log of the last step.
+        TODO: assert actions and simulation have taken step
+        """
+        step_log = StepLog(
+            simulation_step_log=self.simulation_step_log,
+            action_logs=self.agent_step_logs,
+        )
+        return step_log
+
+    async def step(self) -> Tuple[bool, StepLog]:
+        """
+        Take one full step: collect actions, advance the simulation, and return the step log.
+        """
+        await self.set_actions()
+        terminated = self.take_simulation_step()
+        step_log = self.get_step_log()
+        return terminated, step_log
+
+    def get_safe_copy(self):
+        """
+        Return a copy of the game that is decorrelated from the original object.
+        """
+
+        new_markov_game = copy.copy(self)
+        new_simulation = self.simulation.get_safe_copy()
+        new_agents = {
+            agent_id: agent.get_safe_copy() for agent_id, agent in self.agents.items()
+        }
+
+        # Reassign copied components
+        new_markov_game.simulation = new_simulation
+        new_markov_game.agents = new_agents
+
+        # IMPORTANT: ensure agent_ids references the new agents dict, not the original
+        new_markov_game.agent_ids = new_markov_game.agents.keys()
+
+        # Deep-copy step data to avoid correlation
+        new_markov_game.simulation_step_log = copy.deepcopy(self.simulation_step_log)
+        new_markov_game.actions = copy.deepcopy(self.actions)
+        # Rebuild logs to align exactly with new agent ids
+        old_agent_step_logs = copy.deepcopy(self.agent_step_logs)
+        new_markov_game.agent_step_logs = {
+            agent_id: old_agent_step_logs.get(agent_id)
+            for agent_id in new_markov_game.agent_ids
+        }
+
+        return new_markov_game
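Note: the three-phase cycle from the module docstring, written out by hand; `game` is an assumed MarkovGame instance, and the loop is equivalent to repeatedly awaiting `game.step()`:

    import asyncio


    async def roll(game):
        terminated = False
        while not terminated:
            await game.set_actions()                  # 1) each agent takes an action
            terminated = game.take_simulation_step()  # 2) the state transitions
            step_log = game.get_step_log()            # 3) the step's data is logged
        return step_log


    last_log = asyncio.run(roll(game))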
src_code_for_reproducibility/markov_games/mg_utils.py ADDED
@@ -0,0 +1,89 @@
+import asyncio
+import copy
+from collections.abc import Callable
+from dataclasses import dataclass
+
+from mllm.markov_games.ipd.ipd_agent import IPDAgent
+from mllm.markov_games.ipd.ipd_simulation import IPD
+from mllm.markov_games.markov_game import MarkovGame
+from mllm.markov_games.negotiation.dond_agent import DealNoDealAgent
+from mllm.markov_games.negotiation.dond_simulation import DealNoDealSimulation
+from mllm.markov_games.negotiation.nego_hard_coded_policies import (
+    HardCodedNegoGreedyPolicy,
+    HardCodedNegoWelfareMaximizingPolicy,
+)
+from mllm.markov_games.ipd.Ipd_hard_coded_agents import (
+    AlwaysCooperateIPDAgent,
+    AlwaysDefectIPDAgent,
+)
+from mllm.markov_games.negotiation.no_press_nego_agent import NoPressAgent
+from mllm.markov_games.negotiation.no_press_nego_simulation import NoPressSimulation
+from mllm.markov_games.negotiation.tas_agent import TrustAndSplitAgent
+from mllm.markov_games.negotiation.tas_rps_agent import TrustAndSplitRPSAgent
+from mllm.markov_games.negotiation.tas_rps_simulation import TrustAndSplitRPSSimulation
+from mllm.markov_games.negotiation.tas_simple_agent import TrustAndSplitSimpleAgent
+from mllm.markov_games.negotiation.tas_simple_simulation import (
+    TrustAndSplitSimpleSimulation,
+)
+from mllm.markov_games.negotiation.tas_simulation import TrustAndSplitSimulation
+from mllm.markov_games.rollout_tree import (
+    AgentActLog,
+    RolloutTreeBranchNode,
+    RolloutTreeNode,
+    RolloutTreeRootNode,
+    StepLog,
+)
+from mllm.markov_games.simulation import SimulationStepLog
+
+AgentId = str
+
+
+@dataclass
+class AgentConfig:
+    agent_id: str
+    agent_name: str
+    agent_class_name: str
+    policy_id: str
+    init_kwargs: dict
+
+
+@dataclass
+class MarkovGameConfig:
+    id: int
+    seed: int
+    simulation_class_name: str
+    simulation_init_args: dict
+    agent_configs: list[AgentConfig]
+
+
+def init_markov_game_components(
+    config: MarkovGameConfig, policies: dict[str, Callable[[list[dict]], str]]
+):
+    """
+    Instantiate the agents and the simulation named in the config and wrap them in a MarkovGame.
+    The agent and simulation classes are looked up by name among the classes imported above.
+    """
+    agents = {}
+    agent_names = []
+    for agent_config in config.agent_configs:
+        agent_id = agent_config.agent_id
+        agent_name = agent_config.agent_name
+        agent_class = eval(agent_config.agent_class_name)
+        agent = agent_class(
+            seed=config.seed,
+            agent_id=agent_id,
+            agent_name=agent_name,
+            policy=policies[agent_config.policy_id],
+            **agent_config.init_kwargs,
+        )
+        agents[agent_id] = agent
+        agent_names.append(agent_name)
+    simulation = eval(config.simulation_class_name)(
+        seed=config.seed,
+        agent_ids=list(agents.keys()),
+        agent_names=agent_names,
+        **config.simulation_init_args,
+    )
+    markov_game = MarkovGame(
+        id=config.id,
+        crn_id=config.seed,
+        agents=agents,
+        simulation=simulation,
+    )
+    return markov_game
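Note: a configuration sketch (class names must match ones imported in this module; the init kwargs are illustrative and may not be sufficient for a given simulation, and `my_policy` is a hypothetical policy callable):

    from mllm.markov_games.mg_utils import (
        AgentConfig,
        MarkovGameConfig,
        init_markov_game_components,
    )

    config = MarkovGameConfig(
        id=0,
        seed=42,
        simulation_class_name="IPD",
        simulation_init_args={},
        agent_configs=[
            AgentConfig(agent_id="alice", agent_name="Alice",
                        agent_class_name="IPDAgent", policy_id="p0", init_kwargs={}),
            AgentConfig(agent_id="bob", agent_name="Bob",
                        agent_class_name="IPDAgent", policy_id="p0", init_kwargs={}),
        ],
    )
    game = init_markov_game_components(config, policies={"p0": my_policy})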
src_code_for_reproducibility/markov_games/negotiation/nego_agent.py ADDED
@@ -0,0 +1,242 @@
1
+ import copy
2
+ from abc import abstractmethod
3
+ from collections.abc import Callable
4
+ from dataclasses import dataclass
5
+ from typing import Any, Dict, List, Tuple
6
+
7
+ import numpy as np
8
+
9
+ from mllm.markov_games.agent import Agent
10
+ from mllm.markov_games.negotiation.nego_simulation import Message, NegotiationObs, Split
11
+ from mllm.markov_games.rollout_tree import AgentActLog, ChatTurn
12
+
13
+
14
+ @dataclass
15
+ class NegotiationAgentState:
16
+ round_nb: int
17
+ nb_messages_sent_this_round: int
18
+ chat_counter: int
19
+ chat_history: List[ChatTurn]
20
+
21
+
22
+ class NegotiationAgent(Agent):
23
+ def __init__(
24
+ self,
25
+ seed: int,
26
+ agent_id: str,
27
+ agent_name: str,
28
+ policy: Callable[[List[Dict]], str],
29
+ goal: str,
30
+ exploration_prompts: List[str] = [],
31
+ exploration_prompt_probs: List[float] = [],
32
+ ):
33
+ self.seed = seed
34
+ self.agent_id = agent_id
35
+ self.agent_name = agent_name
36
+ self.policy = policy
37
+ self.goal = goal
38
+ self.exploration_prompts_toggled = len(exploration_prompts) > 0
39
+ if self.exploration_prompts_toggled:
40
+ exploration_prompts = copy.deepcopy(exploration_prompts)
41
+ exploration_prompts.append(None)
42
+ self.exploration_prompts = exploration_prompts
43
+ self.exploration_prompt_probs = np.array(exploration_prompt_probs)
44
+ assert self.exploration_prompt_probs.sum() <= 1
45
        assert np.all(self.exploration_prompt_probs >= 0)
        self.exploration_prompt_probs = np.append(
            self.exploration_prompt_probs, 1 - self.exploration_prompt_probs.sum()
        )
        self.state = NegotiationAgentState(
            round_nb=0, nb_messages_sent_this_round=0, chat_counter=0, chat_history=[]
        )

        # Implemented in variants
        self.intro_prompt = ""
        self.new_round_prompt = ""
        self.last_round_prompt = ""
        self.send_split_prompt = ""
        self.wait_for_message_prompt = ""
        self.last_message_prompt = ""
        self.send_message_prompt = ""

    @abstractmethod
    def get_message_regex(self, observation: NegotiationObs) -> str:
        pass

    @abstractmethod
    def get_split_regex(self, observation: NegotiationObs) -> str:
        pass

    @abstractmethod
    def get_split_action(
        self, policy_output: str, observation: NegotiationObs
    ) -> Split:
        pass

    async def act(self, observation: NegotiationObs) -> Tuple[Any, AgentActLog]:
        def dict_to_str(d: dict) -> str:
            return ", ".join(f"{v} {k}" for k, v in d.items())

        def dict_to_eq_str(d: dict) -> str:
            return ", ".join(f"{k}={v}" for k, v in d.items())

        is_our_turn = observation.current_agent == self.agent_id
        action: Any = None
        round_nb = observation.round_nb

        prompt_parts: List[str] = []
        obs_ctx = vars(observation)
        obs_ctx_formatted = obs_ctx.copy()
        for key in obs_ctx_formatted:
            if isinstance(obs_ctx_formatted[key], dict) and "value" not in key:
                obs_ctx_formatted[key] = dict_to_str(obs_ctx_formatted[key])
            elif isinstance(obs_ctx_formatted[key], dict) and "value" in key:
                obs_ctx_formatted[key] = dict_to_eq_str(obs_ctx_formatted[key])

        #######################################
        # Build user prompt
        #######################################

        # First-ever call
        is_intro = round_nb == 0 and self.state.chat_counter == 0
        if is_intro:
            prompt_parts.append(
                self.intro_prompt.format(
                    goal=self.goal, agent=self.agent_name, **obs_ctx_formatted
                )
            )

        # New round
        is_new_round = round_nb > self.state.round_nb
        if is_new_round or is_intro:
            self.state.nb_messages_sent_this_round = 0
            if not is_intro:
                prompt_parts.append(self.last_round_prompt.format(**obs_ctx_formatted))
            prompt_parts.append(self.new_round_prompt.format(**obs_ctx_formatted))
            if self.exploration_prompts_toggled:
                exploration_prompt = self.exploration_prompts[
                    np.random.choice(
                        len(self.exploration_prompts), p=self.exploration_prompt_probs
                    )
                ]
                if exploration_prompt is not None:
                    prompt_parts.append(exploration_prompt)
            self.state.round_nb = round_nb

        # Wait for message
        if not is_our_turn and not observation.split_phase:
            prompt_parts.append(
                self.wait_for_message_prompt.format(**obs_ctx_formatted)
            )

        # Get last message
        if is_our_turn and not is_new_round and not is_intro:
            prompt_parts.append(self.last_message_prompt.format(**obs_ctx_formatted))

        # Prompt to send message
        must_send_message = not observation.split_phase and is_our_turn
        if must_send_message:
            prompt_parts.append(self.send_message_prompt.format(**obs_ctx_formatted))

        # Prompt to give split
        must_send_split = not must_send_message and observation.split_phase
        if must_send_split:
            var_names = ["x", "y", "z", "w"]  # Extend as needed
            items_str = ", ".join(
                [
                    f"{var_names[i]} {item}"
                    for i, item in enumerate(obs_ctx["quantities"].keys())
                ]
            )
            ranges_str = ", ".join(
                [
                    f"{var_names[i]}: 0-{obs_ctx['quantities'][item]} (integer)"
                    for i, item in enumerate(obs_ctx["quantities"].keys())
                ]
            )
            proposal_style = f"Proposal: {items_str} where {ranges_str}."
            proposal_style2 = (
                f"<items_to_self> {items_str} </items_to_self> where {ranges_str}."
            )
            prompt_parts.append(
                self.send_split_prompt.format(
                    proposal_style=proposal_style,
                    proposal_style2=proposal_style2,
                    **obs_ctx_formatted,
                )
            )

        # Append one ChatTurn with is_state_end=True
        user_prompt = "\n".join(prompt_parts)
        self.state.chat_history.append(
            ChatTurn(
                agent_id=self.agent_id,
                role="user",
                content=user_prompt,
                is_state_end=True,
            )
        )

        #######################################
        # Get policy action
        #######################################

        # Query the policy for the appropriate format
        if must_send_message:
            return_regex = self.get_message_regex(observation)
            policy_output = await self.policy(
                state=self.state.chat_history,
                agent_id=self.agent_id,
                regex=return_regex,
            )
            self.state.chat_history.append(
                ChatTurn(
                    agent_id=self.agent_id,
                    role="assistant",
                    content=policy_output.content,
                    reasoning_content=policy_output.reasoning_content,
                    log_probs=policy_output.log_probs,
                    out_token_ids=policy_output.out_token_ids,
                    is_state_end=False,
                )
            )
            action = Message(message=policy_output.content)
            self.state.nb_messages_sent_this_round += 1

        elif must_send_split:
            return_regex = self.get_split_regex(observation)
            policy_output = await self.policy(
                state=self.state.chat_history,
                agent_id=self.agent_id,
                regex=return_regex,
            )
            self.state.chat_history.append(
                ChatTurn(
                    agent_id=self.agent_id,
                    role="assistant",
                    content=policy_output.content,
                    reasoning_content=policy_output.reasoning_content,
                    log_probs=policy_output.log_probs,
                    out_token_ids=policy_output.out_token_ids,
                    is_state_end=False,
                )
            )
            action = self.get_split_action(policy_output.content, observation)
        else:
            action = None

        agent_step_log = AgentActLog(
            chat_turns=self.state.chat_history[self.state.chat_counter :], info=None
        )
        self.state.chat_counter = len(self.state.chat_history)
        return action, agent_step_log

    def get_safe_copy(self):
        agent_copy = copy.copy(self)
        agent_copy.state = copy.deepcopy(self.state)
        return agent_copy

    def reset(self):
        self.state = NegotiationAgentState(
            round_nb=0, nb_messages_sent_this_round=0, chat_counter=0, chat_history=[]
        )
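
# Illustrative variant sketch (hypothetical, not from the repository): assuming
# the enclosing class above is `NegotiationAgent`, a coins-only variant only
# needs to supply the three abstract hooks; the class name and regexes below
# are assumptions.
class CoinsNegotiationAgent(NegotiationAgent):
    def get_message_regex(self, observation: NegotiationObs) -> str:
        return r"[^\n]{1,200}"  # any single-line message up to 200 characters

    def get_split_regex(self, observation: NegotiationObs) -> str:
        # Matches e.g. "<items_to_self> 7 coins </items_to_self>" with 0-10 coins.
        return r"<items_to_self> (10|[0-9]) coins </items_to_self>"

    def get_split_action(
        self, policy_output: str, observation: NegotiationObs
    ) -> Split:
        n = int(policy_output.split("<items_to_self>")[1].strip().split(" ")[0])
        return Split(items_given_to_self={"coins": n})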
src_code_for_reproducibility/markov_games/negotiation/nego_simulation.py ADDED
@@ -0,0 +1,241 @@
"""
Negotiation simulation environment.

The "other agent" is fixed at the start of every round, even though the
current (acting) agent alternates across message turns within a round.
"""
import copy
from abc import abstractmethod
from dataclasses import dataclass
from typing import Any, Dict, List, Tuple

from numpy.random import default_rng

from mllm.markov_games.rollout_tree import SimulationStepLog
from mllm.markov_games.simulation import Simulation
from mllm.utils.get_coagent_id import get_coagent_id

AgentId = str


@dataclass
class Split:
    items_given_to_self: Dict[str, int]


@dataclass
class Message:
    message: str


@dataclass  # gets extended by variants
class NegotiationState:
    round_nb: int
    last_message: str
    current_agent: AgentId
    quantities: Dict[str, int]
    values: Dict[AgentId, Dict[str, float]]
    splits: Dict[AgentId, Split | None]
    nb_messages_sent: Dict[AgentId, int]
    previous_values: Dict[AgentId, Dict[str, float]] | None
    previous_splits: Dict[AgentId, Dict[str, int] | None] | None
    previous_points: Dict[AgentId, float] | None
    previous_quantities: Dict[str, int] | None
    split_phase: bool


@dataclass  # gets extended by variants
class NegotiationObs:
    round_nb: int
    last_message: str
    quota_messages_per_agent_per_round: int
    current_agent: AgentId
    other_agent: str
    quantities: Dict[str, int]
    item_types: List[str]
    value: Dict[str, int]
    split_phase: bool
    last_split_agent: Dict[str, int] | None
    last_value_agent: Dict[str, int] | None
    last_points_agent: float | None
    last_split_coagent: Dict[str, int] | None
    last_value_coagent: Dict[str, int] | None
    last_points_coagent: float | None
    last_quantities: Dict[str, int] | None


def compute_tas_style_rewards(
    agent_ids: List[AgentId],
    values: Dict[AgentId, float],
    splits: Dict[AgentId, Split],
    quantities: Dict[str, int],
) -> Dict[AgentId, float]:
    """
    TAS-like reward computation: for each item, if the sum of the two proposed
    amounts exceeds the available quantity, allocate proportionally; otherwise
    use the proposed amounts directly. Each agent's reward is its allocated
    quantity times its per-item value.
    """
    a0, a1 = agent_ids[0], agent_ids[1]
    r0, r1 = 0.0, 0.0

    for item in quantities:
        max_item = quantities[item]
        item_to_self_0 = int(
            (splits[a0].items_given_to_self.get(item, 0))
            if splits[a0] is not None
            else 0
        )
        item_to_self_1 = int(
            (splits[a1].items_given_to_self.get(item, 0))
            if splits[a1] is not None
            else 0
        )
        denom = max(int(max_item), item_to_self_0 + item_to_self_1)
        q0 = float(max_item) * float(item_to_self_0) / float(denom)
        q1 = float(max_item) * float(item_to_self_1) / float(denom)
        if not isinstance(values[a0], dict):
            # Scalar per-item value shared across items (e.g. TAS coins).
            r0 += q0 * float(values[a0])
            r1 += q1 * float(values[a1])
        else:
            r0 += q0 * float(values[a0][item])
            r1 += q1 * float(values[a1][item])
    return {a0: r0, a1: r1}
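
# Worked example of the proportional rule above (illustrative numbers, kept in
# a function so nothing runs at import time): with 10 coins, proposals of 7
# and 6 oversubscribe the pot, so denom = max(10, 13) = 13 and the allocations
# are 10 * 7 / 13 ≈ 5.38 and 10 * 6 / 13 ≈ 4.62 coins.
def _example_tas_rewards() -> Dict[AgentId, float]:
    return compute_tas_style_rewards(
        agent_ids=["alice", "bob"],
        values={"alice": 10.0, "bob": 1.0},  # scalar per-coin values
        splits={
            "alice": Split(items_given_to_self={"coins": 7}),
            "bob": Split(items_given_to_self={"coins": 6}),
        },
        quantities={"coins": 10},
    )  # -> {"alice": ~53.8, "bob": ~4.6}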

class NegotiationSimulation(Simulation):
    def __init__(
        self,
        agent_ids: List[AgentId],
        agent_names: List[str],
        seed: int,
        nb_of_rounds: int,
        quota_messages_per_agent_per_round: int,
        item_types: List[str] | None = None,
    ):
        self.seed = seed
        self.rng = default_rng(self.seed)
        self.agent_ids = list(agent_ids)
        self.agent_names = agent_names
        self.agent_id_to_name = {
            agent_id: agent_name for agent_id, agent_name in zip(agent_ids, agent_names)
        }
        self.nb_of_rounds = int(nb_of_rounds)
        self.quota_messages_per_agent_per_round = int(
            quota_messages_per_agent_per_round
        )
        if item_types is not None:
            self.item_types = [item.lower() for item in item_types]
        else:
            self.item_types = ["coins"]
        self.state: NegotiationState | None = None
        self._starting_agent_index = self.rng.choice([0, 1])
        self.reset()

    def _other(self, agent_id: AgentId) -> AgentId:
        return get_coagent_id(self.agent_ids, agent_id)

    @abstractmethod
    def set_new_round_of_variant(self):
        pass

    @abstractmethod
    def get_info_of_variant(
        self, state: NegotiationState, actions: Dict[AgentId, Any]
    ) -> Dict[str, Any]:
        pass

    def step(self, actions: Any) -> Tuple[bool, SimulationStepLog]:
        """
        Returns (terminated, step_log).
        """
        assert self.state is not None
        current_agent = self.state.current_agent
        a0, a1 = self.agent_ids[0], self.agent_ids[1]
        action = actions.get(current_agent)

        # Split phase: require both splits in the same timestep
        if self.state.split_phase:
            action_a0 = actions.get(a0)
            action_a1 = actions.get(a1)
            have_both_splits = isinstance(action_a0, Split) and isinstance(
                action_a1, Split
            )
            if not have_both_splits:
                rewards = {agent_id: 0.0 for agent_id in self.agent_ids}
                return False, SimulationStepLog(
                    rewards=rewards, info={"type": "waiting_for_splits"}
                )

            # Record splits
            self.state.splits[a0] = action_a0
            self.state.splits[a1] = action_a1

            # Compute rewards and end the round
            rewards = self.get_rewards(self.state.splits)

            # Info
            info = self.get_info_of_variant(self.state, actions)

            # Prepare the next round: alternate the starting agent
            self.state.round_nb += 1
            self._starting_agent_index = 1 - self._starting_agent_index
            self.state.current_agent = self.agent_ids[self._starting_agent_index]
            self.state.previous_values = copy.deepcopy(self.state.values)
            self.state.previous_splits = copy.deepcopy(self.state.splits)
            self.state.previous_quantities = copy.deepcopy(self.state.quantities)
            self.state.previous_points = copy.deepcopy(rewards)
            self.state.last_message = ""
            self.set_new_round_of_variant()  # variant specific
            self.state.splits = {agent_id: None for agent_id in self.agent_ids}
            self.state.nb_messages_sent = {agent_id: 0 for agent_id in self.agent_ids}
            is_last_timestep_in_round = True
            done = self.state.round_nb >= self.nb_of_rounds

        # Message phase
        elif isinstance(action, Message):
            self.state.last_message = action.message
            self.state.nb_messages_sent[current_agent] += 1

            # Move the turn to the other agent
            self.state.current_agent = self._other(current_agent)

            # If both agents have reached their message quota, enter the split phase
            if all(
                self.state.nb_messages_sent[agent_id]
                >= self.quota_messages_per_agent_per_round
                for agent_id in self.agent_ids
            ):
                self.state.split_phase = True
            is_last_timestep_in_round = False
            done = False
            rewards = {agent_id: 0.0 for agent_id in self.agent_ids}
            info = {"type": "message"}

        else:
            # Outside the split phase the current agent must send a Message.
            raise ValueError(
                f"Expected a Message from {current_agent} during the message phase; got {action!r}."
            )

        info[
            "is_last_timestep_in_round"
        ] = is_last_timestep_in_round  # Used later to group round timesteps if needed
        return done, SimulationStepLog(rewards=rewards, info=info)

    def get_obs(self):
        """Returns all agent observations in a dict."""
        return {agent_id: self.get_obs_agent(agent_id) for agent_id in self.agent_ids}

    @abstractmethod
    def get_rewards(self, splits: Dict[AgentId, Split]) -> Dict[AgentId, float]:
        pass

    @abstractmethod
    def get_obs_agent(self, agent_id):
        pass

    def get_state(self):
        return self.state

    def get_safe_copy(self):
        """Return a safe copy of the simulation."""
        simulation_copy = copy.copy(self)
        simulation_copy.state = copy.deepcopy(self.state)
        return simulation_copy

    @abstractmethod
    def reset(self) -> dict[AgentId, NegotiationObs]:
        pass
src_code_for_reproducibility/markov_games/negotiation/negotiation_statistics.py ADDED
@@ -0,0 +1,244 @@
from __future__ import annotations

from typing import Callable, Dict, List, Tuple

from mllm.markov_games.negotiation.nego_simulation import Split
from mllm.markov_games.rollout_tree import SimulationStepLog


def avg_reward(sl: SimulationStepLog) -> List[Tuple[str, float]] | None:
    """Per-step reward for each agent.

    What it computes:
        - Returns the raw reward for every (non-buffer) agent at the current
          simulation step, as ``(f"reward-{agent_id}", value)`` pairs.

    Rationale / motivation:
        Monitoring the reward stream at each step helps:
          * Diagnose reward shaping issues (e.g., unintended negative drift).
          * Provide a fairness snapshot (are rewards systematically skewed?).
          * Supply a ubiquitous baseline metric used by other higher-level
            summaries (efficiency, surplus allocation, etc.).

    Return shape:
        [(f"reward-{agent_id}", float), ...]
        If any agent id contains the substring "buffer" (and not "live"), we
        treat this step as an implementation artifact (e.g., a rollout buffer)
        and return ``None`` to avoid polluting aggregates.
    """
    for aid in sl.rewards.keys():
        if "buffer" in str(aid) and "live" not in str(aid):
            return None
    # One value per agent at each step
    rewards_dict = {f"reward-{aid}": float(v) for aid, v in (sl.rewards or {}).items()}
    return [(key, value) for key, value in rewards_dict.items() if value is not None]


def split_efficiency(sl: SimulationStepLog) -> List[Tuple[str, float]] | None:
    """Final-round allocation efficiency relative to an upper bound.

    What it computes (only on the last timestep of a negotiation round):
        - Uses ``info['values']`` (per-agent per-item valuations) and
          ``info['quantities']`` (available item counts) to form a greedy
          *upper bound* on achievable total reward: allocate each unit of an
          item to the single agent who values that item most.
        - Compares the actually realized sum of rewards at that final
          timestep to this constructed maximum.
        - Emits a single scalar under the key ``"split_efficiency"`` equal to
          achieved / theoretical_max.

    Motivation:
        Efficiency (a core welfare notion) distinguishes between coordination
        failures (low efficiency) and strategic distributional disputes
        (high efficiency but uneven splits). Tracking this per round helps
        evaluate whether models learn to identify and realize joint surplus.

    Notes / caveats:
        - Only defined for non-buffer agents; if a buffer agent is present,
          returns ``None`` to exclude spurious steps.
        - Requires the environment to have populated ``values`` and
          ``quantities``; otherwise returns ``None``.
        - This is an optimistic bound (not necessarily reachable under
          protocol constraints) but is simple, fast, and comparable across
          runs.
    """
    info = sl.info or {}
    if not info or not info.get("is_last_timestep_in_round"):
        return None
    quantities = info.get("quantities") or {}
    values = info.get("values") or {}
    if not values or not quantities:
        return None
    agent_ids = list(sl.rewards.keys())
    if isinstance(values[agent_ids[0]], dict):
        item_keys = list(values.values())[0].keys()
        max_vals, max_quantities = [], []
        for item in item_keys:
            max_val = max(float(agent_vals[item]) for agent_vals in values.values())
            max_vals.append(max_val)
            max_quantities.append(quantities[item])
    else:
        # Scalar values: assumes a single item type (e.g. TAS coins).
        max_vals = [max(float(v) for v in values.values())]
        max_quantities = [quantities[item] for item in quantities.keys()]
    for aid in sl.rewards.keys():
        if "buffer" in str(aid) and "live" not in str(aid):
            return None
    achieved = sum(float(v) for v in sl.rewards.values())
    max_reward = sum(d * v for d, v in zip(max_quantities, max_vals))
    # Efficiency is a global metric; emit a single value under one key.
    return [("split_efficiency", achieved / max_reward)]
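
# Worked example of the upper bound (illustrative numbers): with
# values = {"a": {"gold": 10, "silver": 1}, "b": {"gold": 1, "silver": 10}}
# and quantities = {"gold": 10, "silver": 10}, the greedy bound gives all
# gold to "a" and all silver to "b", so max_reward = 10*10 + 10*10 = 200.
# If the realized rewards at the round's last timestep sum to 110, the
# function returns [("split_efficiency", 0.55)].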

def _extract_items_from_split(raw_split: Split | Dict | None) -> Dict[str, float]:
    """Return a mapping item -> proposal amount from a split structure.

    Supports generic negotiation splits with the nested structure
    ``{'items_given_to_self': {item: qty, ...}}`` as well as TAS coin-only
    variants that may already be a flat mapping such as ``{'coins': qty}``.
    """
    if raw_split is None:
        return {}
    elif isinstance(raw_split, Split):
        return {k: float(v) for k, v in raw_split.items_given_to_self.items()}
    elif isinstance(raw_split, dict):
        if "items_given_to_self" in raw_split and isinstance(
            raw_split["items_given_to_self"], dict
        ):
            return {k: float(v) for k, v in raw_split["items_given_to_self"].items()}
        # Fallback: assume an already flat mapping of items.
        return {
            k: float(v) for k, v in raw_split.items() if isinstance(v, (int, float))
        }
    elif hasattr(raw_split, "items_given_to_self"):
        return {k: float(v) for k, v in raw_split.items_given_to_self.items()}
    return {}


def _average_proposal_relative_value(
    sl: SimulationStepLog,
    metric_name: str,
    comparator: Callable[[float, float], bool],
    opposite_comparator: Callable[[float, float], bool],
) -> List[Tuple[str, float]] | None:
    """Shared implementation for proposal size conditioned on relative value.

    Parameters:
        comparator: returns True when agent_0's value relation (e.g. < or >)
            to agent_1 holds for an item, in which case we collect agent_0's
            proposed quantity for that item.
        opposite_comparator: inverse relation, used to collect agent_1's items.

    Behavior:
        - Executes only on the final timestep of a round (where the definitive
          proposal / allocation is known via ``info['splits']``).
        - For each item, classifies which agent's value satisfies the chosen
          relation and records that agent's proposed quantity from the split.
        - Averages (mean) across all qualifying items per agent; agents with
          no qualifying items are omitted from the output.

    Why this matters:
        Distinguishing how much an agent *asks for* when it subjectively
        values items more (or less) than its counterpart reveals patterns of
        opportunism vs. concession. This is especially useful when raw reward
        differences are subtle but allocation *intent* differs.
    """
    info = sl.info or {}
    if not info or not info.get("is_last_timestep_in_round"):
        return None
    quantities = info.get("quantities") or {}
    splits = info.get("splits") or {}
    values = info.get("values") or {}
    agent_ids: List[str] = list(sl.rewards.keys())
    if len(agent_ids) != 2:
        return None  # Only defined for the 2-agent case.
    for aid in agent_ids:
        if "buffer" in str(aid) and "live" not in str(aid):
            return None
    # Extract per-agent item proposals robustly
    split_items = {aid: _extract_items_from_split(splits.get(aid)) for aid in agent_ids}
    agent_0_vals: List[float] = []
    agent_1_vals: List[float] = []
    for item in quantities.keys():
        # Values may be either a float (same for all items) or a dict per item
        v0_raw = values[agent_ids[0]]
        v1_raw = values[agent_ids[1]]
        v0 = float(v0_raw[item]) if isinstance(v0_raw, dict) else float(v0_raw)
        v1 = float(v1_raw[item]) if isinstance(v1_raw, dict) else float(v1_raw)
        if comparator(v0, v1):
            agent_0_vals.append(split_items[agent_ids[0]].get(item, 0.0))
        elif opposite_comparator(v0, v1):
            agent_1_vals.append(split_items[agent_ids[1]].get(item, 0.0))
    out: Dict[str, float | None] = {}
    out[f"{metric_name}-{agent_ids[0]}"] = (
        sum(agent_0_vals) / len(agent_0_vals) if agent_0_vals else None
    )
    out[f"{metric_name}-{agent_ids[1]}"] = (
        sum(agent_1_vals) / len(agent_1_vals) if agent_1_vals else None
    )

    return [(key, value) for key, value in out.items() if value is not None]


def average_proposal_when_agent_values_item_lower(
    sl: SimulationStepLog,
) -> List[Tuple[str, float]] | None:
    """Mean quantity an agent proposes for items it values *less* than its opponent.

    Interpretation:
        A higher value implies the agent still claims (or is allocated) a
        notable share of items where it has a comparative *disadvantage* in
        valuation, signaling either strategic over-claiming or protocol-driven
        egalitarian splits. Conversely, very low numbers can indicate
        efficient specialization or excessive concession.

    Returns:
        A list of ``(f"{metric_name}-{agent_id}", float)`` pairs; agents with
        no qualifying items in the round are omitted.
    """
    return _average_proposal_relative_value(
        sl,
        "average_proposal_when_agent_values_item_lower",
        lambda a, b: a < b,
        lambda a, b: a > b,
    )


def average_proposal_when_agent_values_item_higher(
    sl: SimulationStepLog,
) -> List[Tuple[str, float]] | None:
    """Mean quantity an agent proposes for items it values *more* than its opponent.

    Interpretation:
        Captures how aggressively an agent claims items where it holds a
        comparative *advantage*. Elevated values can reflect rational
        specialization (efficient exploitation of comparative advantage) or
        potentially unfair grabs if paired with low concession in the lower
        valuation metric. Comparing this with the 'lower' counterpart helps
        profile negotiation style (cooperative vs. exploitative).

    Returns:
        A list of ``(f"{metric_name}-{agent_id}", float)`` pairs; agents with
        no qualifying items in the round are omitted.
    """
    return _average_proposal_relative_value(
        sl,
        "average_proposal_when_agent_values_item_higher",
        lambda a, b: a > b,
        lambda a, b: a < b,
    )


# Explicit list of metric functions exported for rendering. Helper functions
# starting with '_' are intentionally excluded. Update this list when adding
# new public statistics so render.py can rely on it instead of introspecting
# every callable in the module.
stat_functs: list[Callable[[SimulationStepLog], List[Tuple[str, float]] | None]] = [
    avg_reward,
    average_proposal_when_agent_values_item_lower,
    average_proposal_when_agent_values_item_higher,
    split_efficiency,
]
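
# Hedged usage sketch (not the repository's render.py): one simple way a
# consumer could aggregate these per-step statistics over a list of
# SimulationStepLogs; the `step_logs` argument and the averaging scheme are
# assumptions for illustration.
def _aggregate_stats(step_logs):
    from collections import defaultdict

    sums, counts = defaultdict(float), defaultdict(int)
    for sl in step_logs:
        for funct in stat_functs:
            for key, value in funct(sl) or []:
                sums[key] += value
                counts[key] += 1
    return {key: sums[key] / counts[key] for key in sums}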
src_code_for_reproducibility/markov_games/negotiation/no_press_nego_simulation.py ADDED
@@ -0,0 +1,168 @@
import copy
from collections import defaultdict
from dataclasses import dataclass
from typing import Any, Dict, Literal

from mllm.markov_games.negotiation.nego_simulation import (
    NegotiationObs,
    NegotiationSimulation,
    NegotiationState,
    Split,
    compute_tas_style_rewards,
)

AgentId = str


@dataclass
class NoPressState(NegotiationState):
    pass


@dataclass
class NoPressObs(NegotiationObs):
    other_value: Dict[str, float]


class NoPressSimulation(NegotiationSimulation):
    def __init__(
        self,
        game_type: Literal["10-1-exclusive", "10-1-ties", "1-to-20"] = "1-to-20",
        same_round_value: bool = True,
        atleast_one_conflict: bool = False,
        *args,
        **kwargs,
    ):
        self.game_type = game_type
        self.same_round_value = same_round_value
        self.atleast_one_conflict = atleast_one_conflict
        super().__init__(*args, **kwargs)

    def _sample_values(self) -> Dict[AgentId, dict]:
        values = defaultdict(dict)
        if self.state is None:
            item_types = self.item_types
        else:
            item_types = list(self.state.quantities.keys())
        while True:
            for item in item_types:
                if self.game_type == "10-1-exclusive":
                    v = int(self.rng.choice([1, 10]))
                    values[self.agent_ids[0]][item] = v
                    values[self.agent_ids[1]][item] = 10 if v == 1 else 1
                elif self.game_type == "10-1-ties":
                    for aid in self.agent_ids:
                        values[aid][item] = int(self.rng.choice([1, 10]))
                elif self.game_type == "1-to-20":
                    for aid in self.agent_ids:
                        values[aid][item] = int(self.rng.integers(1, 21))
            if self.atleast_one_conflict:
                has_conflict = False
                for item in item_types:
                    agent_values_for_item = [
                        values[aid][item] for aid in self.agent_ids
                    ]
                    if len(set(agent_values_for_item)) > 1:
                        has_conflict = True
                        break
                if not has_conflict:
                    continue
            agent_values = [sum(v.values()) for v in values.values()]
            if len(set(agent_values)) == 1 or not self.same_round_value:
                break
        return values

    def _sample_quantities(self) -> Dict[str, int]:
        return {item.lower(): 10 for item in self.item_types}

    def set_new_round_of_variant(self):
        self.state.quantities = self._sample_quantities()
        self.state.values = self._sample_values()
        self.state.split_phase = True

    def get_info_of_variant(
        self, state: NegotiationState, actions: Dict[AgentId, Any]
    ) -> Dict[str, Any]:
        return {
            "quantities": copy.deepcopy(state.quantities),
            "values": copy.deepcopy(state.values),
            "splits": copy.deepcopy(state.splits),
        }

    def get_rewards(self, splits: Dict[AgentId, Split]) -> Dict[AgentId, float]:
        return compute_tas_style_rewards(
            self.agent_ids, self.state.values, splits, self.state.quantities
        )

    def get_obs(self):
        return {agent_id: self.get_obs_agent(agent_id) for agent_id in self.agent_ids}

    def get_obs_agent(self, agent_id):
        other_id = self._other(agent_id)
        last_value_coagent = (
            None
            if self.state.previous_values is None
            else self.state.previous_values.get(other_id)
        )
        last_points_coagent = (
            None
            if self.state.previous_points is None
            else round(self.state.previous_points.get(other_id), 1)
        )
        last_value_agent = (
            None
            if self.state.previous_values is None
            else self.state.previous_values.get(agent_id)
        )
        last_points_agent = (
            None
            if self.state.previous_points is None
            else round(self.state.previous_points.get(agent_id), 1)
        )
        last_split_coagent = None
        last_split_agent = None
        if self.state.previous_splits is not None:
            last_split_coagent = self.state.previous_splits[
                other_id
            ].items_given_to_self
            last_split_agent = self.state.previous_splits[agent_id].items_given_to_self
        obs = NoPressObs(
            round_nb=self.state.round_nb,
            last_message="",
            quota_messages_per_agent_per_round=self.quota_messages_per_agent_per_round,
            current_agent=self.state.current_agent,
            other_agent=self.agent_id_to_name[other_id],
            quantities=self.state.quantities,
            item_types=self.item_types,
            value=self.state.values[agent_id],
            split_phase=self.state.split_phase,
            last_split_agent=last_split_agent,
            last_value_agent=last_value_agent,
            last_points_agent=last_points_agent,
            last_split_coagent=last_split_coagent,
            last_value_coagent=last_value_coagent,
            last_points_coagent=last_points_coagent,
            other_value=self.state.values[other_id],
            last_quantities=self.state.previous_quantities,
        )
        return obs

    def reset(self):
        start_agent = self.agent_ids[self._starting_agent_index]
        quantities = self._sample_quantities()
        values = self._sample_values()
        self.state = NoPressState(
            round_nb=0,
            last_message="",
            current_agent=start_agent,
            quantities=quantities,
            values=values,
            previous_values=None,
            splits={aid: None for aid in self.agent_ids},
            nb_messages_sent={aid: 0 for aid in self.agent_ids},
            split_phase=True,
            previous_splits=None,
            previous_points=None,
            previous_quantities=None,
        )
        return self.get_obs()
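
# Worked example of the rejection sampling in `_sample_values` above
# (illustrative numbers): with game_type="10-1-exclusive" and
# item_types=["gold", "silver"], a draw such as
#   values = {"a": {"gold": 10, "silver": 1}, "b": {"gold": 1, "silver": 10}}
# has equal per-agent totals (11, 11) and is accepted when
# same_round_value=True, while a draw with unequal totals is re-sampled.
# With atleast_one_conflict=True, a draw in which both agents value every
# item identically is also re-sampled.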
src_code_for_reproducibility/markov_games/negotiation/tas_rps_simulation.py ADDED
@@ -0,0 +1,248 @@
"""
Trust-and-Split simulation (rock-paper-scissors variant).

This environment models a simple bargaining game over 10 coins with messaging.
Agents are assigned rock/paper/scissors hands; the winner gets value 10 per coin
and the loser gets value 1 per coin. Agents alternate sending messages for a fixed
number of turns per round, then each submits a split proposal indicating how many
coins it keeps for itself. Rewards are allocated proportionally if the proposed
totals exceed 10.
"""

import copy
from dataclasses import dataclass
from typing import Any, Dict, Literal, Tuple

from mllm.markov_games.negotiation.nego_simulation import (
    NegotiationObs,
    NegotiationSimulation,
    NegotiationState,
    Split,
    compute_tas_style_rewards,
)

AgentId = str


def _get_rps_winner(
    hand1: Literal["rock", "paper", "scissors"],
    hand2: Literal["rock", "paper", "scissors"],
) -> Literal["rock", "paper", "scissors"]:
    """Return the winning hand of a rock-paper-scissors matchup (hands must differ)."""
    if hand1 == hand2:
        raise ValueError("Hands should be different")
    if (
        (hand1 == "rock" and hand2 == "scissors")
        or (hand1 == "paper" and hand2 == "rock")
        or (hand1 == "scissors" and hand2 == "paper")
    ):
        return hand1
    else:
        return hand2


@dataclass
class TrustAndSplitRPSState(NegotiationState):
    hands: Dict[AgentId, Literal["rock", "paper", "scissors"]]
    previous_hands: Dict[AgentId, Literal["rock", "paper", "scissors"]] | None


@dataclass
class TrustAndSplitRPSObs(NegotiationObs):
    hand: Literal["rock", "paper", "scissors"]
    last_hand_agent: Literal["rock", "paper", "scissors"] | None
    last_hand_coagent: Literal["rock", "paper", "scissors"] | None
    last_hand_value_coagent: Literal["upper", "lower"] | None


class TrustAndSplitRPSSimulation(NegotiationSimulation):
    def __init__(
        self,
        alternating_hands: bool = False,
        alternating_mix_ratio: float | None = None,
        *args,
        **kwargs,
    ):
        self.alternating_hands = alternating_hands
        self.alternating_mix_ratio = alternating_mix_ratio
        super().__init__(*args, **kwargs)
        if self.alternating_mix_ratio is not None:
            if self.rng.random() < self.alternating_mix_ratio:
                self.alternating_hands = True
            else:
                self.alternating_hands = False

    def _sample_hands_and_values(
        self,
        alternate_hands: bool = False,
    ) -> Tuple[Dict[AgentId, str], Dict[AgentId, float]]:
        hands = ["rock", "paper", "scissors"]
        if alternate_hands:
            previous_hands = list(self.state.previous_hands.values())
            hand1, hand2 = self.rng.choice(hands, size=2, replace=False)
            winner = _get_rps_winner(hand1, hand2)
            loser = hand1 if winner == hand2 else hand2
            previous_winner = _get_rps_winner(previous_hands[0], previous_hands[1])
            agent_hands, values = {}, {}
            for agent_id in self.agent_ids:
                if self.state.previous_hands[agent_id] == previous_winner:
                    agent_hands[agent_id] = loser
                    values[agent_id] = 1.0
                else:
                    agent_hands[agent_id] = winner
                    values[agent_id] = 10.0
            return agent_hands, values
        else:
            # Assign different hands to each agent
            hand1, hand2 = self.rng.choice(hands, size=2, replace=False)

            agent_hands = {self.agent_ids[0]: hand1, self.agent_ids[1]: hand2}

            # Determine the winner and assign values
            winner = _get_rps_winner(hand1, hand2)
            values = {}
            for agent_id in self.agent_ids:
                if agent_hands[agent_id] == winner:
                    values[agent_id] = 10.0  # Winner gets value 10
                else:
                    values[agent_id] = 1.0  # Loser gets value 1

            return agent_hands, values

    def set_new_round_of_variant(self):
        self.state.previous_hands = copy.deepcopy(self.state.hands)
        new_hands, new_values = self._sample_hands_and_values(
            alternate_hands=self.alternating_hands
        )
        self.state.hands = new_hands
        self.state.values = new_values
        # Quantities are constant in TAS
        self.state.quantities = {"coins": 10}
        self.state.split_phase = False

    def get_info_of_variant(
        self, state: NegotiationState, actions: Dict[AgentId, Any]
    ) -> Dict[str, Any]:
        return {
            "quantities": copy.deepcopy(state.quantities),
            "hands": copy.deepcopy(state.hands),
            "values": copy.deepcopy(state.values),
            "previous_hands": copy.deepcopy(state.previous_hands),
            "previous_values": copy.deepcopy(state.previous_values),
            "splits": copy.deepcopy(state.splits),
        }

    def get_rewards(self, splits: Dict[AgentId, Split]) -> Dict[AgentId, float]:
        return compute_tas_style_rewards(
            self.agent_ids, self.state.values, splits, self.state.quantities
        )

    def get_obs_agent(self, agent_id):
        """Returns the observation for agent_id."""
        other_id = self._other(agent_id)
        last_value_coagent = (
            None
            if self.state.previous_values is None
            else self.state.previous_values.get(other_id)
        )
        last_hand_coagent = (
            None
            if self.state.previous_hands is None
            else self.state.previous_hands.get(other_id)
        )
        last_points_coagent = (
            None
            if self.state.previous_points is None
            else round(self.state.previous_points.get(other_id), 1)
        )
        last_value_agent = (
            None
            if self.state.previous_values is None
            else self.state.previous_values.get(agent_id)
        )
        last_hand_agent = (
            None
            if self.state.previous_hands is None
            else self.state.previous_hands.get(agent_id)
        )
        last_points_agent = (
            None
            if self.state.previous_points is None
            else round(self.state.previous_points.get(agent_id), 1)
        )
        last_split_coagent = None
        last_split_agent = None
        if self.state.previous_splits is not None:
            last_split_coagent = self.state.previous_splits[
                other_id
            ].items_given_to_self["coins"]
            last_split_agent = self.state.previous_splits[agent_id].items_given_to_self[
                "coins"
            ]
        if last_hand_agent is None or last_hand_coagent is None:
            last_hand_value_coagent = None
        else:
            winner = _get_rps_winner(last_hand_agent, last_hand_coagent)
            last_hand_value_coagent = (
                "upper" if winner == last_hand_coagent else "lower"
            )
        obs = TrustAndSplitRPSObs(
            round_nb=self.state.round_nb,
            last_message=self.state.last_message,
            quota_messages_per_agent_per_round=self.quota_messages_per_agent_per_round,
            current_agent=self.state.current_agent,
            other_agent=self.agent_id_to_name[other_id],
            quantities={"coins": 10},
            item_types=self.item_types,
            value=self.state.values[agent_id],
            split_phase=self.state.split_phase,
            last_split_agent=last_split_agent,
            last_value_agent=last_value_agent,
            last_points_agent=last_points_agent,
            last_split_coagent=last_split_coagent,
            last_value_coagent=last_value_coagent,
            last_points_coagent=last_points_coagent,
            hand=self.state.hands[agent_id],
            last_hand_coagent=last_hand_coagent,
            last_hand_agent=last_hand_agent,
            last_quantities=self.state.previous_quantities,
            last_hand_value_coagent=last_hand_value_coagent,
        )
        return obs

    def get_state(self):
        return self.state

    def get_safe_copy(self):
        """Return a safe copy of the simulation."""
        simulation_copy = copy.copy(self)
        simulation_copy.state = copy.deepcopy(self.state)
        return simulation_copy

    def reset(self):
        """Initialize and return the initial observations."""
        # Decide the starting agent; it alternates across rounds for determinism
        start_agent = self.agent_ids[self._starting_agent_index]
        hands, values = self._sample_hands_and_values()
        self.state = TrustAndSplitRPSState(
            round_nb=0,
            last_message="",
            current_agent=start_agent,
            quantities={"coins": 10},
            values=values,
            splits={aid: None for aid in self.agent_ids},
            nb_messages_sent={aid: 0 for aid in self.agent_ids},
            previous_values=None,
            previous_splits=None,
            previous_points=None,
            split_phase=False,
            hands=hands,
            previous_hands=None,
            previous_quantities=None,
        )
        return self.get_obs()
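
# Worked example of `alternate_hands` in `_sample_hands_and_values` above
# (illustrative): if agent "a" held "rock" and agent "b" held "scissors"
# last round, "a" was the previous winner. A fresh pair of distinct hands is
# then drawn, say ("paper", "rock"); "a" receives the losing hand ("rock",
# per-coin value 1.0) and "b" the winning hand ("paper", per-coin value
# 10.0), so the valuation advantage flips deterministically each round.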
src_code_for_reproducibility/markov_games/negotiation/tas_simulation.py ADDED
@@ -0,0 +1,172 @@
import copy
from collections import defaultdict
from dataclasses import dataclass
from typing import Any, Dict, Literal

from mllm.markov_games.negotiation.nego_simulation import (
    NegotiationObs,
    NegotiationSimulation,
    NegotiationState,
    Split,
    compute_tas_style_rewards,
)

AgentId = str


@dataclass
class TrustAndSplitState(NegotiationState):
    pass


@dataclass
class TrustAndSplitObs(NegotiationObs):
    pass


class TrustAndSplitSimulation(NegotiationSimulation):
    def __init__(
        self,
        game_type: Literal["10-1-exclusive", "10-1-ties", "1-to-20"] = "1-to-20",
        same_round_value: bool = True,
        atleast_one_conflict: bool = False,
        *args,
        **kwargs,
    ):
        self.game_type = game_type
        self.same_round_value = same_round_value
        self.atleast_one_conflict = atleast_one_conflict
        super().__init__(*args, **kwargs)

    def _sample_values(self) -> Dict[AgentId, dict]:
        values = defaultdict(dict)
        if self.state is None:
            item_types = self.item_types
        else:
            item_types = list(self.state.quantities.keys())
        while True:
            for item in item_types:
                if self.game_type == "10-1-exclusive":
                    v = int(self.rng.choice([1, 10]))
                    values[self.agent_ids[0]][item] = v
                    values[self.agent_ids[1]][item] = 10 if v == 1 else 1
                elif self.game_type == "10-1-ties":
                    for aid in self.agent_ids:
                        values[aid][item] = int(self.rng.choice([1, 10]))
                elif self.game_type == "1-to-20":
                    for aid in self.agent_ids:
                        values[aid][item] = int(self.rng.integers(1, 21))
            agent_values = [sum(v.values()) for v in values.values()]
            if self.atleast_one_conflict:
                has_conflict = False
                for item in item_types:
                    agent_values_for_item = [
                        values[aid][item] for aid in self.agent_ids
                    ]
                    if len(set(agent_values_for_item)) > 1:
                        # Different values for this item
                        has_conflict = True
                        break
                if not has_conflict:
                    continue
            if len(set(agent_values)) == 1 or not self.same_round_value:
                break
        return values

    def _sample_quantities(self) -> Dict[str, int]:
        return {item.lower(): 10 for item in self.item_types}

    def set_new_round_of_variant(self):
        self.state.quantities = self._sample_quantities()
        self.state.values = self._sample_values()
        self.state.split_phase = False

    def get_info_of_variant(
        self, state: NegotiationState, actions: Dict[AgentId, Any]
    ) -> Dict[str, Any]:
        return {
            "quantities": copy.deepcopy(state.quantities),
            "values": copy.deepcopy(state.values),
            # "previous_values": copy.deepcopy(state.previous_values),
            "splits": copy.deepcopy(state.splits),
        }

    def get_rewards(self, splits: Dict[AgentId, Split]) -> Dict[AgentId, float]:
        return compute_tas_style_rewards(
            self.agent_ids, self.state.values, splits, self.state.quantities
        )

    def get_obs(self):
        return {agent_id: self.get_obs_agent(agent_id) for agent_id in self.agent_ids}

    def get_obs_agent(self, agent_id):
        other_id = self._other(agent_id)
        last_value_coagent = (
            None
            if self.state.previous_values is None
            else self.state.previous_values.get(other_id)
        )
        last_points_coagent = (
            None
            if self.state.previous_points is None
            else round(self.state.previous_points.get(other_id), 1)
        )
        last_value_agent = (
            None
            if self.state.previous_values is None
            else self.state.previous_values.get(agent_id)
        )
        last_points_agent = (
            None
            if self.state.previous_points is None
            else round(self.state.previous_points.get(agent_id), 1)
        )
        last_split_coagent = None
        last_split_agent = None
        if self.state.previous_splits is not None:
            last_split_coagent = self.state.previous_splits[
                other_id
            ].items_given_to_self
            last_split_agent = self.state.previous_splits[agent_id].items_given_to_self
        obs = TrustAndSplitObs(
            round_nb=self.state.round_nb,
            last_message=self.state.last_message,
            quota_messages_per_agent_per_round=self.quota_messages_per_agent_per_round,
            current_agent=self.state.current_agent,
            other_agent=self.agent_id_to_name[other_id],
            quantities=self.state.quantities,
            item_types=self.item_types,
            value=self.state.values[agent_id],
            split_phase=self.state.split_phase,
            last_split_agent=last_split_agent,
            last_value_agent=last_value_agent,
            last_points_agent=last_points_agent,
            last_split_coagent=last_split_coagent,
            last_value_coagent=last_value_coagent,
            last_points_coagent=last_points_coagent,
            last_quantities=self.state.previous_quantities,
        )
        return obs

    def reset(self):
        start_agent = self.agent_ids[self._starting_agent_index]
        quantities = self._sample_quantities()
        values = self._sample_values()
        self.state = TrustAndSplitState(
            round_nb=0,
            last_message="",
            current_agent=start_agent,
            quantities=quantities,
            values=values,
            previous_values=None,
            splits={aid: None for aid in self.agent_ids},
            nb_messages_sent={aid: 0 for aid in self.agent_ids},
            split_phase=False,
            previous_splits=None,
            previous_points=None,
            previous_quantities=None,
        )
        return self.get_obs()
src_code_for_reproducibility/markov_games/rollout_tree.py ADDED
@@ -0,0 +1,86 @@
"""
TODO: add parent to nodes so that some verification can be done, e.g. to
ensure that a node's reward keys match the parent node's.
"""

from __future__ import annotations

from typing import Any, List

from pydantic import BaseModel, Field, model_validator

from mllm.chat_utils.chat_turn import ChatTurn

AgentId = str


class SimulationStepLog(BaseModel):
    rewards: dict[AgentId, float]
    info: Any = None


class AgentActLog(BaseModel):
    chat_turns: list[ChatTurn] | None
    info: Any = None

    @model_validator(mode="after")
    def _exactly_one_state_end(self):
        """
        Enforce that each non-empty AgentActLog has exactly one ChatTurn with
        is_state_end=True.
        """
        if self.chat_turns:
            n = sum(1 for t in self.chat_turns if t.is_state_end)
            if n != 1:
                raise ValueError(
                    f"AgentActLog must have exactly one ChatTurn with is_state_end=True; got {self.chat_turns}."
                )
        return self


class StepLog(BaseModel):
    action_logs: dict[AgentId, AgentActLog]
    simulation_step_log: SimulationStepLog


# BranchType = Literal["unilateral_deviation", "common_deviation"]  # might not be necessary
# class BranchNodeInfo(BaseModel):
#     branch_id: str
#     branch_for: AgentId
#     branch_type: BranchType


class RolloutTreeNode(BaseModel):
    step_log: StepLog
    time_step: int
    child: RolloutTreeNode | RolloutTreeBranchNode | None = None


class RolloutTreeBranchNode(BaseModel):
    """
    The keys of ``branches`` indicate which agent "called" for an alternative branch.
    """

    main_child: RolloutTreeNode
    branches: dict[AgentId, list[RolloutTreeNode]] | None = None


class RolloutTreeRootNode(BaseModel):
    id: int
    crn_id: int  # ID of the rng used to generate this rollout tree
    child: RolloutTreeNode | RolloutTreeBranchNode | None = None
    agent_ids: List[AgentId] = Field(min_length=1)


# class RolloutTreeLeafNode(BaseModel):
#     step_log: StepLog
#     time_step: int


# Necessary for self-referential models in pydantic
RolloutTreeBranchNode.model_rebuild()
RolloutTreeNode.model_rebuild()
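
# Minimal construction sketch (illustrative values only; optional ChatTurn
# fields are omitted and its exact signature is assumed from how it is used
# elsewhere in this codebase). It shows how a rollout with one alternative
# branch fits together.
def _example_tree() -> RolloutTreeRootNode:
    step = StepLog(
        action_logs={
            "alice": AgentActLog(
                chat_turns=[
                    ChatTurn(
                        agent_id="alice", role="user", content="...", is_state_end=True
                    )
                ],
                info=None,
            )
        },
        simulation_step_log=SimulationStepLog(rewards={"alice": 0.0}),
    )
    branch = RolloutTreeBranchNode(
        main_child=RolloutTreeNode(step_log=step, time_step=1),
        branches={"alice": [RolloutTreeNode(step_log=step, time_step=1)]},
    )
    return RolloutTreeRootNode(
        id=0,
        crn_id=0,
        child=RolloutTreeNode(step_log=step, time_step=0, child=branch),
        agent_ids=["alice"],
    )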
src_code_for_reproducibility/markov_games/run_markov_games.py ADDED
@@ -0,0 +1,24 @@
import asyncio
from collections.abc import Callable

from mllm.markov_games.markov_game import MarkovGame
from mllm.markov_games.rollout_tree import RolloutTreeRootNode


async def run_markov_games(
    runner: Callable[..., RolloutTreeRootNode],
    runner_kwargs: dict,
    output_folder: str,
    markov_games: list[MarkovGame],
) -> list[RolloutTreeRootNode]:
    """Run all Markov games concurrently with the given runner and gather their rollout trees."""
    tasks = []
    for mg in markov_games:
        tasks.append(
            asyncio.create_task(
                runner(markov_game=mg, output_folder=output_folder, **runner_kwargs)
            )
        )
    return await asyncio.gather(*tasks)
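
# Hedged usage sketch: `linear_runner` below is assumed to follow the
# `runner(markov_game=..., output_folder=..., **kwargs)` calling convention
# used above; the function name inside `linear_runner.py` and the keyword
# arguments are assumptions, not confirmed API.
async def _example_run(markov_games: list[MarkovGame]) -> list[RolloutTreeRootNode]:
    from mllm.markov_games.linear_runner import linear_runner  # assumed entry point

    return await run_markov_games(
        runner=linear_runner,
        runner_kwargs={},
        output_folder="outputs/rollouts",
        markov_games=markov_games,
    )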
src_code_for_reproducibility/markov_games/simulation.py ADDED
@@ -0,0 +1,87 @@
"""
A Simulation is the environment of a Markov game.
The Simulation is not responsible for checking or formatting the responses of
LLMs; that is the job of the `Agent` class.
Simulations expect clean actions and are defined similarly to `gymnasium`
environments, except that they are adapted to the multi-agent setting.
"""

from abc import ABC, abstractmethod
from typing import Any, Tuple

from numpy.random import default_rng

from mllm.markov_games.rollout_tree import SimulationStepLog


class Simulation(ABC):
    @abstractmethod
    def __init__(self, seed: int, *args, **kwargs):
        self.seed = seed
        self.rng = default_rng(self.seed)

    @abstractmethod
    def step(self, actions: Any) -> Tuple[bool, SimulationStepLog]:
        """
        Returns (terminated, step_log).
        """
        raise NotImplementedError

    def get_obs(self):
        """Returns all agent observations in a dict.

        Returns:
            observations
        """
        raise NotImplementedError

    def get_obs_agent(self, agent_id):
        """Returns the observation for agent_id."""
        raise NotImplementedError

    def get_obs_size(self):
        """Returns the shape of the observation."""
        raise NotImplementedError

    def get_state(self):
        raise NotImplementedError

    def get_state_size(self):
        """Returns the shape of the state."""
        raise NotImplementedError

    def get_avail_actions(self):
        raise NotImplementedError

    def get_avail_agent_actions(self, agent_id):
        """Returns the available actions for agent_id."""
        raise NotImplementedError

    def get_total_actions(self):
        """Returns the total number of actions an agent could ever take."""
        # TODO: This is only suitable for a discrete 1-dimensional action space for each agent
        raise NotImplementedError

    def get_safe_copy(self):
        """
        Return a copy of the simulation object that is decorrelated from the original.
        """
        raise NotImplementedError

    def reset(self):
        """Returns initial observations and states."""
        raise NotImplementedError

    def render(self):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError

    # def seed(self):
    #     raise NotImplementedError

    def save_replay(self):
        raise NotImplementedError

    def get_simulation_info(self):
        raise NotImplementedError
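
# Minimal concrete subclass sketch (illustrative, not part of the repository):
# a one-step "echo" game in which each agent's reward is simply its own
# numeric action.
class _EchoSimulation(Simulation):
    def __init__(self, seed: int, agent_ids: list):
        super().__init__(seed)
        self.agent_ids = agent_ids

    def step(self, actions: Any) -> Tuple[bool, SimulationStepLog]:
        rewards = {aid: float(actions.get(aid, 0.0)) for aid in self.agent_ids}
        return True, SimulationStepLog(rewards=rewards, info={"type": "echo"})

    def reset(self):
        return {aid: None for aid in self.agent_ids}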
src_code_for_reproducibility/markov_games/vine_ppo.py ADDED
@@ -0,0 +1,10 @@
import asyncio
import os.path

from anytree import Node, RenderTree
from anytree.exporter import DotExporter

from mllm.markov_games.markov_game import MarkovGame


async def VinePPORunner(markov_game: MarkovGame, **kwargs):
    # Placeholder: the VinePPO rollout runner is not implemented yet.
    pass
src_code_for_reproducibility/models/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (155 Bytes).
src_code_for_reproducibility/models/__pycache__/adapter_training_wrapper.cpython-312.pyc ADDED
Binary file (4.92 kB).
src_code_for_reproducibility/models/__pycache__/human_policy.cpython-312.pyc ADDED
Binary file (11.9 kB).
src_code_for_reproducibility/models/__pycache__/inference_backend.cpython-312.pyc ADDED
Binary file (2.24 kB).
src_code_for_reproducibility/models/__pycache__/inference_backend_dummy.cpython-312.pyc ADDED
Binary file (2.34 kB).
src_code_for_reproducibility/models/__pycache__/inference_backend_sglang.cpython-312.pyc ADDED
Binary file (3.67 kB).
src_code_for_reproducibility/models/__pycache__/inference_backend_vllm.cpython-312.pyc ADDED
Binary file (4.98 kB).
src_code_for_reproducibility/models/__pycache__/large_language_model_api.cpython-312.pyc ADDED
Binary file (6.94 kB).
src_code_for_reproducibility/models/__pycache__/large_language_model_local.cpython-312.pyc ADDED
Binary file (16.7 kB).
src_code_for_reproducibility/models/__pycache__/scalar_critic.cpython-312.pyc ADDED
Binary file (3.21 kB).
src_code_for_reproducibility/training/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (157 Bytes).
src_code_for_reproducibility/training/__pycache__/annealing_methods.cpython-312.pyc ADDED
Binary file (502 Bytes).