Muqeeth committed (verified)
Commit 8778c8a · 1 Parent(s): 4fdfd99

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.

Files changed (50)
  1. src_code_for_reproducibility/chat_utils/__pycache__/apply_template.cpython-312.pyc +0 -0
  2. src_code_for_reproducibility/docs/source/conf.py +48 -0
  3. src_code_for_reproducibility/docs/source/contributing.rst +0 -0
  4. src_code_for_reproducibility/docs/source/environments/diplomacy.rst +459 -0
  5. src_code_for_reproducibility/docs/source/environments/ipd.rst +411 -0
  6. src_code_for_reproducibility/docs/source/index.rst +22 -0
  7. src_code_for_reproducibility/docs/source/installation.rst +10 -0
  8. src_code_for_reproducibility/docs/source/launch.rst +0 -0
  9. src_code_for_reproducibility/docs/source/marl_standard.rst +141 -0
  10. src_code_for_reproducibility/docs/source/media/runbatch.png +0 -0
  11. src_code_for_reproducibility/docs/source/src.environments.dond.dond_agent.rst +7 -0
  12. src_code_for_reproducibility/docs/source/src.environments.dond.dond_game.rst +7 -0
  13. src_code_for_reproducibility/docs/source/src.environments.dond.dond_log_funcs.rst +7 -0
  14. src_code_for_reproducibility/docs/source/src.environments.dond.dond_player.rst +7 -0
  15. src_code_for_reproducibility/docs/source/src.environments.dond.dond_return_funcs.rst +7 -0
  16. src_code_for_reproducibility/docs/source/src.environments.dond.dond_training_data_funcs.rst +7 -0
  17. src_code_for_reproducibility/docs/source/src.environments.env_imports.rst +7 -0
  18. src_code_for_reproducibility/docs/source/src.environments.environment_imports.rst +7 -0
  19. src_code_for_reproducibility/docs/source/src.environments.ipd.ipd_agent.rst +7 -0
  20. src_code_for_reproducibility/docs/source/src.environments.ipd.ipd_game.rst +7 -0
  21. src_code_for_reproducibility/docs/source/src.environments.ipd.ipd_statistics_funcs.rst +7 -0
  22. src_code_for_reproducibility/docs/source/src.environments.rst +25 -0
  23. src_code_for_reproducibility/docs/source/src.experiments.arithmetic_test.rst +7 -0
  24. src_code_for_reproducibility/docs/source/src.experiments.last_completion.rst +7 -0
  25. src_code_for_reproducibility/docs/source/src.generation.rst +15 -0
  26. src_code_for_reproducibility/docs/source/src.models.dummy_local_llm.rst +7 -0
  27. src_code_for_reproducibility/docs/source/src.models.local_llm.rst +7 -0
  28. src_code_for_reproducibility/docs/source/src.models.new_local_llm.rst +7 -0
  29. src_code_for_reproducibility/docs/source/src.models.rst +20 -0
  30. src_code_for_reproducibility/docs/source/src.models.updatable_worker.rst +7 -0
  31. src_code_for_reproducibility/docs/source/src.training.reinforce_training.rst +7 -0
  32. src_code_for_reproducibility/docs/source/src.training.rl_convs_processing.rst +7 -0
  33. src_code_for_reproducibility/docs/source/src.training.rst +19 -0
  34. src_code_for_reproducibility/docs/source/src.utils.log_gpu_usage.rst +7 -0
  35. src_code_for_reproducibility/docs/source/src.utils.rst +24 -0
  36. src_code_for_reproducibility/docs/source/src.utils.update_start_epoch.rst +7 -0
  37. src_code_for_reproducibility/docs/source/usage.rst +0 -0
  38. src_code_for_reproducibility/markov_games/__pycache__/__init__.cpython-311.pyc +0 -0
  39. src_code_for_reproducibility/markov_games/__pycache__/__init__.cpython-312.pyc +0 -0
  40. src_code_for_reproducibility/markov_games/__pycache__/agent.cpython-312.pyc +0 -0
  41. src_code_for_reproducibility/markov_games/__pycache__/alternative_actions_runner.cpython-312.pyc +0 -0
  42. src_code_for_reproducibility/markov_games/__pycache__/gather_and_export_utils.cpython-312.pyc +0 -0
  43. src_code_for_reproducibility/markov_games/__pycache__/group_timesteps.cpython-312.pyc +0 -0
  44. src_code_for_reproducibility/markov_games/__pycache__/linear_runner.cpython-312.pyc +0 -0
  45. src_code_for_reproducibility/markov_games/__pycache__/markov_game.cpython-312.pyc +0 -0
  46. src_code_for_reproducibility/markov_games/__pycache__/rollout_tree.cpython-311.pyc +0 -0
  47. src_code_for_reproducibility/markov_games/__pycache__/rollout_tree.cpython-312.pyc +0 -0
  48. src_code_for_reproducibility/markov_games/__pycache__/run_markov_games.cpython-312.pyc +0 -0
  49. src_code_for_reproducibility/markov_games/__pycache__/simulation.cpython-312.pyc +0 -0
  50. src_code_for_reproducibility/markov_games/diplomacy/diplomacy_agent.py +259 -0
src_code_for_reproducibility/chat_utils/__pycache__/apply_template.cpython-312.pyc ADDED
Binary file (3.64 kB).
 
src_code_for_reproducibility/docs/source/conf.py ADDED
@@ -0,0 +1,48 @@
+ # Configuration file for the Sphinx documentation builder.
+ import os
+ import sys
+ sys.path.insert(0, os.path.abspath('../..'))
+
+ # -- Project information -----------------------------------------------------
+ project = 'llm_negotiation'
+ copyright = '2023, Your Name'
+ author = 'Your Name'
+
+ # -- General configuration ---------------------------------------------------
+ extensions = [
+     'sphinx.ext.autodoc',
+     'sphinx.ext.viewcode',
+     'sphinx.ext.napoleon',
+     'sphinx.ext.autosummary',
+     'sphinx.ext.intersphinx',
+     'sphinx.ext.mathjax',
+     'sphinxcontrib.mermaid',
+     'sphinx_rtd_theme',
+ ]
+
+ templates_path = ['_templates']
+ exclude_patterns = []
+
+ # -- Options for HTML output -------------------------------------------------
+ html_theme = 'sphinx_rtd_theme'
+ html_static_path = ['_static']
+
+ # -- Napoleon settings -------------------------------------------------------
+ napoleon_google_docstring = True
+ napoleon_numpy_docstring = False
+ napoleon_include_init_with_doc = True
+ napoleon_include_private_with_doc = False
+ napoleon_include_special_with_doc = True
+ napoleon_use_admonition_for_examples = False
+ napoleon_use_admonition_for_notes = False
+ napoleon_use_admonition_for_references = False
+ napoleon_use_ivar = False
+ napoleon_use_param = True
+ napoleon_use_rtype = True
+ napoleon_preprocess_types = False
+ napoleon_type_aliases = None
+ napoleon_attr_annotations = True
+
+ # -- Path setup --------------------------------------------------------------
+ # Make sure the project's modules can be found by Sphinx
+ sys.path.insert(0, os.path.abspath('../../src'))
src_code_for_reproducibility/docs/source/contributing.rst ADDED
File without changes
src_code_for_reproducibility/docs/source/environments/diplomacy.rst ADDED
@@ -0,0 +1,459 @@
+ =================
+ Diplomacy
+ =================
+
+ The Diplomacy environment provides a multi-agent negotiation interface for the classic board game Diplomacy,
+ based on DeepMind's implementation. This document describes the API for interacting with the Diplomacy environment
+ and its associated agent handler.
+
+ Overview
+ --------
+
+ Diplomacy is a strategic board game set in Europe before World War I, where players control one of seven European powers
+ and negotiate with each other to gain control of supply centers. The game is played in turns, with each turn consisting
+ of movement phases, retreat phases, and build phases.
+
+ Our implementation adapts DeepMind's Diplomacy code to the Multi-Agent Negotiation Environment standard, allowing it
+ to be used with LLM agents through a text-based interface.
+
+ Game Rules
+ ----------
+
+ Game Board and Powers
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Diplomacy is played on a map of Europe divided into provinces. The game features seven Great Powers that players can control:
+
+ - England (blue)
+ - France (light blue)
+ - Germany (black)
+ - Italy (green)
+ - Austria-Hungary (red)
+ - Russia (white)
+ - Turkey (yellow)
+
+ Each power begins with three supply centers (except Russia, which starts with four) and an equal number of units.
+
+ Units and Movement
+ ~~~~~~~~~~~~~~~~~~
+
+ There are two types of units in Diplomacy:
+
+ - **Armies (A)**: Can move to adjacent land provinces or be convoyed across water by fleets
+ - **Fleets (F)**: Can move to adjacent coastal provinces and sea regions
+
+ During movement phases, each unit can execute one of these orders:
+
+ - **Hold**: The unit remains in its current province (e.g., "A PAR H")
+
+   - Format: [Unit Type] [Province] H
+   - Example: "A PAR H" means "Army in Paris holds its position"
+
+ - **Move**: The unit attempts to move to an adjacent province (e.g., "A PAR - BUR")
+
+   - Format: [Unit Type] [Current Province] - [Destination Province]
+   - Example: "A PAR - BUR" means "Army in Paris moves to Burgundy"
+   - Example: "F BRE - ENG" means "Fleet in Brest moves to the English Channel"
+
+ - **Support**: The unit supports another unit's move or hold (e.g., "A PAR S A MAR - BUR")
+
+   - Format for supporting a move: [Unit Type] [Province] S [Unit Type] [Province] - [Destination]
+   - Format for supporting a hold: [Unit Type] [Province] S [Unit Type] [Province]
+   - Example: "A PAR S A MAR - BUR" means "Army in Paris supports the Army in Marseille's move to Burgundy"
+   - Example: "F LON S F NTH" means "Fleet in London supports the Fleet in the North Sea holding its position"
+
+ - **Convoy**: A fleet can convoy an army across water (e.g., "F ENG C A LON - BRE")
+
+   - Format: [Fleet] [Sea Province] C [Army] [Coastal Province] - [Coastal Province]
+   - Example: "F ENG C A LON - BRE" means "Fleet in English Channel convoys the Army in London to Brest"
+
+ All orders are executed simultaneously, and conflicts are resolved based on strength (number of supporting units).
+
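+ A minimal sketch of how these order strings can be tokenized (illustrative only;
+ this regex parser is not part of the environment's API, and full adjudication
+ must also handle coasts and convoy chains):
+
+ .. code-block:: python
+
+     import re
+
+     # Matches hold, move, support, and convoy orders such as
+     # "A PAR H", "A PAR - BUR", "A PAR S A MAR - BUR", "F ENG C A LON - BRE".
+     ORDER_RE = re.compile(
+         r"^(?P<type>[AF]) (?P<loc>\w{3})"  # acting unit and its province
+         r"(?: (?P<op>[HSC-]))?"            # H(old), S(upport), C(onvoy), or - (move)
+         r"(?: (?P<rest>.*))?$"             # destination, or the supported/convoyed order
+     )
+
+     def parse_order(order: str) -> dict:
+         """Split a Diplomacy order string into its parts."""
+         match = ORDER_RE.match(order.strip())
+         if match is None:
+             raise ValueError(f"Unparsable order: {order!r}")
+         return match.groupdict()
+
+     assert parse_order("A PAR - BUR") == {
+         "type": "A", "loc": "PAR", "op": "-", "rest": "BUR"
+     }
+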
+ Common Province Abbreviations
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Diplomacy uses three-letter abbreviations for provinces. Some common ones include:
+
+ - **PAR**: Paris
+ - **LON**: London
+ - **BER**: Berlin
+ - **MUN**: Munich
+ - **BUR**: Burgundy
+ - **MAR**: Marseilles
+ - **BRE**: Brest
+ - **ENG**: English Channel
+ - **NTH**: North Sea
+ - **VIE**: Vienna
+ - **ROM**: Rome
+ - **VEN**: Venice
+ - **MOW**: Moscow
+ - **CON**: Constantinople
+
+ Example: Movement and Conflicts
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ For example, if France orders "A PAR - BUR" and Germany orders "A MUN - BUR", neither move succeeds, as they have equal strength. However, if France also orders "A MAR S A PAR - BUR", then the French army from Paris would successfully move to Burgundy with a strength of 2 against Germany's strength of 1.
+
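+ A toy illustration of this strength comparison under simplifying assumptions
+ (every unit counts 1 plus 1 per valid support; the real adjudicator also handles
+ cut support, dislodgement, and convoy disruption):
+
+ .. code-block:: python
+
+     def move_strength(move: str, orders: list[str]) -> int:
+         """Count 1 for the moving unit plus 1 per order supporting `move`."""
+         # "A MAR S A PAR - BUR" supports the move "A PAR - BUR".
+         unit, destination = move.split(" - ")
+         supports = sum(
+             1 for o in orders if o.endswith(f"S {unit} - {destination}")
+         )
+         return 1 + supports
+
+     french = ["A PAR - BUR", "A MAR S A PAR - BUR"]
+     german = ["A MUN - BUR"]
+     assert move_strength("A PAR - BUR", french) == 2  # beats strength 1
+     assert move_strength("A MUN - BUR", german) == 1
+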
+ Turn Structure
+ ~~~~~~~~~~~~~~
+
+ A game year consists of five phases:
+
+ 1. **Spring Movement**: All powers submit orders for their units
+ 2. **Spring Retreat**: Units dislodged in the movement phase must retreat or be disbanded
+ 3. **Fall Movement**: Another round of movement orders
+ 4. **Fall Retreat**: Retreat orders for dislodged units
+ 5. **Winter Adjustment**: Powers gain or lose units based on the number of supply centers they control
+
+ Supply Centers and Building
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Supply centers (marked on the map) are key to victory. When a power occupies a supply center during a Fall turn, it gains control of it. During the Winter Adjustment phase:
+
+ - If you control more supply centers than you have units, you can build new units in your home supply centers
+ - If you control fewer supply centers than you have units, you must remove excess units
+
+ Example: Building and Removing Units
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ If France controls 5 supply centers but only has 4 units, during the Winter phase it can build one new unit in an unoccupied home supply center (Paris, Marseilles, or Brest). Conversely, if France controls only 3 supply centers but has 4 units, it must remove one unit of its choice.
+
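+ In code, the adjustment is simple arithmetic (the function below is an
+ illustrative sketch, not part of the environment's API):
+
+ .. code-block:: python
+
+     def winter_adjustment(num_supply_centers: int, num_units: int) -> int:
+         """Positive: builds allowed; negative: units that must be removed."""
+         return num_supply_centers - num_units
+
+     assert winter_adjustment(5, 4) == 1    # France may build one unit
+     assert winter_adjustment(3, 4) == -1   # France must remove one unit
+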
+ Negotiation
+ ~~~~~~~~~~~
+
+ A critical component of Diplomacy is the negotiation between players. Before submitting orders, players can communicate freely to form alliances, coordinate attacks, or mislead opponents. These negotiations are not binding, and betrayal is a common strategy.
+
+ Example: Alliance and Betrayal
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ England and France might agree to an alliance against Germany, with England promising to support France's move into Belgium. However, England could secretly order its own fleet into Belgium or support a German move instead.
+
+ Victory Conditions
+ ~~~~~~~~~~~~~~~~~~
+
+ The game ends when one power controls 18 or more supply centers (a majority of the 34 total centers), or when players agree to a draw. In tournament settings, games may also end after a predetermined number of game years.
+
+ DiplomacyEnv
+ ------------
+
+ The ``DiplomacyEnv`` class provides an interface to the Diplomacy game environment that follows the Multi-Agent
+ Negotiation Environment standard.
+
+ .. code-block:: python
+
+     class DiplomacyEnv:
+         """
+         Multi-Agent Negotiation Environment for Diplomacy, adapting DeepMind's implementation
+         to the MarlEnvironment standard.
+         """
+         def __init__(self,
+                      initial_state: Optional[DiplomacyState] = None,
+                      max_turns: int = 100,
+                      points_per_supply_centre: bool = True,
+                      forced_draw_probability: float = 0.0,
+                      min_years_forced_draw: int = 35):
+             """Initialize the Diplomacy environment.
+
+             Args:
+                 initial_state: Initial DiplomacyState (optional)
+                 max_turns: Maximum number of turns in the game
+                 points_per_supply_centre: Whether to award points per supply center in case of a draw
+                 forced_draw_probability: Probability of forcing a draw after min_years_forced_draw
+                 min_years_forced_draw: Minimum years before considering a forced draw
+             """
+             # ...
+
+         def reset(self):
+             """Reset the environment to an initial state and return the initial observation.
+
+             Returns:
+                 observation (dict): A dictionary where keys are agent identifiers and values are observations.
+                     Each observation contains:
+                     - board_state: Current state of the board
+                     - current_season: Current season in the game
+                     - player_index: Index of the player's power
+                     - possible_actions: List of possible actions in DeepMind's format
+                     - human_readable_actions: List of human-readable action descriptions
+                     - supply_centers: List of supply centers owned by the player
+                     - units: List of units owned by the player
+                     - year: Current year in the game
+             """
+             # ...
+
+         def step(self, actions):
+             """Take a step in the environment using the provided actions.
+
+             Args:
+                 actions (dict): A dictionary where keys are agent identifiers and values are actions.
+                     Actions can be:
+                     - List of integer actions in DeepMind's format
+                     - List of string actions in text format (e.g., "A MUN - BER")
+
+             Returns:
+                 observations (dict): A dictionary where keys are agent identifiers and values are observations.
+                     Each observation has the same structure as in reset().
+                 done (bool): Whether the episode has ended.
+                 info (dict): Additional information about the environment, including:
+                     - turn: Current turn number
+                     - returns: Game returns if the game is done, otherwise None
+                     - waiting_for: List of agents that still need to provide actions (if not all actions are provided)
+             """
+             # ...
+
+         def get_log_info(self):
+             """Get additional information about the environment for logging.
+
+             Returns:
+                 log_info (dict): Information about the environment required to log the game, including:
+                     - power_names: List of power names
+                     - game_history: History of the game
+                     - current_turn: Current turn number
+                     - current_season: Current season name
+                     - supply_centers: Dictionary mapping power names to supply center counts
+             """
+             # ...
+
+         def render(self):
+             """Render the current state of the environment.
+
+             Displays a visualization of the current game state.
+             """
+             # ...
+
+         def close(self):
+             """Perform any necessary cleanup."""
+             # ...
+
+
+ Key Implementation Details
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ The ``DiplomacyEnv`` class implements several key features:
+
+ 1. **Multi-Agent Support**: The environment tracks multiple agents (powers) and manages their interactions.
+
+ 2. **Turn-Based Gameplay**: The environment enforces the turn structure of Diplomacy, including different phases.
+
+ 3. **Action Processing**: The environment can handle actions in both text format and DeepMind's integer format.
+
+ 4. **Observation Generation**: The environment generates detailed observations for each agent, including board state, supply centers, and possible actions.
+
+ 5. **Game Termination**: The environment tracks game termination conditions, including supply center victory and maximum turn limits.
+
+ Observation Structure
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Each agent receives an observation dictionary with the following structure:
+
+ .. code-block:: python
+
+     {
+         "board_state": np.ndarray,        # Board state representation
+         "current_season": int,            # Season index (0-4)
+         "player_index": int,              # Index of the player's power (0-6)
+         "possible_actions": [int],        # List of possible actions in DeepMind's format
+         "human_readable_actions": [str],  # List of human-readable action descriptions
+         "supply_centers": [str],          # List of supply centers owned by the player
+         "units": [dict],                  # List of units owned by the player
+         "year": int                       # Current year in the game
+     }
+
+ Action Structure
+ ~~~~~~~~~~~~~~~~
+
+ Actions can be provided in two formats:
+
+ 1. **Text Format**: String actions like ``"A MUN - BER"`` or ``"F NTH C A LON - BEL"``.
+
+ 2. **Integer Format**: Lists of integers corresponding to DeepMind's action representation.
+
+ The environment will convert text actions to the internal format as needed, as illustrated below.
+
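+ For instance, a hypothetical actions dictionary (the integer values shown are
+ illustrative placeholders, and ``env`` is assumed to be a ``DiplomacyEnv``
+ instance) could mix both formats across powers:
+
+ .. code-block:: python
+
+     actions = {
+         # Text format: one order string per unit
+         "FRANCE": ["A PAR - BUR", "A MAR S A PAR - BUR", "F BRE - ENG"],
+         # Integer format: indices taken from the observation's possible_actions
+         "GERMANY": [1234, 5678, 9012],
+     }
+     observations, done, info = env.step(actions)
+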
+ DiplomacyAgent
+ --------------
+
+ The ``DiplomacyAgent`` class implements the agent handler interface for Diplomacy, processing observations from the environment and generating actions through an LLM.
+
+ .. code-block:: python
+
+     class DiplomacyAgent:
+         """
+         Agent handler for Diplomacy, implementing the AgentState interface
+         for the multi-agent negotiation standard.
+         """
+
+         def __init__(self,
+                      power_name: str,
+                      use_text_interface: bool = True,
+                      system_prompt: Optional[str] = None):
+             """Initialize the Diplomacy agent handler.
+
+             Args:
+                 power_name: Name of the power this agent controls
+                 use_text_interface: Whether to use a text-based interface (vs. structured)
+                 system_prompt: Optional system prompt to use for the LLM
+             """
+             # ...
+
+         def step(self, observation_from_env, policy_output=None):
+             """Update the agent state based on the observation and action.
+
+             Args:
+                 observation_from_env: The observation from the environment, with structure:
+                     - board_state: Current state of the board
+                     - current_season: Current season in the game
+                     - player_index: Index of the player's power
+                     - possible_actions: List of possible actions
+                     - human_readable_actions: List of human-readable action descriptions
+                     - supply_centers: List of supply centers owned by the player
+                     - units: List of units owned by the player
+                     - year: Current year in the game
+                 policy_output: The output of the policy (LLM response), or None for the initial prompt
+
+             Returns:
+                 policy_id (str): The policy identifier ("llm_policy")
+                 policy_input (dict): The input to the policy, with structure:
+                     - messages: List of conversation messages in the format:
+                       [{"role": "system", "content": "..."},
+                        {"role": "user", "content": "..."}]
+                 action: The official action to be sent to the environment, or None if not ready
+                 done (bool): Whether the LLM action is ready to be sent to the environment
+                 info (dict): Additional information about the agent:
+                     - valid_action: Whether the extracted action is valid
+             """
+             # ...
+
+         def get_log_info(self):
+             """Get information about the agent required to log a trajectory.
+
+             Returns:
+                 log_info (dict): Information about the agent required to log a trajectory:
+                     - power_name: Name of the power this agent controls
+                     - conversation_history: List of conversation messages
+                     - current_action: The current action, if any
+             """
+             # ...
+
+         def render(self):
+             """Render the current state of the agent.
+
+             Displays the agent's current state, including conversation history.
+             """
+             # ...
+
+         def close(self):
+             """Perform any necessary cleanup."""
+             # ...
+
+
+ Key Implementation Details
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ The ``DiplomacyAgent`` class implements several key features:
+
+ 1. **LLM Interaction**: The agent generates prompts for an LLM and processes the LLM's responses to extract actions.
+
+ 2. **Conversation Management**: The agent maintains a conversation history for coherent interactions with the LLM.
+
+ 3. **Action Validation**: The agent validates extracted actions against the set of possible actions provided by the environment.
+
+ 4. **Error Handling**: The agent generates clarification prompts when invalid actions are detected.
+
+ 5. **Text-Based Interface**: The agent formats game state information into human-readable text for the LLM.
+
+ Prompt Structure
+ ~~~~~~~~~~~~~~~~
+
+ The agent generates prompts that include:
+
+ 1. **System Prompt**: Instructions and context for the LLM, explaining its role as a Diplomacy player.
+
+ 2. **Game State Description**: A text description of the current game state, including:
+
+    - Current year and season
+    - Supply centers owned
+    - Units controlled
+    - Possible actions
+
+ 3. **Action Request**: Instructions on how to format actions.
+
+ Example system prompt:
+
+ .. code-block:: text
+
+     You are playing the role of FRANCE in a game of Diplomacy.
+     Your goal is to control as many supply centers as possible.
+     You can negotiate with other players and form alliances, but remember that
+     these alliances are not binding. When you need to submit orders for your units,
+     write them in the correct format, with each order on a new line.
+
+ Example game state description:
+
+ .. code-block:: text
+
+     Year: 1901, Season: SPRING_MOVES
+     You are playing as FRANCE.
+     You currently control 3 supply centers: PAR, MAR, BRE.
+     Your units are: A PAR, A MAR, F BRE.
+
+     Please provide orders for your units. Here are your possible actions:
+     A PAR - BUR
+     A PAR - GAS
+     A PAR - PIC
+     A PAR H
+     ...
+
+     Submit your orders, one per line, in a format like: "A MUN - BER" or "F NTH C A LON - BEL"
+
+ Running Diplomacy Games
+ -----------------------
+
+ To run Diplomacy games with LLM agents, you can use the ``run_batched_matches`` function with the ``DiplomacyEnv`` and ``DiplomacyAgent`` classes (a sketch of the referenced ``my_llm_policy_function`` follows the example):
+
+ .. code-block:: python
+
+     from mllm.environments.diplomacy.diplomacy_env import DiplomacyEnv
+     from mllm.environments.diplomacy.diplomacy_agent import DiplomacyAgent
+     from mllm.run_matches import run_batched_matches
+
+     # Create environment and agent handlers
+     env = DiplomacyEnv(max_turns=30)
+
+     agent_handlers = {
+         "AUSTRIA": DiplomacyAgent(power_name="AUSTRIA"),
+         "ENGLAND": DiplomacyAgent(power_name="ENGLAND"),
+         "FRANCE": DiplomacyAgent(power_name="FRANCE"),
+         "GERMANY": DiplomacyAgent(power_name="GERMANY"),
+         "ITALY": DiplomacyAgent(power_name="ITALY"),
+         "RUSSIA": DiplomacyAgent(power_name="RUSSIA"),
+         "TURKEY": DiplomacyAgent(power_name="TURKEY")
+     }
+
+     # Define policy mapping (mapping from policy IDs to actual policy functions)
+     policy_mapping = {
+         "llm_policy": my_llm_policy_function
+     }
+
+     # Run the game
+     game_results = run_batched_matches(
+         envs=[env],
+         agent_handlers_per_env=[agent_handlers],
+         policy_mapping=policy_mapping,
+         max_parallel_matches=1
+     )
+
+     # Process results
+     for result in game_results:
+         print(f"Game finished. Winner: {result['winner']}")
+         print(f"Supply centers: {result['supply_centers']}")
+
+ This setup allows you to run Diplomacy games with LLM agents using the Multi-Agent Negotiation Environment standard.
+
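+ The example above assumes a user-supplied ``my_llm_policy_function``. Its exact
+ contract is not documented here; a plausible sketch, assuming the policy
+ receives a batch of ``policy_input`` dictionaries (each with a ``messages``
+ list) and returns one completion string per input, might look like:
+
+ .. code-block:: python
+
+     def my_llm_policy_function(policy_inputs: list[dict]) -> list[str]:
+         """Hypothetical batched policy: one LLM completion per input."""
+         completions = []
+         for policy_input in policy_inputs:
+             messages = policy_input["messages"]
+             # Call whatever LLM backend you use here (call_my_llm is a placeholder).
+             completions.append(call_my_llm(messages))
+         return completions
+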
+ Limitations and Considerations
+ ------------------------------
+
+ 1. **Performance**: Processing observations and actions for seven powers using LLMs can be computationally intensive.
+
+ 2. **Action Parsing**: Extracting valid actions from LLM outputs may require sophisticated parsing and error handling.
+
+ 3. **Game Complexity**: Diplomacy is a complex game with many rules and edge cases, which may be challenging for LLMs to fully grasp.
+
+ 4. **Turn Duration**: Real Diplomacy games include negotiation phases of variable duration, which are not fully captured in this implementation.
+
+ 5. **Text Formatting**: The quality of LLM interactions depends heavily on the formatting and clarity of text prompts.
+
+ Advanced Usage
+ --------------
+
+ For advanced usage, you can customize:
+
+ 1. **System Prompts**: Modify agent behavior by providing custom system prompts.
+
+ 2. **Observation Processing**: Extend the observation processing to include additional information.
+
+ 3. **Action Parsing**: Implement more sophisticated action parsing for complex orders.
+
+ 4. **Visualization**: Add custom visualization methods to the environment's render function.
+
+ 5. **Logging**: Extend the logging capabilities to capture additional information about the game state.
src_code_for_reproducibility/docs/source/environments/ipd.rst ADDED
@@ -0,0 +1,411 @@
+ ===========================
+ Iterated Prisoner's Dilemma
+ ===========================
+
+ The Iterated Prisoner's Dilemma environment provides a classic game theory setting for studying cooperation
+ and competition between agents. This document describes the API for interacting with the IPD environment
+ and its associated agent handler.
+
+ Overview
+ --------
+
+ The Prisoner's Dilemma is a fundamental problem in game theory that demonstrates why two rational individuals might not
+ cooperate, even when it appears to be in their best interest to do so. In the iterated version, the same two players
+ repeatedly face the same dilemma, allowing for the development of trust or retaliation based on previous interactions.
+
+ Our implementation follows the Multi-Agent Negotiation Environment standard, allowing it to be used with
+ LLM agents through a text-based interface.
+
+ Game Rules
+ ----------
+
+ Basic Premise
+ ~~~~~~~~~~~~~
+
+ The scenario behind the Prisoner's Dilemma is as follows:
+
+ Two criminals are arrested and imprisoned. Each prisoner is in solitary confinement with no means of communicating with
+ the other. The prosecutors lack sufficient evidence to convict the pair on the principal charge, but they have enough
+ to convict both on a lesser charge. Simultaneously, the prosecutors offer each prisoner a bargain:
+
+ - If both prisoners betray each other, each serves 2 years in prison (the "punishment" payoff)
+ - If one betrays the other while the other remains silent, the betrayer goes free (the "temptation" payoff) while the
+   silent accomplice serves 3 years (the "sucker" payoff)
+ - If both remain silent, each serves only 1 year in prison (the "reward" payoff)
+
+ Game Mechanics
+ ~~~~~~~~~~~~~~
+
+ In our implementation, the choices are simplified to:
+
+ - **C**: Cooperate (remain silent)
+ - **D**: Defect (betray the other prisoner)
+
+ Each round, both players simultaneously choose either C or D, and receive points based on the combination of their choices:
+
+ - Both choose C: Both receive the "reward" payoff (3 points by default)
+ - Both choose D: Both receive the "punishment" payoff (1 point by default)
+ - One chooses C, one chooses D: The defector receives the "temptation" payoff (5 points by default), while the cooperator
+   receives the "sucker" payoff (0 points by default)
+
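+ A minimal sketch of this payoff rule, using the default values (the function
+ name is illustrative, not part of the environment's API):
+
+ .. code-block:: python
+
+     def payoffs(a: str, b: str) -> tuple[float, float]:
+         """Return (points for player a, points for player b) for one round."""
+         table = {
+             ("C", "C"): (3.0, 3.0),  # reward, reward
+             ("D", "D"): (1.0, 1.0),  # punishment, punishment
+             ("C", "D"): (0.0, 5.0),  # sucker, temptation
+             ("D", "C"): (5.0, 0.0),  # temptation, sucker
+         }
+         return table[(a, b)]
+
+     assert payoffs("C", "D") == (0.0, 5.0)
+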
+ Example: Single Round
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Let's see how a single round plays out:
+
+ 1. Alice and Bob simultaneously make their choices
+ 2. If Alice chooses C and Bob chooses C:
+
+    - Alice receives 3 points
+    - Bob receives 3 points
+
+ 3. If Alice chooses C and Bob chooses D:
+
+    - Alice receives 0 points
+    - Bob receives 5 points
+
+ 4. If Alice chooses D and Bob chooses C:
+
+    - Alice receives 5 points
+    - Bob receives 0 points
+
+ 5. If Alice chooses D and Bob chooses D:
+
+    - Alice receives 1 point
+    - Bob receives 1 point
+
+ Iterated Game Structure
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ The iterated version repeats this basic game for a fixed number of rounds. The key features are:
+
+ 1. Players know the total number of rounds in advance
+ 2. After each round, players learn what choice the other player made
+ 3. Players maintain a cumulative score across all rounds
+ 4. Players can adjust their strategy based on the history of previous interactions
+
+ Game Variations
+ ~~~~~~~~~~~~~~~
+
+ The IPD environment supports several variations through configuration parameters.
+
+ Different Payoff Matrices
+ ^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The standard payoff values can be modified to create different incentive structures:
+
+ - **Traditional PD**: reward=3, punishment=1, temptation=5, sucker=0
+ - **Weak Temptation**: reward=3, punishment=1, temptation=4, sucker=0 (reduces the incentive to defect)
+ - **Harsh Punishment**: reward=3, punishment=0, temptation=5, sucker=0 (increases the cost of mutual defection)
+ - **Generous**: reward=4, punishment=2, temptation=5, sucker=1 (cushions the blow of being betrayed)
+
+ Game Length Variations
+ ^^^^^^^^^^^^^^^^^^^^^^
+
+ The number of rounds can significantly impact strategy:
+
+ - **Short Games** (5-10 rounds): Incentivizes more defection, especially near the end
+ - **Medium Games** (20-50 rounds): Allows for the development of tit-for-tat and forgiveness strategies
+ - **Long Games** (100+ rounds): Favors steady cooperation with occasional "probing" defections
+
+ Common Strategies
+ ~~~~~~~~~~~~~~~~~
+
+ While not enforced by the environment, several well-known strategies can emerge (a sketch of one follows this list):
+
+ - **Always Cooperate**: Always choose C
+ - **Always Defect**: Always choose D
+ - **Tit for Tat**: Start with C, then copy what the opponent did in the previous round
+ - **Forgiving Tit for Tat**: Like Tit for Tat, but occasionally cooperate even after being defected against
+ - **Grudger**: Cooperate until the opponent defects once, then always defect
+ - **Random**: Choose randomly between C and D
+
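+ For instance, Tit for Tat reduces to a one-line rule (an illustrative sketch,
+ not part of the package):
+
+ .. code-block:: python
+
+     def tit_for_tat(opponent_history: list[str]) -> str:
+         """Cooperate first, then mirror the opponent's previous move."""
+         return opponent_history[-1] if opponent_history else "C"
+
+     assert tit_for_tat([]) == "C"          # opening move
+     assert tit_for_tat(["C", "D"]) == "D"  # retaliate after a defection
+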
+ IPDEnv
+ ------
+
+ The ``IPDEnv`` class provides an interface to the Iterated Prisoner's Dilemma environment that follows the
+ Multi-Agent Negotiation Environment standard.
+
+ .. code-block:: python
+
+     class IPDEnv:
+         """
+         Iterated Prisoner's Dilemma environment following the MarlEnvironment standard.
+
+         In each round of the game, two agents simultaneously choose to either cooperate (C) or defect (D).
+         The payoffs are as follows:
+         - If both cooperate: Both receive the "reward" (usually 3 points)
+         - If both defect: Both receive the "punishment" (usually 1 point)
+         - If one cooperates and one defects: The defector receives the "temptation" (usually 5 points)
+           and the cooperator receives the "sucker" payoff (usually 0 points)
+
+         The game is played for a specified number of rounds.
+         """
+
+         def __init__(
+             self,
+             rounds_per_game: int = 10,
+             reward: float = 3.0,      # Both cooperate
+             punishment: float = 1.0,  # Both defect
+             temptation: float = 5.0,  # Defector's reward when the other cooperates
+             sucker: float = 0.0,      # Cooperator's reward when the other defects
+             random_seed: Optional[int] = None,
+         ):
+             """
+             Initialize the Iterated Prisoner's Dilemma environment.
+
+             Args:
+                 rounds_per_game: Number of rounds to play
+                 reward: Payoff when both agents cooperate
+                 punishment: Payoff when both agents defect
+                 temptation: Payoff for defecting when the other agent cooperates
+                 sucker: Payoff for cooperating when the other agent defects
+                 random_seed: Random seed for reproducibility
+             """
+             # ...
+
+         def reset(self) -> Dict[str, Dict[str, Any]]:
+             """
+             Reset the environment to an initial state and return the initial observation.
+
+             Returns:
+                 observation (dict): A dictionary where keys are agent identifiers and values are observations.
+             """
+             # ...
+
+         def step(self, actions: Dict[str, str]) -> Tuple[Dict[str, Dict[str, Any]], bool, Dict[str, Any]]:
+             """
+             Take a step in the environment using the provided actions.
+
+             Args:
+                 actions (dict): A dictionary where keys are agent identifiers and values are actions ('C' or 'D').
+
+             Returns:
+                 observations (dict): A dictionary where keys are agent identifiers and values are observations.
+                 done (bool): Whether the episode has ended.
+                 info (dict): Additional information about the environment.
+             """
+             # ...
+
+ Key Implementation Details
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ The ``IPDEnv`` class implements several key features:
+
+ 1. **Two-Agent Support**: The environment tracks two agents ("alice" and "bob") and manages their interactions.
+
+ 2. **Round-Based Play**: The environment enforces turn structure and tracks game history.
+
+ 3. **Payoff Matrix**: The environment calculates rewards based on the standard prisoner's dilemma payoff matrix.
+
+ 4. **Observation Generation**: The environment generates detailed observations for each agent, including action history and rewards.
+
+ 5. **Game Termination**: The environment tracks game termination after the specified number of rounds.
+
+ Observation Structure
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Each agent receives an observation dictionary with the following structure:
+
+ .. code-block:: python
+
+     {
+         "current_round": int,                  # Current round number (0-indexed)
+         "rounds_per_game": int,                # Total number of rounds in the game
+         "history": List[Dict],                 # Complete game history so far
+         "last_round_actions": Dict[str, str],  # Actions from the previous round (if any)
+         "last_round_reward": float,            # Reward received in the previous round (if any)
+         "total_reward": float,                 # Cumulative reward so far
+         "payoff_matrix": Dict[str, float],     # The game's payoff matrix values
+     }
+
+ Action Structure
+ ~~~~~~~~~~~~~~~~
+
+ Actions are simple strings:
+
+ 1. ``"C"`` for Cooperate
+ 2. ``"D"`` for Defect
+
+ IPDAgent
+ --------
+
+ The ``IPDAgent`` class implements the agent handler interface for the Iterated Prisoner's Dilemma, processing observations from the environment and generating actions through an LLM.
+
+ .. code-block:: python
+
+     class IPDAgent:
+         """
+         Agent handler for the Iterated Prisoner's Dilemma, implementing the AgentState interface
+         for the multi-agent negotiation standard.
+         """
+
+         def __init__(
+             self,
+             agent_id: str,
+             policy_id: str = "llm_policy",
+             system_prompt: Optional[str] = None,
+             max_errors: int = 3,
+             opponent_id: Optional[str] = None,
+         ):
+             """
+             Initialize the IPD agent handler.
+
+             Args:
+                 agent_id: Identifier for this agent ("alice" or "bob")
+                 policy_id: Identifier for the policy this agent uses
+                 system_prompt: Optional custom system prompt for the LLM
+                 max_errors: Maximum number of parsing errors before defaulting to cooperate
+                 opponent_id: Optional identifier of the opponent (inferred if not provided)
+             """
+             # ...
+
+         def step(self, observation_from_env: Dict[str, Any], policy_output: Optional[str] = None) -> Tuple[str, Dict[str, Any], str, bool, Dict[str, Any]]:
+             """
+             Update the agent state based on the observation and process the policy output.
+
+             Args:
+                 observation_from_env: The observation from the environment
+                 policy_output: The output from the policy (LLM response)
+
+             Returns:
+                 policy_id: The policy identifier
+                 policy_input: The input to the policy
+                 action: The action to be sent to the environment
+                 done: Whether the action is ready to be sent to the environment
+                 info: Additional information about the agent
+             """
+             # ...
+
+ Key Implementation Details
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ The ``IPDAgent`` class implements several key features:
+
+ 1. **LLM Interaction**: The agent generates prompts for an LLM and processes the LLM's responses.
+
+ 2. **Action Extraction**: The agent parses the LLM's output to extract valid actions (C or D).
+
+ 3. **Error Handling**: The agent provides helpful error messages when parsing fails and defaults to cooperation after multiple failures.
+
+ 4. **History Tracking**: The agent maintains and provides the complete game history in its prompts.
+
+ 5. **Strategy Explanation**: The agent can extract and log the reasoning behind an LLM's decisions.
+
+ Prompt Structure
+ ~~~~~~~~~~~~~~~~
+
+ The agent generates prompts that include:
+
+ 1. **System Prompt**: Instructions and context for the LLM, explaining its role and the rules of the Prisoner's Dilemma.
+
+ 2. **Game State Description**: A text description of the current game state, including:
+
+    - Current round number
+    - History of previous rounds (if any)
+    - Cumulative score
+
+ 3. **Action Request**: Instructions on how to format the response, requiring an explicit action tag.
+
+ Example system prompt:
+
+ .. code-block:: text
+
+     You are playing as Alice in an Iterated Prisoner's Dilemma game against Bob.
+     In each round, you must choose to either Cooperate (C) or Defect (D).
+
+     The payoffs are:
+     - If both players Cooperate: You each get 3 points
+     - If both players Defect: You each get 1 point
+     - If you Cooperate and Bob Defects: You get 0 points, Bob gets 5 points
+     - If you Defect and Bob Cooperates: You get 5 points, Bob gets 0 points
+
+     Your goal is to maximize your total points across all rounds.
+     The game will last for exactly 10 rounds, and both players know this.
+
+ Example game state prompt:
+
+ .. code-block:: text
+
+     Current round: 3/10
+
+     History:
+     Round 1: You chose C, Bob chose C. You earned 3 points.
+     Round 2: You chose C, Bob chose D. You earned 0 points.
+
+     Your total score so far: 3 points
+
+     What is your choice for round 3?
+     Please respond with <action>C</action> to cooperate or <action>D</action> to defect,
+     and explain your reasoning.
+
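+ Extracting the tagged action from the LLM's free-form reply can be as simple as
+ a regex (a sketch of the idea; the package's actual parsing logic may differ):
+
+ .. code-block:: python
+
+     import re
+
+     def extract_action(llm_reply: str) -> str | None:
+         """Return "C" or "D" from an <action>...</action> tag, or None."""
+         match = re.search(r"<action>\s*([CD])\s*</action>", llm_reply)
+         return match.group(1) if match else None
+
+     assert extract_action("I will cooperate: <action>C</action>") == "C"
+     assert extract_action("no tag here") is None
+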
+ Running IPD Games
+ -----------------
+
+ To run Iterated Prisoner's Dilemma games with LLM agents, you can use the following code structure:
+
+ .. code-block:: python
+
+     from mllm.environments.ipd.ipd_game import IPDEnv
+     from mllm.environments.ipd.ipd_agent import IPDAgent
+     from mllm.run_matches import run_batched_matches
+
+     # Create environment
+     env = IPDEnv(
+         rounds_per_game=10,
+         reward=3.0,
+         punishment=1.0,
+         temptation=5.0,
+         sucker=0.0
+     )
+
+     # Create agent handlers
+     agent_handlers = {
+         "alice": IPDAgent(agent_id="alice"),
+         "bob": IPDAgent(agent_id="bob")
+     }
+
+     # Define policy mapping
+     policy_mapping = {
+         "llm_policy": my_llm_policy_function
+     }
+
+     # Run the game
+     game_results = run_batched_matches(
+         envs=[env],
+         agent_handlers_per_env=[agent_handlers],
+         policy_mapping=policy_mapping,
+         max_parallel_matches=1
+     )
+
+     # Process results
+     for result in game_results:
+         print(f"Game finished. Scores: {result['total_rewards']}")
+
+ Statistics and Analysis
+ -----------------------
+
+ The IPD environment includes utility functions for analyzing game outcomes:
+
+ 1. **Cooperation Rates**: Percentage of rounds where each agent cooperated.
+ 2. **Mutual Cooperation/Defection**: Percentage of rounds where both agents made the same choice.
+ 3. **Score Distribution**: Analysis of how points were accumulated over the game.
+
+ These statistics can be calculated using the ``gather_ipd_statistics`` function:
+
+ .. code-block:: python
+
+     from mllm.environments.ipd.ipd_statistics_funcs import gather_ipd_statistics
+
+     stats = gather_ipd_statistics(match_info, env_info)
+     print(f"Cooperation rates: {stats['cooperation_rate']}")
+     print(f"Mutual cooperation rate: {stats['mutual_cooperation_rate']}")
+     print(f"Mutual defection rate: {stats['mutual_defection_rate']}")
+
+ Limitations and Considerations
+ ------------------------------
+
+ 1. **Determinism**: The environment is deterministic, with randomness only in initialization if a seed is provided.
+
+ 2. **Limited Player Count**: The IPD environment only supports exactly two players.
+
+ 3. **Perfect Information**: Both players have perfect information about the game history.
+
+ 4. **Simultaneous Actions**: Both players act simultaneously, which requires adaptations for some LLM interfaces.
+
+ 5. **Fixed Game Length**: The total number of rounds is fixed and known to both players from the start.
+
+ Advanced Usage
+ --------------
+
+ For advanced usage, you can customize:
+
+ 1. **Payoff Matrix**: Modify reward values to create different incentive structures.
+
+ 2. **System Prompts**: Customize the LLM's understanding of the game and potential strategies.
+
+ 3. **Error Handling**: Adjust how the agent responds to invalid LLM outputs.
+
+ 4. **Analysis**: Create custom statistics gathering for specific research questions.
+
+ 5. **Integration**: Connect the IPD environment to other negotiation frameworks or tournament systems.
src_code_for_reproducibility/docs/source/index.rst ADDED
@@ -0,0 +1,22 @@
+ Welcome to LLM Negotiation's documentation!
+ ===========================================
+
+ This library is a collection of tools for training and evaluating LLM-based agents in multi-agent environments. It is designed to be easy to use and extend.
+
+ .. toctree::
+    :maxdepth: 3
+    :caption: Contents:
+
+    installation
+    marl_standard
+    environments
+    launch
+    usage
+    modules
+    contributing
+
+ Indices and tables
+ ==================
+
+ * :ref:`genindex`
+ * :ref:`modindex`
+ * :ref:`search`
src_code_for_reproducibility/docs/source/installation.rst ADDED
@@ -0,0 +1,10 @@
+ Installation
+ ============
+
+ To install the package, run:
+
+ .. code-block:: bash
+
+     git clone https://github.com/yourusername/llm_negotiation.git
+     cd llm_negotiation
+     pip install -e .
src_code_for_reproducibility/docs/source/launch.rst ADDED
File without changes
src_code_for_reproducibility/docs/source/marl_standard.rst ADDED
@@ -0,0 +1,141 @@
+ ==========================================================
+ Abstract Standard for Multi-Agent Negotiation Environments
+ ==========================================================
+
+ Multi-Agent Negotiation Environments require more features than gymnasium environments in order to be used as interfaces in general game-running code.
+ The four fundamental differences between gymnasium environments and Multi-Agent Negotiation Environments are:
+
+ 1. The response from the LLM is a text action, not a discrete action. Therefore, appropriate parsing of the text is required, and the model may need to be run multiple times to get the full action.
+    This is why we introduce the ``AgentHandler`` class, which is responsible for parsing the LLM's response.
+ 2. The environment needs to be able to handle multi-agent interactions.
+    This is why we introduce the ``NegotiationEnvironment`` class, which is responsible for handling the multi-agent interactions.
+ 3. MARL environments are complex to describe. In different contexts, the same environment may be described differently. Therefore, both the environment and the agent handlers are
+    responsible for describing a particular trajectory. This information is given by the ``get_log_info`` method.
+ 4. There might be a lot of overlap between the neural networks used by each agent. For instance, the same model may be used for all agents. This motivates the requirement for a
+    policy identifier for each agent.
+
+ Taking inspiration from the `gymnasium <https://gymnasium.farama.org/>`_ library, we introduce a new standard for Multi-Agent Negotiation Environments.
+
+ Our standard is based on the following features:
+
+ Environments are of the form:
+
+ .. code-block:: python
+
+     class MarlEnvironment():
+
+         def __init__(self):
+             """Initialize the environment."""
+             pass
+
+         def reset(self):
+             """Reset the environment to an initial state and return the initial observation.
+
+             Returns:
+                 observation (dict): A dictionary where keys are agent identifiers and values are observations.
+             """
+             # (...)
+             return observation
+
+         def step(self, actions):
+             """Take a step in the environment using the provided actions.
+
+             Args:
+                 actions (dict): A dictionary where keys are agent identifiers and values are actions.
+
+             Returns:
+                 observations (dict): A dictionary where keys are agent identifiers and values are observations.
+                 done (bool): Whether the episode has ended.
+                 info (dict): Additional information about the environment.
+             """
+             # (...)
+             return observations, done, info
+
+         def get_log_info(self):
+             """Get additional information about the environment. This information is used to log the game.
+
+             Returns:
+                 log_info (dict): Information about the environment required to log the game.
+             """
+             # (...)
+             return log_info
+
+         def render(self):
+             """Render the current state of the environment."""
+             pass
+
+         def close(self):
+             """Perform any necessary cleanup."""
+             pass
+
+
+     class AgentState():
+
+         def __init__(self):
+             """Initialize the agent state."""
+             pass
+
+         def step(self, observation_from_env, policy_output=None):
+             """Update the agent state based on the observation and action.
+             The action is the output of the LLM.
+
+             Args:
+                 observation_from_env (dict): The observation of the environment.
+                 policy_output : The output of the policy.
+
+             Returns:
+                 policy_id (str): The policy identifier.
+                 policy_input (dict): The input to the policy.
+                 action : The official action to be sent to the environment.
+                 done (bool): Whether the LLM action is ready to be sent to the environment.
+                 info (dict): Additional information about the agent.
+             """
+             # (...)
+             return policy_id, policy_input, action, done, info
+
+         def get_log_info(self):
+             """Get information about the agent required to log a trajectory.
+
+             Returns:
+                 log_info (dict): Information about the agent required to log a trajectory.
+             """
+             # (...)
+             return log_info
+
+         def render(self):
+             """Render the current state of the agent."""
+             pass
+
+         def close(self):
+             """Perform any necessary cleanup."""
+             pass
+
+
+ Implicitly, the keys of the ``observations`` dictionary returned by the ``step`` method of the ``MarlEnvironment`` interface represent the set of agents from which an action is expected at the current step. The next step should only expect actions from the agents in the ``observations`` dictionary.
+
+ As you can see, both classes have a ``get_log_info`` method. This method is used to log the game. It returns a dictionary with keys being the agent identifiers and values being the information to log. The reason we need this is that the environment and the agent handler may need to log different information, which makes it easier to log from the perspective of each agent. The core environment class should not need to know about the details of the agent handler.
+
+
+ Running Environments in Parallel
+ --------------------------------
+
+ This standard allows the use of the ``run_batched_matches`` function (TODO: link) to run environments in an efficient way. The core idea is to batch the policy calls for all agents across all environments; a simplified sketch of the loop follows the note below.
+
+ .. note::
+     The ``run_batched_matches`` function allows you to run multiple negotiation games, or "matches," in parallel.
+     After each environment is initialized, the function continuously loops over all active matches and checks which agents
+     are still pending actions. Each agent's logic can require multiple calls to the policy (e.g., an LLM) before an action
+     becomes "ready" to be sent to the environment. (For instance, an agent might need multiple policy calls before having a string that can be parsed into a valid action.) While an agent is waiting for a policy output, these calls for all agents across all matches are grouped together by unique policy identifier and processed in batch for efficiency. This is the core functionality of the ``run_batched_matches`` function.
+
+     Only once all actions from the required agents at a given step for an environment are ready does the function make a single ``env.step(...)`` call; this ensures
+     every match moves forward in lockstep for all its active agents. As soon as an environment signals it is done, the function
+     retrieves logged information from both the environment and the agent states before removing the match from the active set.
+
+     If there are more matches waiting to be processed, they are then started one by one to maintain the specified degree of parallelism.
+     This batching approach provides an efficient mechanism to handle multi-agent or multi-policy environments, ensuring minimal
+     overhead and a clear, unified flow for stepping through matches.
+
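+ A simplified sketch of that loop (illustrative pseudocode, not the actual
+ implementation; the ``match`` helper methods used here, such as
+ ``pending_agents`` and ``all_actions_ready``, are hypothetical):
+
+ .. code-block:: python
+
+     def run_batched_matches_sketch(matches, policy_mapping):
+         """Step several matches forward, batching policy calls by policy_id."""
+         while matches:
+             # 1. Ask every pending agent for its next policy request.
+             requests = {}  # policy_id -> list of (match, agent_id, policy_input)
+             for match in matches:
+                 for agent_id, agent in match.pending_agents():
+                     policy_id, policy_input, action, ready, info = agent.step(
+                         match.observations[agent_id], match.last_output(agent_id))
+                     if ready:
+                         match.actions[agent_id] = action
+                     else:
+                         requests.setdefault(policy_id, []).append(
+                             (match, agent_id, policy_input))
+             # 2. One batched call per unique policy across all matches.
+             for policy_id, batch in requests.items():
+                 outputs = policy_mapping[policy_id]([p for _, _, p in batch])
+                 for (match, agent_id, _), out in zip(batch, outputs):
+                     match.set_output(agent_id, out)
+             # 3. Step every environment whose required agents are all ready.
+             for match in list(matches):
+                 if match.all_actions_ready():
+                     match.observations, done, info = match.env.step(match.actions)
+                     match.actions = {}
+                     if done:
+                         matches.remove(match)
+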
+ Here is a diagram that shows how the ``run_batched_matches`` function works at a high level:
+
+ .. image:: media/runbatch.png
+    :alt: High-level flow of the run_batched_matches function
+    :width: 1000px
src_code_for_reproducibility/docs/source/media/runbatch.png ADDED
src_code_for_reproducibility/docs/source/src.environments.dond.dond_agent.rst ADDED
@@ -0,0 +1,7 @@
+ src.environments.dond.dond\_agent module
+ ========================================
+
+ .. automodule:: src.environments.dond.dond_agent
+    :members:
+    :undoc-members:
+    :show-inheritance:
src_code_for_reproducibility/docs/source/src.environments.dond.dond_game.rst ADDED
@@ -0,0 +1,7 @@
+ src.environments.dond.dond\_game module
+ =======================================
+
+ .. automodule:: src.environments.dond.dond_game
+    :members:
+    :undoc-members:
+    :show-inheritance:
src_code_for_reproducibility/docs/source/src.environments.dond.dond_log_funcs.rst ADDED
@@ -0,0 +1,7 @@
+ src.environments.dond.dond\_log\_funcs module
+ =============================================
+
+ .. automodule:: src.environments.dond.dond_log_funcs
+    :members:
+    :undoc-members:
+    :show-inheritance:
src_code_for_reproducibility/docs/source/src.environments.dond.dond_player.rst ADDED
@@ -0,0 +1,7 @@
+ src.environments.dond.dond\_player module
+ ==========================================
+
+ .. automodule:: src.environments.dond.dond_player
+    :members:
+    :undoc-members:
+    :show-inheritance:
src_code_for_reproducibility/docs/source/src.environments.dond.dond_return_funcs.rst ADDED
@@ -0,0 +1,7 @@
+ src.environments.dond.dond\_return\_funcs module
+ ================================================
+
+ .. automodule:: src.environments.dond.dond_return_funcs
+    :members:
+    :undoc-members:
+    :show-inheritance:
src_code_for_reproducibility/docs/source/src.environments.dond.dond_training_data_funcs.rst ADDED
@@ -0,0 +1,7 @@
+ src.environments.dond.dond\_training\_data\_funcs module
+ ========================================================
+
+ .. automodule:: src.environments.dond.dond_training_data_funcs
+    :members:
+    :undoc-members:
+    :show-inheritance:
src_code_for_reproducibility/docs/source/src.environments.env_imports.rst ADDED
@@ -0,0 +1,7 @@
+ src.environments.env\_imports module
+ ====================================
+
+ .. automodule:: src.environments.env_imports
+    :members:
+    :undoc-members:
+    :show-inheritance:
src_code_for_reproducibility/docs/source/src.environments.environment_imports.rst ADDED
@@ -0,0 +1,7 @@
+ src.environments.environment\_imports module
+ ============================================
+
+ .. automodule:: src.environments.environment_imports
+    :members:
+    :undoc-members:
+    :show-inheritance:
src_code_for_reproducibility/docs/source/src.environments.ipd.ipd_agent.rst ADDED
@@ -0,0 +1,7 @@
+ src.environments.ipd.ipd\_agent module
+ ======================================
+
+ .. automodule:: src.environments.ipd.ipd_agent
+    :members:
+    :undoc-members:
+    :show-inheritance:
src_code_for_reproducibility/docs/source/src.environments.ipd.ipd_game.rst ADDED
@@ -0,0 +1,7 @@
+ src.environments.ipd.ipd\_game module
+ =====================================
+
+ .. automodule:: src.environments.ipd.ipd_game
+    :members:
+    :undoc-members:
+    :show-inheritance:
src_code_for_reproducibility/docs/source/src.environments.ipd.ipd_statistics_funcs.rst ADDED
@@ -0,0 +1,7 @@
+ src.environments.ipd.ipd\_statistics\_funcs module
+ ==================================================
+
+ .. automodule:: src.environments.ipd.ipd_statistics_funcs
+    :members:
+    :undoc-members:
+    :show-inheritance:
src_code_for_reproducibility/docs/source/src.environments.rst ADDED
@@ -0,0 +1,25 @@
+ src.environments package
+ ========================
+
+ .. automodule:: src.environments
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+ Subpackages
+ -----------
+
+ .. toctree::
+    :maxdepth: 4
+
+    src.environments.dond
+    src.environments.ipd
+
+ Submodules
+ ----------
+
+ .. toctree::
+    :maxdepth: 4
+
+    src.environments.env_imports
+    src.environments.environment_imports
src_code_for_reproducibility/docs/source/src.experiments.arithmetic_test.rst ADDED
@@ -0,0 +1,7 @@
+ src.experiments.arithmetic\_test module
+ =======================================
+
+ .. automodule:: src.experiments.arithmetic_test
+    :members:
+    :undoc-members:
+    :show-inheritance:
src_code_for_reproducibility/docs/source/src.experiments.last_completion.rst ADDED
@@ -0,0 +1,7 @@
+ src.experiments.last\_completion module
+ =======================================
+
+ .. automodule:: src.experiments.last_completion
+    :members:
+    :undoc-members:
+    :show-inheritance:
src_code_for_reproducibility/docs/source/src.generation.rst ADDED
@@ -0,0 +1,15 @@
+ src.generation package
+ ======================
+
+ .. automodule:: src.generation
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+ Submodules
+ ----------
+
+ .. toctree::
+    :maxdepth: 4
+
+    src.generation.run_games
src_code_for_reproducibility/docs/source/src.models.dummy_local_llm.rst ADDED
@@ -0,0 +1,7 @@
+ src.models.dummy\_local\_llm module
+ ===================================
+
+ .. automodule:: src.models.dummy_local_llm
+    :members:
+    :undoc-members:
+    :show-inheritance:
src_code_for_reproducibility/docs/source/src.models.local_llm.rst ADDED
@@ -0,0 +1,7 @@
+ src.models.local\_llm module
+ ============================
+
+ .. automodule:: src.models.local_llm
+    :members:
+    :undoc-members:
+    :show-inheritance:
src_code_for_reproducibility/docs/source/src.models.new_local_llm.rst ADDED
@@ -0,0 +1,7 @@
+ src.models.new\_local\_llm module
+ =================================
+
+ .. automodule:: src.models.new_local_llm
+    :members:
+    :undoc-members:
+    :show-inheritance:
src_code_for_reproducibility/docs/source/src.models.rst ADDED
@@ -0,0 +1,20 @@
+ src.models package
+ ==================
+
+ .. automodule:: src.models
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+ Submodules
+ ----------
+
+ .. toctree::
+    :maxdepth: 4
+
+    src.models.dummy_local_llm
+    src.models.local_llm
+    src.models.new_local_llm
+    src.models.server_llm
+    src.models.updatable_worker
+    src.models.vllm_worker_wrap
src_code_for_reproducibility/docs/source/src.models.updatable_worker.rst ADDED
@@ -0,0 +1,7 @@
+ src.models.updatable\_worker module
+ ===================================
+
+ .. automodule:: src.models.updatable_worker
+    :members:
+    :undoc-members:
+    :show-inheritance:
src_code_for_reproducibility/docs/source/src.training.reinforce_training.rst ADDED
@@ -0,0 +1,7 @@
+ src.training.reinforce\_training module
+ =======================================
+
+ .. automodule:: src.training.reinforce_training
+    :members:
+    :undoc-members:
+    :show-inheritance:
src_code_for_reproducibility/docs/source/src.training.rl_convs_processing.rst ADDED
@@ -0,0 +1,7 @@
+ src.training.rl\_convs\_processing module
+ =========================================
+
+ .. automodule:: src.training.rl_convs_processing
+    :members:
+    :undoc-members:
+    :show-inheritance:
src_code_for_reproducibility/docs/source/src.training.rst ADDED
@@ -0,0 +1,19 @@
+ src.training package
+ ====================
+
+ .. automodule:: src.training
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+ Submodules
+ ----------
+
+ .. toctree::
+    :maxdepth: 4
+
+    src.training.ppo_train
+    src.training.ppo_train_value_head
+    src.training.reinforce_training
+    src.training.rl_convs_processing
+    src.training.train_main
src_code_for_reproducibility/docs/source/src.utils.log_gpu_usage.rst ADDED
@@ -0,0 +1,7 @@
+ src.utils.log\_gpu\_usage module
+ ================================
+
+ .. automodule:: src.utils.log_gpu_usage
+    :members:
+    :undoc-members:
+    :show-inheritance:
src_code_for_reproducibility/docs/source/src.utils.rst ADDED
@@ -0,0 +1,24 @@
+ src.utils package
+ =================
+
+ .. automodule:: src.utils
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+ Submodules
+ ----------
+
+ .. toctree::
+    :maxdepth: 4
+
+    src.utils.common_imports
+    src.utils.export_ppo_training_set
+    src.utils.extra_stats
+    src.utils.inherit_args
+    src.utils.log_gpu_usage
+    src.utils.log_statistics
+    src.utils.model_to_cpu
+    src.utils.parallel_shuffle
+    src.utils.quick_stats
+    src.utils.update_start_epoch
src_code_for_reproducibility/docs/source/src.utils.update_start_epoch.rst ADDED
@@ -0,0 +1,7 @@
+ src.utils.update\_start\_epoch module
+ =====================================
+
+ .. automodule:: src.utils.update_start_epoch
+    :members:
+    :undoc-members:
+    :show-inheritance:
src_code_for_reproducibility/docs/source/usage.rst ADDED
File without changes
src_code_for_reproducibility/markov_games/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (173 Bytes). View file
 
src_code_for_reproducibility/markov_games/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (161 Bytes). View file
 
src_code_for_reproducibility/markov_games/__pycache__/agent.cpython-312.pyc ADDED
Binary file (3.2 kB). View file
 
src_code_for_reproducibility/markov_games/__pycache__/alternative_actions_runner.cpython-312.pyc ADDED
Binary file (4.95 kB). View file
 
src_code_for_reproducibility/markov_games/__pycache__/gather_and_export_utils.cpython-312.pyc ADDED
Binary file (46.5 kB). View file
 
src_code_for_reproducibility/markov_games/__pycache__/group_timesteps.cpython-312.pyc ADDED
Binary file (6.17 kB). View file
 
src_code_for_reproducibility/markov_games/__pycache__/linear_runner.cpython-312.pyc ADDED
Binary file (1.25 kB). View file
 
src_code_for_reproducibility/markov_games/__pycache__/markov_game.cpython-312.pyc ADDED
Binary file (9.72 kB). View file
 
src_code_for_reproducibility/markov_games/__pycache__/rollout_tree.cpython-311.pyc ADDED
Binary file (4.75 kB). View file
 
src_code_for_reproducibility/markov_games/__pycache__/rollout_tree.cpython-312.pyc ADDED
Binary file (3.67 kB). View file
 
src_code_for_reproducibility/markov_games/__pycache__/run_markov_games.cpython-312.pyc ADDED
Binary file (1.14 kB). View file
 
src_code_for_reproducibility/markov_games/__pycache__/simulation.cpython-312.pyc ADDED
Binary file (3.9 kB). View file
 
src_code_for_reproducibility/markov_games/diplomacy/diplomacy_agent.py ADDED
@@ -0,0 +1,259 @@
+ import random
+
+
+ class DiplomacyAgent:
+     """Agent handler for the Diplomacy game that follows the MARL standard.
+
+     This class is responsible for parsing LLM output into valid Diplomacy orders,
+     managing the agent state, and providing information for logging.
+     """
+
+     def __init__(self, policy_id: str, power_name: str, random_valid_move=False):
+         """Initialize the agent handler for a power in the Diplomacy game.
+
+         Args:
+             policy_id: The identifier for the policy this agent uses
+             power_name: The name of the power this agent controls (e.g., 'FRANCE', 'ENGLAND')
+             random_valid_move: If True, select random valid moves instead of querying the LLM (default: False)
+         """
+         self.policy_id = policy_id
+         self.power_name = power_name
+         self.orders = []
+         self.wait = True
+         self.processing_state = "WAITING_FOR_ORDERS"
+         self.parsed_orders = []
+         self.order_status = {}
+         self.message_history = []
+         self.random_valid_move = random_valid_move
+
+     def step(self, observation_from_env, policy_output=None):
+         """Update the agent state based on the observation and LLM output.
+
+         Args:
+             observation_from_env: The observation from the environment
+             policy_output: The output from the LLM, if any
+
+         Returns:
+             policy_id: The policy identifier
+             policy_input: The input to the policy
+             action: The official action to be sent to the environment
+             done: Whether the action is ready to be sent to the environment
+             info: Additional information about the agent
+         """
+         info = {}
+
+         # If random_valid_move is enabled, select random valid moves.
+         if self.random_valid_move:
+             valid_orders = self._select_random_valid_moves(observation_from_env)
+             self.orders = valid_orders
+             self.wait = False
+             action = {
+                 "orders": valid_orders,
+                 "wait": False,
+             }
+             return self.policy_id, {}, action, True, info
+
+         # No policy output means this is the initial step: prepare the prompt.
+         if policy_output is None:
+             phase = observation_from_env.get('phase', '')
+             units = observation_from_env.get('units', {}).get(self.power_name, [])
+             centers = observation_from_env.get('centers', {}).get(self.power_name, [])
+             orderable_locations = observation_from_env.get('orderable_locations', {})
+
+             prompt = self._create_prompt(phase, units, centers, orderable_locations)
+
+             return self.policy_id, {"prompt": prompt}, None, False, info
+
+         # Process the LLM output to extract orders.
+         success, parsed_orders = self._parse_llm_output(policy_output)
+         self.parsed_orders = parsed_orders
+
+         if not success:
+             # Could not parse any orders; ask the LLM for clarification.
+             clarification_prompt = self._create_clarification_prompt(policy_output, parsed_orders)
+             return self.policy_id, {"prompt": clarification_prompt}, None, False, info
+
+         # Validate the orders for the current phase.
+         valid_orders = self._validate_orders(parsed_orders, observation_from_env)
+
+         if valid_orders:
+             # Orders are valid; prepare the action for the environment.
+             self.orders = valid_orders
+             self.wait = False
+             action = {
+                 "orders": valid_orders,
+                 "wait": False,
+             }
+             return self.policy_id, {}, action, True, info
+         else:
+             # Orders are invalid; ask for new ones.
+             error_prompt = self._create_error_prompt(parsed_orders, observation_from_env)
+             return self.policy_id, {"prompt": error_prompt}, None, False, info
+
+     def _create_prompt(self, phase, units, centers, orderable_locations):
+         """Create the initial prompt for the LLM.
+
+         Args:
+             phase: The current game phase
+             units: List of units controlled by this power
+             centers: List of supply centers controlled by this power
+             orderable_locations: Locations where orders can be issued
+
+         Returns:
+             A prompt string for the LLM
+         """
+         prompt = f"You are playing as {self.power_name} in Diplomacy. The current phase is {phase}.\n\n"
+         prompt += f"Your units: {', '.join(units)}\n"
+         prompt += f"Your supply centers: {', '.join(centers)}\n"
+         prompt += f"Locations you can order: {', '.join(orderable_locations)}\n\n"
+
+         if phase.endswith('M'):  # Movement phase
+             prompt += "Please provide orders for your units in the form:\n"
+             prompt += "- A LON H (hold)\n"
+             prompt += "- F NTH - NWY (move)\n"
+             prompt += "- A WAL S F LON (support)\n"
+             prompt += "- F NWG C A NWY - EDI (convoy)\n"
+         elif phase.endswith('R'):  # Retreat phase
+             prompt += "Please provide retreat orders for your dislodged units:\n"
+             prompt += "- A PAR R MAR (retreat to MAR)\n"
+             prompt += "- A PAR D (disband)\n"
+         elif phase.endswith('A'):  # Adjustment phase
+             if len(units) < len(centers):
+                 prompt += "You can build units. Please provide build orders:\n"
+                 prompt += "- A PAR B (build army in PAR)\n"
+                 prompt += "- F BRE B (build fleet in BRE)\n"
+                 prompt += "- WAIVE (waive a build)\n"
+             elif len(units) > len(centers):
+                 prompt += "You must remove units. Please provide disband orders:\n"
+                 prompt += "- A PAR D (disband army in PAR)\n"
+                 prompt += "- F BRE D (disband fleet in BRE)\n"
+
+         prompt += "\nProvide your orders as a list, one per line."
+         return prompt
+
+     def _parse_llm_output(self, llm_output):
+         """Parse the LLM output to extract orders.
+
+         Args:
+             llm_output: The raw output from the LLM
+
+         Returns:
+             success: Whether parsing was successful
+             parsed_orders: List of parsed orders
+         """
+         # Simple line-based parsing: keep the lines that look like orders.
+         lines = llm_output.strip().split('\n')
+         orders = []
+
+         for line in lines:
+             # Remove list markers, hyphens, bullets, etc.
+             line = line.strip('- *•').strip()
+
+             # Skip empty lines and lines of commentary.
+             if not line or line.startswith('I ') or line.startswith("Let's"):
+                 continue
+
+             # Keep lines that look like Diplomacy orders.
+             if (' H' in line or ' -' in line or ' S ' in line or ' C ' in line or
+                     ' R ' in line or ' D' in line or ' B' in line or line == 'WAIVE'):
+                 orders.append(line)
+
+         return len(orders) > 0, orders
+
+     def _validate_orders(self, orders, observation):
+         """Validate that the orders are legal for the current phase.
+
+         Args:
+             orders: List of orders to validate
+             observation: Current observation from the environment
+
+         Returns:
+             List of valid orders, or None if invalid
+         """
+         # For simplicity, assume all parsed orders are valid.
+         # A full implementation would use the game engine's validation logic.
+         return orders
+
+     def _create_clarification_prompt(self, previous_output, parsed_orders):
+         """Create a prompt asking for clarification when orders could not be parsed.
+
+         Args:
+             previous_output: The previous LLM output
+             parsed_orders: Any orders that were successfully parsed
+
+         Returns:
+             A prompt string for the LLM
+         """
+         prompt = f"I couldn't fully understand your orders for {self.power_name}. "
+
+         if parsed_orders:
+             prompt += "I understood these orders:\n"
+             for order in parsed_orders:
+                 prompt += f"- {order}\n"
+
+         prompt += "\nPlease provide clear, valid Diplomacy orders in the format:\n"
+         prompt += "- A LON H\n- F NTH - NWY\n- etc.\n"
+         return prompt
+
+     def _create_error_prompt(self, invalid_orders, observation):
+         """Create a prompt when orders are invalid.
+
+         Args:
+             invalid_orders: The invalid orders
+             observation: Current observation from the environment
+
+         Returns:
+             A prompt string for the LLM
+         """
+         prompt = f"The following orders for {self.power_name} are invalid:\n"
+         for order in invalid_orders:
+             prompt += f"- {order}\n"
+
+         prompt += "\nPlease provide valid orders for your units."
+         return prompt
+
+     def get_log_info(self):
+         """Get the information about the agent required to log a trajectory.
+
+         Returns:
+             log_info: Information about the agent required to log a trajectory.
+         """
+         return {
+             "power_name": self.power_name,
+             "orders": self.orders,
+             "wait": self.wait,
+             "parsing_state": self.processing_state,
+             "message_history": self.message_history,
+         }
+
+     def render(self):
+         """Render the current state of the agent."""
+         print(f"Power: {self.power_name}")
+         print(f"Orders: {self.orders}")
+         print(f"Wait: {self.wait}")
+
+     def close(self):
+         """Perform any necessary cleanup."""
+         pass
+
+     def _select_random_valid_moves(self, observation):
+         """Select a random valid move for each orderable location.
+
+         Args:
+             observation: Current observation from the environment
+
+         Returns:
+             List of valid orders
+         """
+         possible_orders = observation.get('possible_orders', {})
+         valid_orders = []
+
+         # For each location with possible orders, pick one at random.
+         for location, orders in possible_orders.items():
+             if orders:  # If there are any possible orders for this location
+                 valid_orders.append(random.choice(orders))
+
+         return valid_orders
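+
+
+ if __name__ == "__main__":
+     # Minimal usage sketch of the step() protocol (prompt -> parse -> action).
+     # The observation below is a hand-written assumption that mirrors the keys
+     # read in step(); it is illustrative only, not a real adjudicated game state.
+     observation = {
+         "phase": "S1901M",
+         "units": {"FRANCE": ["A PAR", "A MAR", "F BRE"]},
+         "centers": {"FRANCE": ["PAR", "MAR", "BRE"]},
+         "orderable_locations": ["PAR", "MAR", "BRE"],
+     }
+     agent = DiplomacyAgent(policy_id="france_policy", power_name="FRANCE")
+
+     # First call: no policy output yet, so the agent returns a prompt for the LLM.
+     pid, policy_input, action, done, info = agent.step(observation)
+     assert action is None and not done
+     print(policy_input["prompt"])
+
+     # Second call: feed back a canned completion; the agent parses it into orders.
+     llm_reply = "- A PAR H\n- A MAR - SPA\n- F BRE - MAO"
+     pid, policy_input, action, done, info = agent.step(observation, policy_output=llm_reply)
+     assert done and action["orders"] == ["A PAR H", "A MAR - SPA", "F BRE - MAO"]
+     agent.render()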