Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes. See raw diff for the full change set.
- src_code_for_reproducibility/__pycache__/__init__.cpython-312.pyc +0 -0
- src_code_for_reproducibility/chat_utils/apply_template.py +78 -0
- src_code_for_reproducibility/chat_utils/chat_turn.py +27 -0
- src_code_for_reproducibility/chat_utils/template_specific.py +87 -0
- src_code_for_reproducibility/docs/Makefile +19 -0
- src_code_for_reproducibility/docs/generate_docs.py +249 -0
- src_code_for_reproducibility/docs/make.bat +35 -0
- src_code_for_reproducibility/markov_games/__pycache__/__init__.cpython-312.pyc +0 -0
- src_code_for_reproducibility/markov_games/__pycache__/agent.cpython-312.pyc +0 -0
- src_code_for_reproducibility/markov_games/__pycache__/alternative_actions_runner.cpython-312.pyc +0 -0
- src_code_for_reproducibility/markov_games/__pycache__/gather_and_export_utils.cpython-312.pyc +0 -0
- src_code_for_reproducibility/markov_games/__pycache__/group_timesteps.cpython-312.pyc +0 -0
- src_code_for_reproducibility/markov_games/__pycache__/linear_runner.cpython-312.pyc +0 -0
- src_code_for_reproducibility/markov_games/__pycache__/markov_game.cpython-312.pyc +0 -0
- src_code_for_reproducibility/markov_games/__pycache__/mg_utils.cpython-312.pyc +0 -0
- src_code_for_reproducibility/markov_games/__pycache__/rollout_tree.cpython-312.pyc +0 -0
- src_code_for_reproducibility/markov_games/__pycache__/run_markov_games.cpython-312.pyc +0 -0
- src_code_for_reproducibility/markov_games/__pycache__/simulation.cpython-312.pyc +0 -0
- src_code_for_reproducibility/markov_games/agent.py +76 -0
- src_code_for_reproducibility/markov_games/diplomacy/diplomacy_agent.py +259 -0
- src_code_for_reproducibility/markov_games/diplomacy/diplomacy_env.py +230 -0
- src_code_for_reproducibility/markov_games/diplomacy/diplomacy_logging.py +360 -0
- src_code_for_reproducibility/markov_games/diplomacy/diplomacy_logging_for_training.py +0 -0
- src_code_for_reproducibility/markov_games/ipd/Ipd_hard_coded_agents.py +72 -0
- src_code_for_reproducibility/markov_games/ipd/__init__.py +7 -0
- src_code_for_reproducibility/markov_games/ipd/ipd_agent.py +115 -0
- src_code_for_reproducibility/markov_games/ipd/ipd_simulation.py +162 -0
- src_code_for_reproducibility/markov_games/ipd/ipd_statistics.py +18 -0
- src_code_for_reproducibility/markov_games/linear_runner.py +30 -0
- src_code_for_reproducibility/markov_games/negotiation/README.md +40 -0
- src_code_for_reproducibility/markov_games/negotiation/dond_agent.py +61 -0
- src_code_for_reproducibility/markov_games/negotiation/dond_simulation.py +153 -0
- src_code_for_reproducibility/markov_games/negotiation/nego_agent.py +242 -0
- src_code_for_reproducibility/markov_games/negotiation/nego_hard_coded_policies.py +64 -0
- src_code_for_reproducibility/markov_games/negotiation/nego_simulation.py +241 -0
- src_code_for_reproducibility/markov_games/negotiation/no_press_nego_simulation.py +168 -0
- src_code_for_reproducibility/markov_games/negotiation/tas_agent.py +108 -0
- src_code_for_reproducibility/markov_games/negotiation/tas_rps_simulation.py +248 -0
- src_code_for_reproducibility/markov_games/negotiation/tas_simple_simulation.py +169 -0
- src_code_for_reproducibility/markov_games/vine_ppo.py +10 -0
- src_code_for_reproducibility/models/__init__.py +0 -0
- src_code_for_reproducibility/models/adapter_training_wrapper.py +98 -0
- src_code_for_reproducibility/models/human_policy.py +255 -0
- src_code_for_reproducibility/models/inference_backend.py +39 -0
- src_code_for_reproducibility/models/inference_backend_dummy.py +54 -0
- src_code_for_reproducibility/models/inference_backend_sglang.py +86 -0
- src_code_for_reproducibility/models/inference_backend_sglang_local_server.py +127 -0
- src_code_for_reproducibility/models/inference_backend_vllm.py +117 -0
- src_code_for_reproducibility/models/inference_backend_vllm_local_server.py +160 -0
- src_code_for_reproducibility/models/large_language_model_api.py +171 -0
src_code_for_reproducibility/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (146 Bytes).
src_code_for_reproducibility/chat_utils/apply_template.py
ADDED
@@ -0,0 +1,78 @@
+import torch
+
+from mllm.chat_utils.chat_turn import ChatTurn
+from mllm.chat_utils.template_specific import (
+    custom_llama3_template,
+    custom_qwen2_template,
+    custom_qwen3_template,
+    qwen2_assistant_postfix,
+    qwen3_assistant_postfix,
+)
+
+
+def get_custom_chat_template(tokenizer) -> str:
+    """
+    Get the chat template for the tokenizer.
+    """
+    if "qwen2" in tokenizer.name_or_path.lower():
+        return custom_qwen2_template
+    elif "llama" in tokenizer.name_or_path.lower():
+        return custom_llama3_template
+    elif "qwen3" in tokenizer.name_or_path.lower():
+        return custom_qwen3_template
+    else:
+        raise ValueError(f"Tokenizer {tokenizer.name_or_path} not supported")
+
+
+def get_custom_assistant_postfix(tokenizer) -> torch.Tensor:
+    """
+    Get the custom assistant postfix for the tokenizer.
+    """
+    if "qwen2" in tokenizer.name_or_path.lower():
+        return qwen2_assistant_postfix
+    elif "qwen3" in tokenizer.name_or_path.lower():
+        return qwen3_assistant_postfix
+    return torch.tensor([], dtype=torch.long)
+
+
+def tokenize_chats(chats: list[ChatTurn], tokenizer, enable_thinking) -> None:
+    """
+    Set the chat_template_token_ids for each chat turn.
+    # TODO: use engine tokens if available
+    """
+    custom_template = get_custom_chat_template(tokenizer)
+    custom_assistant_postfix: torch.Tensor = get_custom_assistant_postfix(tokenizer)
+    for i, chat in enumerate(chats):
+        if chat.chat_template_token_ids is None:
+            if chat.role == "user":
+                next_chat = chats[i + 1] if i + 1 < len(chats) else None
+                add_generation_prompt = True
+                if next_chat and next_chat.role == "user":
+                    add_generation_prompt = False
+                encoded_chat = tokenizer.apply_chat_template(
+                    [chat],
+                    return_tensors="pt",
+                    chat_template=custom_template,
+                    add_generation_prompt=add_generation_prompt,
+                    add_system_prompt=True if i == 0 else False,
+                    enable_thinking=enable_thinking,
+                ).flatten()
+                previous_chat = chats[i - 1] if i > 0 else None
+                if previous_chat and previous_chat.role == "assistant":
+                    encoded_chat = torch.cat([custom_assistant_postfix, encoded_chat])
+            elif chat.role == "assistant":
+                encoded_chat = chat.out_token_ids
+            chat.chat_template_token_ids = encoded_chat
+
+
+def chat_turns_to_token_ids(
+    chats: list[ChatTurn], tokenizer, enable_thinking
+) -> list[int]:
+    """
+    Tokenize the chat turns and set the chat_template_token_ids for each chat turn.
+    """
+    tokenize_chats(chats=chats, tokenizer=tokenizer, enable_thinking=enable_thinking)
+    token_ids = []
+    for chat in chats:
+        token_ids.append(chat.chat_template_token_ids)
+    return torch.cat(token_ids)
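Here chat_turns_to_token_ids is the entry point: it lazily fills chat_template_token_ids for each turn and concatenates them. A minimal usage sketch, assuming a Qwen2.5-Instruct tokenizer (an illustrative checkpoint, not mandated by this file) and the ChatTurn model defined in chat_utils/chat_turn.py below:

# Sketch: one user turn, tokenized through the custom Qwen2 template.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-7B-Instruct")  # illustrative
chats = [ChatTurn(role="user", agent_id="opponent_0", content="Propose a split.")]
token_ids = chat_turns_to_token_ids(chats, tokenizer, enable_thinking=False)
print(token_ids.shape)  # 1-D LongTensor: system prompt + user turn + generation prompt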
src_code_for_reproducibility/chat_utils/chat_turn.py
ADDED
@@ -0,0 +1,27 @@
+from __future__ import annotations
+
+import json
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Any, List, Literal, Optional, Tuple
+
+import jsonschema
+import torch
+from pydantic import BaseModel, ConfigDict, Field, model_validator
+
+AgentId = str
+
+
+class ChatTurn(BaseModel):
+    model_config = ConfigDict(arbitrary_types_allowed=True)  # needed for torch tensors
+
+    role: str = Field(pattern="^(user|assistant)$")
+    agent_id: AgentId  # ID of the agent with which the chat occurred
+    content: str
+    reasoning_content: str | None = None
+    chat_template_token_ids: torch.LongTensor | None = None  # Token ids of chat template format. For example, token ids of "<assistant>{content}</assistant>"
+    out_token_ids: torch.LongTensor | None = (
+        None  # tokens generated from inference engine
+    )
+    log_probs: torch.FloatTensor | None = None
+    is_state_end: bool = False  # indicates whether this chat turn marks the end of a state in the trajectory
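For illustration, an assistant turn as it might look after an inference step; the token ids and log-probs here are made-up placeholders, not real engine output:

# Placeholder values throughout; real out_token_ids/log_probs come from the engine.
turn = ChatTurn(
    role="assistant",
    agent_id="opponent_0",
    content="I accept the deal.",
    out_token_ids=torch.tensor([40, 4193, 279, 3484, 13], dtype=torch.long),
    log_probs=torch.tensor([-0.2, -1.1, -0.3, -0.9, -0.1]),
    is_state_end=True,  # this turn closes a state in the trajectory
)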
src_code_for_reproducibility/chat_utils/template_specific.py
ADDED
@@ -0,0 +1,87 @@
+import huggingface_hub
+import torch
+from transformers import AutoTokenizer
+
+custom_llama3_template = """
+{%- if add_system_prompt %}
+    {{- '<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nCutting Knowledge Date: December 2023\nToday Date: 26 Jul 2024\n\n<|eot_id|>' }}
+{%- endif %}
+{%- for message in messages %}
+    {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' + message['content'] | trim + '<|eot_id|>' }}
+{%- endfor %}
+
+{%- if add_generation_prompt %}
+    {{- '<|start_header_id|>' + 'assistant' + '<|end_header_id|>\n\n' }}
+{%- endif %}
+"""
+
+qwen2_assistant_postfix = (
+    AutoTokenizer.from_pretrained("Qwen/Qwen2.5-7B-Instruct")
+    .encode("\n", return_tensors="pt")
+    .flatten()
+)
+qwen3_assistant_postfix = (
+    AutoTokenizer.from_pretrained("Qwen/Qwen3-8B")
+    .encode("\n", return_tensors="pt")
+    .flatten()
+)
+custom_qwen2_template = """
+{%- if add_system_prompt %}
+    {{- '<|im_start|>system\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\n' }}
+{%- endif %}
+{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
+{%- for message in messages %}
+    {%- if message.content is string %}
+        {%- set content = message.content %}
+    {%- else %}
+        {%- set content = '' %}
+    {%- endif %}
+    {%- if (message.role == "user") %}
+        {{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
+    {%- elif message.role == "assistant" %}
+        {%- set reasoning_content = '' %}
+        {%- if message.reasoning_content is string %}
+            {%- set reasoning_content = message.reasoning_content %}
+        {%- else %}
+            {%- if '</think>' in content %}
+                {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
+                {%- set content = content.split('</think>')[-1].lstrip('\n') %}
+            {%- endif %}
+        {%- endif %}
+        {%- if loop.index0 > ns.last_query_index %}
+            {%- if reasoning_content %}
+                {{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
+            {%- else %}
+                {{- '<|im_start|>' + message.role + '\n' + content }}
+            {%- endif %}
+        {%- else %}
+            {{- '<|im_start|>' + message.role + '\n' + content }}
+        {%- endif %}
+        {{- '<|im_end|>\n' }}
+    {%- endif %}
+{%- endfor %}
+{%- if add_generation_prompt %}
+    {{- '<|im_start|>assistant\n' }}
+{%- endif %}
+"""
+
+custom_qwen3_template = """
+{%- for message in messages %}
+    {%- if message.content is string %}
+        {%- set content = message.content %}
+    {%- else %}
+        {%- set content = '' %}
+    {%- endif %}
+    {%- if (message.role == "user") %}
+        {{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
+    {%- elif message.role == "assistant" %}
+        {{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
+    {%- endif %}
+{%- endfor %}
+{%- if add_generation_prompt %}
+    {{- '<|im_start|>assistant\n' }}
+    {%- if enable_thinking is defined and enable_thinking is false %}
+        {{- '<think>\n\n</think>\n\n' }}
+    {%- endif %}
+{%- endif %}
+"""
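To see what these templates emit, they can be rendered through the standard apply_chat_template interface; note that add_system_prompt is a custom variable consumed only by these templates (extra keyword arguments are forwarded to the Jinja context). A sketch using the same Qwen2.5 checkpoint loaded above:

tok = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-7B-Instruct")
text = tok.apply_chat_template(
    [{"role": "user", "content": "hello"}],
    chat_template=custom_qwen2_template,
    add_generation_prompt=True,
    add_system_prompt=True,  # custom flag read by the template above
    tokenize=False,
)
# Expected shape: <|im_start|>system\n...<|im_end|>\n<|im_start|>user\nhello<|im_end|>\n<|im_start|>assistant\n
print(text)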
src_code_for_reproducibility/docs/Makefile
ADDED
@@ -0,0 +1,19 @@
+# Minimal makefile for Sphinx documentation
+
+# You can set these variables from the command line, and also
+# from the environment for the first two.
+SPHINXOPTS ?=
+SPHINXBUILD ?= sphinx-build
+SOURCEDIR = source
+BUILDDIR = build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(SPHINXFLAGS)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(SPHINXFLAGS)
src_code_for_reproducibility/docs/generate_docs.py
ADDED
@@ -0,0 +1,249 @@
+#!/usr/bin/env python3
+"""
+Script to automatically generate Sphinx documentation for all modules and build the HTML website.
+"""
+import importlib.util
+import os
+import subprocess
+import sys
+
+
+def check_and_install_dependencies():
+    """Check for required dependencies and install them if missing."""
+    required_packages = [
+        "sphinx",
+        "sphinx-rtd-theme",
+        "sphinxcontrib-napoleon",
+        "sphinxcontrib-mermaid",
+        "sphinx-autodoc-typehints",
+    ]
+
+    missing_packages = []
+
+    for package in required_packages:
+        # Convert package name to module name (replace - with _)
+        module_name = package.replace("-", "_")
+
+        # Check if the package is installed
+        if importlib.util.find_spec(module_name) is None:
+            missing_packages.append(package)
+
+    # Install missing packages
+    if missing_packages:
+        print(f"Installing missing dependencies: {', '.join(missing_packages)}")
+        subprocess.check_call(
+            [sys.executable, "-m", "pip", "install"] + missing_packages
+        )
+        print("Dependencies installed successfully")
+    else:
+        print("All required dependencies are already installed")
+
+
+def create_makefile(docs_dir):
+    """Create a Makefile for Sphinx documentation if it doesn't exist."""
+    makefile_path = os.path.join(docs_dir, "Makefile")
+
+    if os.path.exists(makefile_path):
+        print(f"Makefile already exists at {makefile_path}")
+        return
+
+    print(f"Creating Makefile at {makefile_path}")
+
+    makefile_content = """# Minimal makefile for Sphinx documentation
+
+# You can set these variables from the command line, and also
+# from the environment for the first two.
+SPHINXOPTS ?=
+SPHINXBUILD ?= sphinx-build
+SOURCEDIR = source
+BUILDDIR = build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(SPHINXFLAGS)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(SPHINXFLAGS)
+"""
+
+    with open(makefile_path, "w") as f:
+        f.write(makefile_content)
+
+    print("Makefile created successfully")
+
+
+def create_make_bat(docs_dir):
+    """Create a make.bat file for Windows if it doesn't exist."""
+    make_bat_path = os.path.join(docs_dir, "make.bat")
+
+    if os.path.exists(make_bat_path):
+        print(f"make.bat already exists at {make_bat_path}")
+        return
+
+    print(f"Creating make.bat at {make_bat_path}")
+
+    make_bat_content = """@ECHO OFF
+
+pushd %~dp0
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+	set SPHINXBUILD=sphinx-build
+)
+set SOURCEDIR=source
+set BUILDDIR=build
+
+%SPHINXBUILD% >NUL 2>NUL
+if errorlevel 9009 (
+	echo.
+	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+	echo.installed, then set the SPHINXBUILD environment variable to point
+	echo.to the full path of the 'sphinx-build' executable. Alternatively you
+	echo.may add the Sphinx directory to PATH.
+	echo.
+	echo.If you don't have Sphinx installed, grab it from
+	echo.https://www.sphinx-doc.org/
+	exit /b 1
+)
+
+if "%1" == "" goto help
+
+%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+goto end
+
+:help
+%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+
+:end
+popd
+"""
+
+    with open(make_bat_path, "w") as f:
+        f.write(make_bat_content)
+
+    print("make.bat created successfully")
+
+
+def main():
+    # Check and install required dependencies
+    print("=== Checking dependencies ===")
+    check_and_install_dependencies()
+
+    # Get the directory of this script
+    script_dir = os.path.dirname(os.path.abspath(__file__))
+
+    # Path to the project root
+    project_root = os.path.dirname(script_dir)
+
+    # Path to the source directory
+    source_dir = os.path.join(project_root, "src")
+
+    # Path to the docs source directory
+    docs_source_dir = os.path.join(script_dir, "source")
+
+    # Print paths for debugging
+    print(f"Script directory: {script_dir}")
+    print(f"Project root: {project_root}")
+    print(f"Source directory: {source_dir}")
+    print(f"Docs source directory: {docs_source_dir}")
+
+    # Make sure the source directory exists
+    if not os.path.exists(source_dir):
+        print(f"Error: Source directory {source_dir} does not exist!")
+        sys.exit(1)
+
+    # Make sure the docs source directory exists
+    if not os.path.exists(docs_source_dir):
+        print(f"Creating docs source directory: {docs_source_dir}")
+        os.makedirs(docs_source_dir)
+
+    # Step 1: Run sphinx-apidoc to generate .rst files for all modules
+    print("\n=== Generating API documentation ===")
+    cmd = [
+        "sphinx-apidoc",
+        "-f",  # Force overwriting of existing files
+        "-e",  # Put module documentation before submodule documentation
+        "-M",  # Put module documentation before subpackage documentation
+        "-o",
+        docs_source_dir,  # Output directory
+        source_dir,  # Source code directory
+    ]
+
+    print(f"Running command: {' '.join(cmd)}")
+    result = subprocess.run(cmd, capture_output=True, text=True)
+
+    # Print the output of the command
+    print("STDOUT:")
+    print(result.stdout)
+
+    print("STDERR:")
+    print(result.stderr)
+
+    if result.returncode != 0:
+        print(f"Error: sphinx-apidoc failed with return code {result.returncode}")
+        sys.exit(1)
+
+    # List the files in the docs source directory
+    print("\nFiles in docs/source directory:")
+    for file in sorted(os.listdir(docs_source_dir)):
+        print(f"  {file}")
+
+    print("\nDocumentation source files generated successfully!")
+
+    # Step 2: Create Makefile and make.bat if they don't exist
+    create_makefile(script_dir)
+    create_make_bat(script_dir)
+
+    # Step 3: Build the HTML documentation
+    print("\n=== Building HTML documentation ===")
+
+    # Determine the build command based on the platform
+    if os.name == "nt":  # Windows
+        build_cmd = ["make.bat", "html"]
+    else:  # Unix/Linux/Mac
+        build_cmd = ["make", "html"]
+
+    # Change to the docs directory to run the build command
+    os.chdir(script_dir)
+
+    print(f"Running command: {' '.join(build_cmd)}")
+    build_result = subprocess.run(build_cmd, capture_output=True, text=True)
+
+    # Print the output of the build command
+    print("STDOUT:")
+    print(build_result.stdout)
+
+    print("STDERR:")
+    print(build_result.stderr)
+
+    if build_result.returncode != 0:
+        print(f"Error: HTML build failed with return code {build_result.returncode}")
+        sys.exit(1)
+
+    # Get the path to the built HTML documentation
+    html_dir = os.path.join(script_dir, "build", "html")
+    index_path = os.path.join(html_dir, "index.html")
+
+    if os.path.exists(index_path):
+        print("\nHTML documentation built successfully!")
+        print(f"You can view it by opening: {index_path}")
+
+        # Try to open the documentation in a browser
+        try:
+            import webbrowser
+
+            print("\nAttempting to open documentation in your default browser...")
+            webbrowser.open(f"file://{index_path}")
+        except Exception as e:
+            print(f"Could not open browser automatically: {e}")
+    else:
+        print(f"\nWarning: HTML index file not found at {index_path}")
+
+
+if __name__ == "__main__":
+    main()
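For reference, the script is meant to be run directly; a sketch of invoking it programmatically (the path assumes the repository root as working directory):

import subprocess, sys

# The script resolves paths relative to its own location, so the cwd does not matter.
subprocess.run(
    [sys.executable, "src_code_for_reproducibility/docs/generate_docs.py"],
    check=True,  # the script exits non-zero on apidoc or HTML build failure
)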
src_code_for_reproducibility/docs/make.bat
ADDED
@@ -0,0 +1,35 @@
+@ECHO OFF
+
+pushd %~dp0
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+	set SPHINXBUILD=sphinx-build
+)
+set SOURCEDIR=source
+set BUILDDIR=build
+
+%SPHINXBUILD% >NUL 2>NUL
+if errorlevel 9009 (
+	echo.
+	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+	echo.installed, then set the SPHINXBUILD environment variable to point
+	echo.to the full path of the 'sphinx-build' executable. Alternatively you
+	echo.may add the Sphinx directory to PATH.
+	echo.
+	echo.If you don't have Sphinx installed, grab it from
+	echo.https://www.sphinx-doc.org/
+	exit /b 1
+)
+
+if "%1" == "" goto help
+
+%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+goto end
+
+:help
+%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+
+:end
+popd
src_code_for_reproducibility/markov_games/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (159 Bytes).

src_code_for_reproducibility/markov_games/__pycache__/agent.cpython-312.pyc
ADDED
Binary file (3.2 kB).

src_code_for_reproducibility/markov_games/__pycache__/alternative_actions_runner.cpython-312.pyc
ADDED
Binary file (4.95 kB).

src_code_for_reproducibility/markov_games/__pycache__/gather_and_export_utils.cpython-312.pyc
ADDED
Binary file (46.5 kB).

src_code_for_reproducibility/markov_games/__pycache__/group_timesteps.cpython-312.pyc
ADDED
Binary file (6.17 kB).

src_code_for_reproducibility/markov_games/__pycache__/linear_runner.cpython-312.pyc
ADDED
Binary file (1.25 kB).

src_code_for_reproducibility/markov_games/__pycache__/markov_game.cpython-312.pyc
ADDED
Binary file (9.72 kB).

src_code_for_reproducibility/markov_games/__pycache__/mg_utils.cpython-312.pyc
ADDED
Binary file (3.98 kB).

src_code_for_reproducibility/markov_games/__pycache__/rollout_tree.cpython-312.pyc
ADDED
Binary file (3.67 kB).

src_code_for_reproducibility/markov_games/__pycache__/run_markov_games.cpython-312.pyc
ADDED
Binary file (1.14 kB).

src_code_for_reproducibility/markov_games/__pycache__/simulation.cpython-312.pyc
ADDED
Binary file (3.9 kB).
src_code_for_reproducibility/markov_games/agent.py
ADDED
@@ -0,0 +1,76 @@
+"""
+In simple RL paradise, where the action dimensions are constant and well defined,
+Agent classes are not necessary. But in MARL with LLMs, there isn't always
+a direct path from policy to action. For instance, from the observation of the environment,
+a prompt must be created. Then, the outputs of the policy might be incorrect, so a second
+request to the LLM must be sent before the action is well defined. This is why this Agent class exists.
+It acts as a mini environment, bridging the gap between the core simulation and
+the LLM policies.
+"""
+
+from abc import ABC, abstractmethod
+from collections.abc import Callable
+from typing import Any, Tuple
+
+from numpy.random import default_rng
+
+from mllm.markov_games.rollout_tree import AgentActLog
+
+
+class Agent(ABC):
+    @abstractmethod
+    def __init__(
+        self,
+        seed: int,
+        agent_id: str,
+        agent_name: str,
+        agent_policy: Callable[[list[dict]], str],
+        *args,
+        **kwargs,
+    ):
+        """
+        Initialize the agent state.
+        """
+        self.seed = seed
+        self.agent_id = agent_id
+        self.agent_name = agent_name
+        self.policy = agent_policy
+        self.rng = default_rng(self.seed)
+        raise NotImplementedError
+
+    async def act(self, observation) -> Tuple[Any, AgentActLog]:
+        """
+        Query (possibly multiple times) a policy (or possibly a pool of policies) to
+        obtain the action of the agent.
+
+        Example:
+            action = None
+            prompt = self.observation_to_prompt(observation)
+            while not self.valid(action):
+                output = await self.policy.generate(prompt)
+                action = self.policy_output_to_action(output)
+            return action
+
+        Returns:
+            action
+            step_info
+        """
+        raise NotImplementedError
+
+    def get_safe_copy(self):
+        """
+        Return a copy of the agent object that is decorrelated from the original object.
+        """
+        raise NotImplementedError
+
+    def reset(self):
+        raise NotImplementedError
+
+    def render(self):
+        raise NotImplementedError
+
+    def close(self):
+        raise NotImplementedError
+
+    def get_agent_info(self):
+        raise NotImplementedError
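To make the abstract contract concrete, here is a minimal illustrative subclass; the EchoAgent name, the async policy callable, and the trivial parsing are assumptions for the sketch, not code from the repo:

# Minimal illustrative subclass: one policy call per act(), no retry loop.
class EchoAgent(Agent):
    def __init__(self, seed, agent_id, agent_name, agent_policy):
        self.seed = seed
        self.agent_id = agent_id
        self.agent_name = agent_name
        self.policy = agent_policy
        self.rng = default_rng(self.seed)

    async def act(self, observation):
        # Build a single-message prompt from the raw observation.
        prompt = [{"role": "user", "content": str(observation)}]
        output = await self.policy(prompt)  # assumed async policy callable
        # A real agent would parse/validate here and possibly re-query the policy.
        return output, None  # AgentActLog omitted in this sketch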
src_code_for_reproducibility/markov_games/diplomacy/diplomacy_agent.py
ADDED
@@ -0,0 +1,259 @@
+from typing import Dict, List, Tuple, Optional, Any
+import copy
+
+class DiplomacyAgent:
+    """Agent handler for Diplomacy game that follows the MARL standard.
+
+    This class is responsible for parsing LLM output into valid Diplomacy orders,
+    managing the agent state, and providing information for logging.
+    """
+
+    def __init__(self, policy_id: str, power_name: str, random_valid_move=False):
+        """Initialize the agent handler for a power in the Diplomacy game.
+
+        Args:
+            power_name: The name of the power this agent controls (e.g., 'FRANCE', 'ENGLAND')
+            policy_id: The identifier for the policy this agent uses
+            random_valid_move: If True, will select random valid moves instead of using LLM (default: False)
+        """
+        self.policy_id = policy_id
+        self.power_name = power_name
+        self.orders = []
+        self.wait = True
+        self.processing_state = "WAITING_FOR_ORDERS"
+        self.parsed_orders = []
+        self.order_status = {}
+        self.message_history = []
+        self.random_valid_move = random_valid_move
+
+    def step(self, observation_from_env, policy_output=None):
+        """Update the agent state based on the observation and LLM output.
+
+        Args:
+            observation_from_env: The observation from the environment
+            policy_output: The output from the LLM
+
+        Returns:
+            policy_id: The policy identifier
+            policy_input: The input to the policy
+            action: The official action to be sent to the environment
+            done: Whether the LLM action is ready to be sent to the environment
+            info: Additional information about the agent
+        """
+        info = {}
+
+        # If random_valid_move is enabled, select random valid moves
+        if self.random_valid_move:
+            valid_orders = self._select_random_valid_moves(observation_from_env)
+            self.orders = valid_orders
+            self.wait = False
+            action = {
+                "orders": valid_orders,
+                "wait": False
+            }
+            return self.policy_id, {}, action, True, info
+
+        # If no policy output, this is the initial step - prepare prompt
+        if policy_output is None:
+            # Create initial prompt for the LLM
+            phase = observation_from_env.get('phase', '')
+            units = observation_from_env.get('units', {}).get(self.power_name, [])
+            centers = observation_from_env.get('centers', {}).get(self.power_name, [])
+            orderable_locations = observation_from_env.get('orderable_locations', {})
+
+            prompt = self._create_prompt(phase, units, centers, orderable_locations)
+
+            return self.policy_id, {"prompt": prompt}, None, False, info
+
+        # Process the LLM output to extract orders
+        success, parsed_orders = self._parse_llm_output(policy_output)
+        self.parsed_orders = parsed_orders
+
+        if not success:
+            # Need more information from LLM
+            clarification_prompt = self._create_clarification_prompt(policy_output, parsed_orders)
+            return self.policy_id, {"prompt": clarification_prompt}, None, False, info
+
+        # Validate if the orders are valid for the current phase
+        valid_orders = self._validate_orders(parsed_orders, observation_from_env)
+
+        if valid_orders:
+            # Orders are valid, prepare action for environment
+            self.orders = valid_orders
+            self.wait = False
+            action = {
+                "orders": valid_orders,
+                "wait": False
+            }
+            return self.policy_id, {}, action, True, info
+        else:
+            # Orders are invalid, ask for new ones
+            error_prompt = self._create_error_prompt(parsed_orders, observation_from_env)
+            return self.policy_id, {"prompt": error_prompt}, None, False, info
+
+    def _create_prompt(self, phase, units, centers, orderable_locations):
+        """Create the initial prompt for the LLM.
+
+        Args:
+            phase: The current game phase
+            units: List of units controlled by this power
+            centers: List of supply centers controlled by this power
+            orderable_locations: List of locations where orders can be issued
+
+        Returns:
+            A prompt string for the LLM
+        """
+        prompt = f"You are playing as {self.power_name} in Diplomacy. The current phase is {phase}.\n\n"
+        prompt += f"Your units: {', '.join(units)}\n"
+        prompt += f"Your supply centers: {', '.join(centers)}\n"
+        prompt += f"Locations you can order: {', '.join(orderable_locations)}\n\n"
+
+        if phase.endswith('M'):  # Movement phase
+            prompt += "Please provide orders for your units in the form:\n"
+            prompt += "- A LON H (hold)\n"
+            prompt += "- F NTH - NWY (move)\n"
+            prompt += "- A WAL S F LON (support)\n"
+            prompt += "- F NWG C A NWY - EDI (convoy)\n"
+        elif phase.endswith('R'):  # Retreat phase
+            prompt += "Please provide retreat orders for your dislodged units:\n"
+            prompt += "- A PAR R MAR (retreat to MAR)\n"
+            prompt += "- A PAR D (disband)\n"
+        elif phase.endswith('A'):  # Adjustment phase
+            if len(units) < len(centers):
+                prompt += "You can build units. Please provide build orders:\n"
+                prompt += "- A PAR B (build army in PAR)\n"
+                prompt += "- F BRE B (build fleet in BRE)\n"
+                prompt += "- WAIVE (waive a build)\n"
+            elif len(units) > len(centers):
+                prompt += "You must remove units. Please provide disbandment orders:\n"
+                prompt += "- A PAR D (disband army in PAR)\n"
+                prompt += "- F BRE D (disband fleet in BRE)\n"
+
+        prompt += "\nProvide your orders as a list, one per line."
+        return prompt
+
+    def _parse_llm_output(self, llm_output):
+        """Parse the LLM output to extract orders.
+
+        Args:
+            llm_output: The raw output from the LLM
+
+        Returns:
+            success: Whether parsing was successful
+            parsed_orders: List of parsed orders
+        """
+        # Simple parsing for now - extract lines that look like orders
+        lines = llm_output.strip().split('\n')
+        orders = []
+
+        for line in lines:
+            # Remove list markers, hyphens, etc.
+            line = line.strip('- *•').strip()
+
+            # Skip empty lines and lines that don't look like orders
+            if not line or line.startswith('I ') or line.startswith('Let\'s'):
+                continue
+
+            # Check if it looks like a Diplomacy order
+            if (' H' in line or ' -' in line or ' S ' in line or ' C ' in line or
+                    ' R ' in line or ' D' in line or ' B' in line or line == 'WAIVE'):
+                orders.append(line)
+
+        return len(orders) > 0, orders
+
+    def _validate_orders(self, orders, observation):
+        """Validate if the orders are valid for the current phase.
+
+        Args:
+            orders: List of orders to validate
+            observation: Current observation from the environment
+
+        Returns:
+            List of valid orders or None if invalid
+        """
+        # For simplicity, we'll assume all parsed orders are valid
+        # In a real implementation, we would use the game's validation logic
+        return orders
+
+    def _create_clarification_prompt(self, previous_output, parsed_orders):
+        """Create a prompt asking for clarification when orders couldn't be parsed.
+
+        Args:
+            previous_output: The previous LLM output
+            parsed_orders: Any orders that were successfully parsed
+
+        Returns:
+            A prompt string for the LLM
+        """
+        prompt = f"I couldn't fully understand your orders for {self.power_name}. "
+
+        if parsed_orders:
+            prompt += "I understood these orders:\n"
+            for order in parsed_orders:
+                prompt += f"- {order}\n"
+
+        prompt += "\nPlease provide clear, valid Diplomacy orders in the format:\n"
+        prompt += "- A LON H\n- F NTH - NWY\n- etc.\n"
+        return prompt
+
+    def _create_error_prompt(self, invalid_orders, observation):
+        """Create a prompt when orders are invalid.
+
+        Args:
+            invalid_orders: The invalid orders
+            observation: Current observation from the environment
+
+        Returns:
+            A prompt string for the LLM
+        """
+        prompt = f"The following orders for {self.power_name} are invalid:\n"
+        for order in invalid_orders:
+            prompt += f"- {order}\n"
+
+        prompt += "\nPlease provide valid orders for your units."
+        return prompt
+
+    def get_log_info(self):
+        """Get information about the agent required to log a trajectory.
+
+        Returns:
+            log_info: Information about the agent required to log a trajectory.
+        """
+        return {
+            "power_name": self.power_name,
+            "orders": self.orders,
+            "wait": self.wait,
+            "parsing_state": self.processing_state,
+            "message_history": self.message_history
+        }
+
+    def render(self):
+        """Render the current state of the agent."""
+        print(f"Power: {self.power_name}")
+        print(f"Orders: {self.orders}")
+        print(f"Wait: {self.wait}")
+
+    def close(self):
+        """Perform any necessary cleanup."""
+        pass
+
+    def _select_random_valid_moves(self, observation):
+        """Select random valid moves for all units.
+
+        Args:
+            observation: Current observation from the environment
+
+        Returns:
+            List of valid orders
+        """
+        import random
+
+        possible_orders = observation.get('possible_orders', {})
+        valid_orders = []
+
+        # For each location with possible orders, select one randomly
+        for location, orders in possible_orders.items():
+            if orders:  # If there are any possible orders for this location
+                valid_orders.append(random.choice(orders))
+
+        return valid_orders
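The step method above is effectively a small state machine: the first call returns a prompt, later calls either re-prompt or hand back the final action. A sketch of the driving loop; obs (one power's observation from the environment below) and call_llm (the policy backend) are assumed stand-ins, not repo code:

# Hypothetical driver for one power's decision in one phase.
agent = DiplomacyAgent(policy_id="llm_0", power_name="FRANCE")
policy_output = None
done = False
while not done:
    policy_id, policy_input, action, done, info = agent.step(obs, policy_output)
    if not done:
        policy_output = call_llm(policy_input["prompt"])  # assumed LLM call
# `action` now holds {"orders": [...], "wait": False} for the environment.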
src_code_for_reproducibility/markov_games/diplomacy/diplomacy_env.py
ADDED
@@ -0,0 +1,230 @@
+from typing import Dict, List, Tuple, Optional, Any
+from diplomacy import Game
+import random
+
+class DiplomacyEnv:
+    """Multi-Agent Reinforcement Learning environment for Diplomacy.
+
+    This class wraps the Diplomacy game engine to provide an interface
+    compliant with the MARL standard.
+    """
+
+    def __init__(self, random_seed=None, map_name="standard", game_id=None, rules=None, max_steps=50):
+        """Initialize the Diplomacy environment.
+
+        Args:
+            random_seed: Optional random seed for the environment
+            map_name: The name of the map to use (default: "standard")
+            game_id: Optional game ID
+            rules: Optional rules to apply to the game
+            max_steps: Maximum number of steps before forcing game end (default: 50)
+        """
+        self.random_seed = random_seed
+        self.map_name = map_name
+        self.game_id = game_id
+        self.rules = rules or []
+        self.game = None
+        self.active_powers = []
+        self.render_mode = None
+        self.max_steps = max_steps
+        self.current_steps = 0
+
+    def reset(self):
+        """Reset the environment to an initial state and return the initial observation.
+
+        Returns:
+            observation: A dictionary where keys are agent identifiers and values are observations.
+        """
+        # Initialize a new game
+        self.game = Game(game_id=self.game_id, map_name=self.map_name)
+
+        # Apply rules
+        for rule in self.rules:
+            self.game.add_rule(rule)
+
+        # Determine active powers (not eliminated)
+        self.active_powers = [name for name, power in self.game.powers.items()
+                              if not power.is_eliminated()]
+
+        # Reset step counter
+        self.current_steps = 0
+
+        # Create initial observations for all powers
+        observations = {}
+        for power_name in self.active_powers:
+            observations[power_name] = self._create_observation(power_name)
+
+        return observations
+
+    def step(self, actions):
+        """Take a step in the environment using the provided actions.
+
+        Args:
+            actions: A dictionary where keys are agent identifiers and values are actions.
+
+        Returns:
+            observations: A dictionary where keys are agent identifiers and values are observations.
+            done: Whether the episode has ended.
+            info: Additional information about the environment.
+        """
+        print(f"stepping {self.current_steps}")
+        self.current_steps += 1
+        # Apply actions (orders) for each power
+        for power_name, action in actions.items():
+            if power_name in self.active_powers:
+                orders = action.get("orders", [])
+                wait = action.get("wait", True)
+
+                # Set orders for the power
+                if orders:
+                    self.game.set_orders(power_name, orders)
+
+                # Set wait flag
+                self.game.set_wait(power_name, wait)
+
+        # Check if all active powers are ready to proceed
+        if self.game.does_not_wait():
+            # Process the current phase
+            self.game.process()
+
+        # Update active powers list after processing
+        self.active_powers = [name for name, power in self.game.powers.items()
+                              if not power.is_eliminated()]
+
+        # Create observations for all active powers
+        observations = {}
+        for power_name in self.active_powers:
+            observations[power_name] = self._create_observation(power_name)
+
+        # Check if the game is done (either naturally or due to max steps)
+        done = self.game.is_game_done or self.current_steps >= self.max_steps
+
+        # Create info dict
+        info = {
+            "phase": self.game.get_current_phase(),
+            "active_powers": self.active_powers,
+            "centers": self.game.get_centers(),
+            "units": self.game.get_units(),
+            "current_steps": self.current_steps,
+            "max_steps_reached": self.current_steps >= self.max_steps
+        }
+
+        return observations, done, info
+
+    def _create_observation(self, power_name):
+        """Create observation for a specific power.
+
+        Args:
+            power_name: The name of the power
+
+        Returns:
+            An observation dictionary
+        """
+        observation = {
+            "phase": self.game.get_current_phase(),
+            "units": self.game.get_units(),
+            "centers": self.game.get_centers(),
+            "orderable_locations": self.game.get_orderable_locations(power_name),
+            "order_status": self.game.get_order_status(power_name),
+            "possible_orders": self._get_possible_orders_for_power(power_name)
+        }
+        return observation
+
+    def _get_possible_orders_for_power(self, power_name):
+        """Get all possible orders for a power's units.
+
+        Args:
+            power_name: The name of the power
+
+        Returns:
+            A dictionary mapping units to their possible orders
+        """
+        all_possible_orders = self.game.get_all_possible_orders()
+
+        # Filter for only the locations where this power has units
+        power_units = self.game.get_units(power_name)
+        power_unit_locations = [unit[2:] for unit in power_units]
+
+        # For retreat phases, include retreating units
+        if self.game.phase_type == 'R':
+            power = self.game.get_power(power_name)
+            power_unit_locations.extend([unit[2:] for unit in power.retreats])
+
+        # For adjustment phases, include buildable locations
+        elif self.game.phase_type == 'A':
+            power = self.game.get_power(power_name)
+            # If we have more centers than units, we can build
+            if len(power.centers) > len(power.units):
+                buildable_sites = self.game._build_sites(power)
+                power_unit_locations.extend(buildable_sites)
+            # If we have more units than centers, we need to remove
+            elif len(power.units) > len(power.centers):
+                # All units are candidates for removal
+                pass
+
+        # Filter the possible orders to only those for this power's units/locations
+        power_possible_orders = {}
+        for loc, orders in all_possible_orders.items():
+            if loc[:3] in power_unit_locations:
+                power_possible_orders[loc] = orders
+
+        return power_possible_orders
+
+    def get_log_info(self):
+        """Get additional information about the environment for logging.
+
+        Returns:
+            log_info: Information about the environment required to log the game.
+        """
+        if not self.game:
+            return {}
+
+        return {
+            "game_id": self.game.game_id,
+            "phase": self.game.get_current_phase(),
+            "map_name": self.game.map_name,
+            "centers": self.game.get_centers(),
+            "units": self.game.get_units(),
+            "powers": {name: {
+                "units": power.units,
+                "centers": power.centers,
+                "is_eliminated": power.is_eliminated(),
+                "order_status": self.game.get_order_status(name)
+            } for name, power in self.game.powers.items()},
+            "orders": self.game.get_orders(),
+            "active_powers": self.active_powers,
+            "is_game_done": self.game.is_game_done,
+            "outcome": self.game.outcome if self.game.is_game_done else None
+        }
+
+    def render(self, mode='human'):
+        """Render the current state of the environment.
+
+        Args:
+            mode: The rendering mode ('human', 'svg', etc.)
+
+        Returns:
+            The rendered image if applicable
+        """
+        self.render_mode = mode
+        if self.game:
+            if mode == 'human':
+                # Just print basic game state
+                print(f"Game: {self.game.game_id}")
+                print(f"Phase: {self.game.get_current_phase()}")
+                print(f"Active Powers: {self.active_powers}")
+                print("Supply Centers:")
+                for power_name, centers in self.game.get_centers().items():
+                    print(f"  {power_name}: {centers}")
+                print("Units:")
+                for power_name, units in self.game.get_units().items():
+                    print(f"  {power_name}: {units}")
+                return None
+            elif mode == 'svg':
+                # Return SVG representation
+                return self.game.render(output_format='svg')
+        return None
+
+    def close(self):
+        """Perform any necessary cleanup."""
+        self.game = None
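A sketch of wiring the environment to the agent handler above, using random_valid_move=True so each step() returns a complete action immediately (the LLM prompting loop from the previous sketch would replace that in practice). Assumed driver code, not from the repo:

# Assumed top-level game loop over phases.
env = DiplomacyEnv(max_steps=50)
agents = {p: DiplomacyAgent(policy_id="random_0", power_name=p, random_valid_move=True)
          for p in ["FRANCE", "ENGLAND", "GERMANY", "ITALY", "AUSTRIA", "RUSSIA", "TURKEY"]}

observations = env.reset()
done = False
while not done:
    actions = {}
    for power, obs in observations.items():
        # random_valid_move=True means step() returns a final action on the first call.
        _, _, action, _, _ = agents[power].step(obs)
        actions[power] = action
    observations, done, info = env.step(actions)
print(info["phase"], info["centers"])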
src_code_for_reproducibility/markov_games/diplomacy/diplomacy_logging.py
ADDED
@@ -0,0 +1,360 @@
import os
import json

from utils.common_imports import *


def diplomacy_log_match(
    path,
    agents_log_info,
    env_log_info,
    metrics_func=None,
    metrics_func_args=None
):
    """
    Logs the Diplomacy game data and generates HTML visualizations using the get_log_info methods.

    Args:
        path (str): Base path to save the data.
        agents_log_info (list): List of agent information dictionaries containing the get_log_info results.
        env_log_info (dict): Environment information from its get_log_info method.
        metrics_func (str, optional): Name of the function to calculate metrics.
        metrics_func_args (dict, optional): Arguments for the metrics function.
    """
    # Create directory structure
    os.makedirs(path, exist_ok=True)

    # Save the environment log info
    env_log_path = os.path.join(path, "env_log.json")
    with open(env_log_path, "w") as f:
        json.dump(env_log_info, f, indent=4, default=_json_serialize)

    # Process each agent's log info
    for agent_log in agents_log_info:
        power_name = agent_log["power_name"]

        # Define paths for raw data and statistics subfolders
        power_path = os.path.join(path, power_name)
        raw_data_path = os.path.join(power_path, "raw_data")
        statistics_path = os.path.join(power_path, "statistics")

        # Ensure directories exist
        os.makedirs(raw_data_path, exist_ok=True)
        os.makedirs(statistics_path, exist_ok=True)

        # Determine the next available file number for raw data
        raw_files = os.listdir(raw_data_path)
        raw_numbers = [int(f.split('_')[-1].split('.')[0]) for f in raw_files if f.startswith("log_")]
        next_raw_number = max(raw_numbers, default=0) + 1
        raw_file = os.path.join(raw_data_path, f"log_{next_raw_number}.json")

        # Save agent log info
        with open(raw_file, "w") as f:
            json.dump(agent_log, f, indent=4, default=_json_serialize)

        # Log metrics if a metrics function is provided
        if metrics_func:
            metrics_files = os.listdir(statistics_path)
            metrics_numbers = [int(f.split('_')[-1].split('.')[0]) for f in metrics_files if f.startswith("metrics_")]
            next_metrics_number = max(metrics_numbers, default=0) + 1
            metrics_file = os.path.join(statistics_path, f"metrics_{next_metrics_number}.json")

            # Resolve the metrics function by name and pass both logs
            metrics = globals()[metrics_func](agent_log, env_log_info, **(metrics_func_args or {}))
            with open(metrics_file, "w") as f:
                json.dump(metrics, f, indent=4)

    # Generate the HTML visualization
    html_content = generate_diplomacy_html(agents_log_info, env_log_info)

    # Ensure the html directory exists
    html_path = os.path.join(path, "html")
    os.makedirs(html_path, exist_ok=True)

    # Determine the next available file number for HTML
    html_files = os.listdir(html_path)
    html_numbers = [int(f.split('_')[-1].split('.')[0]) for f in html_files if f.startswith("game_summary_")]
    next_html_number = max(html_numbers, default=0) + 1
    html_file = os.path.join(html_path, f"game_summary_{next_html_number}.html")

    # Save the HTML content to a file
    with open(html_file, "w") as f:
        f.write(html_content)


def generate_diplomacy_html(agent_infos, env_info):
    """
    Generate HTML visualization for a Diplomacy game.

    Args:
        agent_infos (list): List of agent information dictionaries from get_log_info.
        env_info (dict): Environment information from get_log_info.

    Returns:
        str: HTML content for the game visualization.
    """
    # Extract game information
    game_id = env_info.get("game_id", "Unknown")
    phase = env_info.get("phase", "Unknown")
    map_name = env_info.get("map_name", "standard")
    is_game_done = env_info.get("is_game_done", False)
    outcome = env_info.get("outcome", [])

    centers = env_info.get("centers", {})
    units = env_info.get("units", {})

    # HTML head and style
    html_content = """
    <!DOCTYPE html>
    <html lang="en">
    <head>
        <meta charset="UTF-8">
        <meta name="viewport" content="width=device-width, initial-scale=1.0">
        <title>Diplomacy Game {game_id}</title>
        <style>
            body {{
                font-family: 'Arial', sans-serif;
                background-color: #f5f5f5;
                color: #333333;
                margin: 0;
                padding: 20px;
            }}
            .container {{
                display: grid;
                grid-template-columns: repeat(3, 1fr);
                grid-gap: 20px;
                margin-bottom: 30px;
            }}
            .central-info {{
                grid-column: span 3;
                background: #fff;
                padding: 20px;
                border-radius: 10px;
                box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1);
                margin-bottom: 20px;
            }}
            .power-column {{
                background: #fff;
                padding: 15px;
                border-radius: 10px;
                box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1);
            }}
            .message {{
                margin-bottom: 15px;
                padding: 12px;
                border-radius: 8px;
                box-shadow: 0 1px 4px rgba(0, 0, 0, 0.1);
            }}
            .user {{
                background: rgba(235, 245, 255, 0.8);
                border-left: 4px solid #007bff;
            }}
            .assistant {{
                background: rgba(240, 255, 240, 0.8);
                border-right: 4px solid #28a745;
            }}
            .orders {{
                background: rgba(255, 248, 225, 0.8);
                border-left: 4px solid #ffc107;
            }}
            .role {{
                font-weight: bold;
                margin-bottom: 5px;
                color: #333333;
            }}
            .power-name {{
                text-align: center;
                font-size: 1.4em;
                margin-bottom: 15px;
                color: #000;
                font-weight: 600;
                text-transform: uppercase;
                letter-spacing: 1px;
            }}
            .game-info {{
                display: grid;
                grid-template-columns: repeat(2, 1fr);
                grid-gap: 15px;
            }}
            .info-card {{
                background: #f9f9f9;
                padding: 15px;
                border-radius: 8px;
                box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
            }}
            .supply-centers, .units-list {{
                display: flex;
                flex-wrap: wrap;
                justify-content: space-between;
            }}
            .supply-center, .unit {{
                flex: 0 0 30%;
                margin-bottom: 10px;
                padding: 8px;
                background: #f0f0f0;
                border-radius: 5px;
                text-align: center;
            }}
            h2 {{
                border-bottom: 2px solid #eee;
                padding-bottom: 10px;
                margin-top: 0;
            }}
            .outcome {{
                background: #e8f5e9;
                padding: 15px;
                border-radius: 8px;
                margin-top: 15px;
                font-weight: bold;
                text-align: center;
            }}
            .austria {{ border-top: 5px solid #ff5050; }}
            .england {{ border-top: 5px solid #5050ff; }}
            .france {{ border-top: 5px solid #50c0ff; }}
            .germany {{ border-top: 5px solid #808080; }}
            .italy {{ border-top: 5px solid #50ff50; }}
            .russia {{ border-top: 5px solid #ffffff; border: 1px solid #ccc; }}
            .turkey {{ border-top: 5px solid #c0c000; }}
        </style>
    </head>
    <body>
        <div class="central-info">
            <h2>Game Information</h2>
            <div class="game-info">
                <div class="info-card">
                    <h3>Game Details</h3>
                    <p><strong>Game ID:</strong> {game_id}</p>
                    <p><strong>Phase:</strong> {phase}</p>
                    <p><strong>Map:</strong> {map_name}</p>
                    <p><strong>Status:</strong> {status}</p>
                </div>
                <div class="info-card">
                    <h3>Supply Centers</h3>
                    <div class="supply-centers">
    """.format(
        game_id=game_id,
        phase=phase,
        map_name=map_name,
        status="Completed" if is_game_done else "Active"
    )

    # Add supply center information
    for power, power_centers in centers.items():
        html_content += f"""
                        <div class="supply-center">
                            <strong>{power}:</strong> {len(power_centers)}
                        </div>
        """

    html_content += """
                    </div>
                </div>
            </div>
    """

    # Add outcome if game is done
    if is_game_done and outcome:
        winners = outcome[1:] if len(outcome) > 1 else ["Draw"]
        html_content += f"""
            <div class="outcome">
                <h3>Game Outcome</h3>
                <p>Winners: {', '.join(winners)}</p>
            </div>
        """

    html_content += """
        </div>
        <div class="container">
    """

    # Add each power's information
    for agent_log in agent_infos:
        power_name = agent_log["power_name"]
        power_class = power_name.lower()
        orders = agent_log.get("orders", [])
        message_history = agent_log.get("message_history", [])

        html_content += f"""
            <div class="power-column {power_class}">
                <div class="power-name">{power_name}</div>

                <div class="info-card">
                    <h3>Units</h3>
                    <ul>
        """

        # Add units information
        power_units = units.get(power_name, [])
        for unit in power_units:
            html_content += f"<li>{unit}</li>"

        html_content += """
                    </ul>
                </div>

                <div class="message orders">
                    <div class="role">Final Orders</div>
                    <ul>
        """

        # Add orders
        for order in orders:
            html_content += f"<li>{order}</li>"

        html_content += """
                    </ul>
                </div>
        """

        # Add message history
        for message in message_history:
            if isinstance(message, dict):
                # Skip system messages or handle differently
                if message.get("role") == "system":
                    continue

                role = message.get("role", "unknown")
                content = message.get("content", "")

                role_class = "user" if role == "user" else "assistant"
                role_display = "Environment" if role == "user" else f"LLM ({power_name})"

                # Escape HTML characters in content
                content = content.replace("<", "&lt;").replace(">", "&gt;").replace("\n", "<br>")

                html_content += f"""
                <div class="message {role_class}">
                    <div class="role">{role_display}</div>
                    <p>{content}</p>
                </div>
                """
            elif isinstance(message, str):
                # Simple string messages (may be used in some implementations)
                html_content += f"""
                <div class="message">
                    <p>{message}</p>
                </div>
                """

        html_content += """
            </div>
        """

    html_content += """
        </div>
    </body>
    </html>
    """

    return html_content


def _json_serialize(obj):
    """
    A helper function to convert non-JSON-serializable objects
    (like OrderResult) into strings or dicts.
    """
    # Check for the specific object types you know are problematic
    if obj.__class__.__name__ == "OrderResult":
        # Return a string representation or a dict
        return str(obj)

    # Fallback: attempt to convert anything else to string
    return str(obj)
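A minimal, hypothetical call sketch for `diplomacy_log_match`; the path and log dictionaries below are made-up stand-ins for real `get_log_info` outputs. After the call, `runs/match_0/` holds `env_log.json`, a `<POWER>/raw_data/log_N.json` per agent, and `html/game_summary_N.html`.

agents_log_info = [
    {"power_name": "FRANCE", "orders": ["A PAR - BUR"], "message_history": []},
    {"power_name": "GERMANY", "orders": ["A MUN - RUH"], "message_history": []},
]
env_log_info = {"game_id": "demo", "phase": "S1901M", "map_name": "standard",
                "is_game_done": False, "centers": {}, "units": {}}

diplomacy_log_match("runs/match_0", agents_log_info, env_log_info)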
src_code_for_reproducibility/markov_games/diplomacy/diplomacy_logging_for_training.py
ADDED
File without changes
src_code_for_reproducibility/markov_games/ipd/Ipd_hard_coded_agents.py
ADDED
@@ -0,0 +1,72 @@
from dataclasses import dataclass
from typing import Any, Tuple

from mllm.markov_games.ipd.ipd_agent import IPDAgent
from mllm.markov_games.rollout_tree import AgentActLog, ChatTurn


@dataclass
class AlwaysCooperateIPDAgent(IPDAgent):
    async def act(self, observation) -> Tuple[Any, AgentActLog]:
        """
        Always plays the cooperate action, ignoring the observation.
        Returns the configured cooperate_string so the simulation parses it as "C".
        """

        action = self.cooperate_string

        # Log a minimal, structured chat turn for consistency with other agents
        turn_text = f"Playing cooperate: {action}"
        self.state.chat_history.append(
            ChatTurn(
                agent_id=self.agent_id,
                role="assistant",
                content=turn_text,
                is_state_end=True,
            )
        )

        act_log = AgentActLog(
            chat_turns=[self.state.chat_history[-1]],
            info=None,
        )

        # Advance internal counters similar to IPDAgent semantics
        self.state.chat_counter = len(self.state.chat_history)
        self.state.round_nb = observation.round_nb

        return action, act_log


@dataclass
class AlwaysDefectIPDAgent(IPDAgent):
    async def act(self, observation) -> Tuple[Any, AgentActLog]:
        """
        Always plays the defect action, ignoring the observation.
        Returns the configured defect_string so the simulation parses it as "D".
        """

        action = self.defect_string

        # Log a minimal, structured chat turn for consistency with other agents
        turn_text = f"Playing defect: {action}"
        self.state.chat_history.append(
            ChatTurn(
                agent_id=self.agent_id,
                role="assistant",
                content=turn_text,
                is_state_end=True,
            )
        )

        act_log = AgentActLog(
            chat_turns=[self.state.chat_history[-1]],
            info=None,
        )

        # Advance internal counters similar to IPDAgent semantics
        self.state.chat_counter = len(self.state.chat_history)
        self.state.round_nb = observation.round_nb

        return action, act_log
src_code_for_reproducibility/markov_games/ipd/__init__.py
ADDED
@@ -0,0 +1,7 @@
from .Ipd_hard_coded_agents import AlwaysCooperateIPDAgent, AlwaysDefectIPDAgent

__all__ = [
    "AlwaysCooperateIPDAgent",
    "AlwaysDefectIPDAgent",
]
src_code_for_reproducibility/markov_games/ipd/ipd_agent.py
ADDED
@@ -0,0 +1,115 @@
import copy
import json
import random
import re
from collections.abc import Callable
from copy import deepcopy
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Tuple, Union

from mllm.markov_games.agent import Agent
from mllm.markov_games.rollout_tree import AgentActLog, ChatTurn


@dataclass
class IPDAgentState:
    """
    Mutable state of an IPD agent: retry count, current round number, and
    bookkeeping for the chat history shared with the policy.
    """

    nb_retries: int
    round_nb: int
    chat_counter: int
    chat_history: List[ChatTurn]


@dataclass
class IPDAgent(Agent):
    seed: int
    agent_id: str
    agent_name: str
    policy: Callable[[List[Dict]], str]
    intro_prompt: str  # Introduction prompt explaining the game rules
    goal_prompt: str  # Prompt explaining the agent's goal
    strategy_prompt: str  # Prompt suggesting a strategy to the agent
    max_errors: int  # Maximum number of errors allowed before default action
    allow_reasoning: bool  # Whether to allow reasoning in the response
    max_reasoning_chars: int  # Maximum number of characters for reasoning
    cooperate_string: str  # String parsed as playing cooperate by the simulation
    defect_string: str  # String parsed as playing defect by the simulation

    def __post_init__(self):
        self.state = IPDAgentState(
            nb_retries=0, round_nb=0, chat_counter=0, chat_history=[]
        )

    async def act(self, observation) -> Tuple[Any, AgentActLog]:
        """
        Build the user prompt from the observation, query the policy for a
        move constrained to the cooperate/defect strings, and return the
        action together with a log of the new chat turns.
        """

        action = None
        round_nb = observation.round_nb

        # If it's the first round, we need to send the intro prompt
        if round_nb == 0 and self.state.chat_counter == 0:
            self.state.chat_history.append(
                ChatTurn(
                    agent_id=self.agent_id,
                    role="user",
                    content=self.intro_prompt,
                    is_state_end=True,
                )
            )

        # If new round, report the co-agent's last move
        if round_nb > self.state.round_nb:
            coagent_action = observation.last_coagent_move
            user_message = f"Last round, the other agent played {coagent_action}."
            self.state.chat_history.append(
                ChatTurn(
                    agent_id=self.agent_id,
                    role="user",
                    content=user_message,
                    is_state_end=True,
                )
            )

        # Query the policy for an action constrained by the regex
        output_chat_turn: ChatTurn = await self.policy(
            state=self.state.chat_history,
            agent_id=self.agent_id,
            regex=f"({self.cooperate_string}|{self.defect_string})",
        )
        self.state.chat_history.append(output_chat_turn)
        action = output_chat_turn.content

        agent_step_log = AgentActLog(
            chat_turns=self.state.chat_history[self.state.chat_counter :], info=None
        )
        self.state.chat_counter = len(self.state.chat_history)
        self.state.round_nb = round_nb

        return action, agent_step_log

    def get_safe_copy(self):
        """
        Return a safe copy of the agent.
        """
        agent_copy = copy.copy(self)
        agent_copy.state = copy.deepcopy(self.state)
        return agent_copy

    def reset(self):
        self.state = IPDAgentState(
            nb_retries=0, round_nb=0, chat_counter=0, chat_history=[]
        )

    def render(self):
        pass

    def close(self):
        pass

    def get_agent_info(self):
        pass
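A sketch of a policy callable compatible with how `IPDAgent.act` invokes `self.policy` (keyword arguments `state`, `agent_id`, `regex`, returning a `ChatTurn`). The regex parsing is illustrative, assumes the `(C|D)`-style pattern built above, and assumes `ChatTurn`'s remaining fields are optional.

import re
from mllm.markov_games.rollout_tree import ChatTurn

async def first_option_policy(state, agent_id, regex):
    # Take the first alternative the regex allows, e.g. "C" from "(C|D)"
    option = re.match(r"\(([^|)]+)\|", regex).group(1)
    return ChatTurn(agent_id=agent_id, role="assistant",
                    content=option, is_state_end=False)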
src_code_for_reproducibility/markov_games/ipd/ipd_simulation.py
ADDED
@@ -0,0 +1,162 @@
import copy
import random
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple

import numpy as np

from mllm.markov_games.markov_game import Simulation
from mllm.markov_games.rollout_tree import SimulationStepLog
from mllm.utils.get_coagent_id import get_coagent_id


@dataclass
class IPDState:
    """
    State of the Iterated Prisoner's Dilemma game.
    """

    round_nb: int = 0
    done: bool = False
    last_moves: Dict[str, str] | None = None


@dataclass
class IPDObs:
    """
    Observation in the Iterated Prisoner's Dilemma game.
    """

    round_nb: int
    last_coagent_move: str | None


class IPD(Simulation):
    """
    Iterated Prisoner's Dilemma simulation following the standard payoff structure.

    In each round of the game, two agents simultaneously choose to either cooperate (C) or defect (D).
    The payoffs are as follows:
    - If both cooperate: both receive the "reward" (usually 3 points)
    - If both defect: both receive the "punishment" (usually 1 point)
    - If one cooperates and one defects: the defector receives the "temptation" (usually 5 points)
      and the cooperator receives the "sucker" payoff (usually 0 points)

    The game is played for a specified number of rounds.
    """

    def __init__(
        self,
        agent_ids: List[str],
        agent_names: List[str],
        seed: int,
        rounds_per_game: int,
        reward: float,  # Both cooperate
        punishment: float,  # Both defect
        temptation: float,  # Defector's reward when other cooperates
        sucker: float,  # Cooperator's reward when other defects
        cooperate_actions: List[str],
        defect_actions: List[str],
    ):
        self.agent_ids = agent_ids
        self.agent_names = agent_names
        self.seed = seed
        self.rounds_per_game = rounds_per_game
        self.reward = reward
        self.punishment = punishment
        self.temptation = temptation
        self.sucker = sucker
        self.cooperate_actions = cooperate_actions
        self.defect_actions = defect_actions
        self.state = IPDState()

    def step(self, actions: Dict[str, str]) -> Tuple[bool, SimulationStepLog]:
        """
        Take a step in the environment using the provided actions.
        Here, the observations are just the states of the game.

        Args:
            actions (dict): A dictionary where keys are agent identifiers and values are actions ('C' or 'D').

        Returns:
            done (bool): Whether the episode has ended.
            step_log (SimulationStepLog): Rewards and normalized actions for this round.
        """

        # Calculate rewards using payoff matrix
        agent0_action = actions[self.agent_ids[0]]
        agent1_action = actions[self.agent_ids[1]]

        # Normalize actions to the standard cooperate/defect format;
        # unparseable output is treated as defection
        def normalize_action(action):
            if action in self.cooperate_actions:
                return "C"
            elif action in self.defect_actions:
                return "D"
            else:
                return "D"

        norm_action0 = normalize_action(agent0_action)
        norm_action1 = normalize_action(agent1_action)

        payoffs = {
            ("C", "C"): [self.reward, self.reward],
            ("C", "D"): [self.sucker, self.temptation],
            ("D", "C"): [self.temptation, self.sucker],
            ("D", "D"): [self.punishment, self.punishment],
        }

        round_rewards = {
            self.agent_ids[0]: payoffs[(norm_action0, norm_action1)][0],
            self.agent_ids[1]: payoffs[(norm_action0, norm_action1)][1],
        }

        # Update game state
        self.state.round_nb += 1
        self.state.last_moves = copy.deepcopy(actions)
        done = self.state.round_nb >= self.rounds_per_game
        step_log = SimulationStepLog(
            rewards=round_rewards,
            info={
                "actions": {
                    self.agent_ids[0]: norm_action0,
                    self.agent_ids[1]: norm_action1,
                }
            },
        )

        return done, step_log

    def get_obs(self):
        """Returns all agent observations in a dict.

        Returns:
            observations
        """
        observations = {}
        for agent_id in self.agent_ids:
            observations[agent_id] = self.get_obs_agent(agent_id)
        return observations

    def get_obs_agent(self, agent_id):
        """Returns the observation for agent_id."""
        if self.state.last_moves is not None:
            other_id = get_coagent_id(self.agent_ids, agent_id)
            last_coagent_move = self.state.last_moves[other_id]
        else:
            last_coagent_move = None
        obs = IPDObs(round_nb=self.state.round_nb, last_coagent_move=last_coagent_move)
        return obs

    def reset(self):
        """Returns initial observations and states"""
        self.state = IPDState()
        return self.get_obs()

    def get_safe_copy(self):
        """
        Return a safe copy of the simulation.
        """
        simulation_copy = copy.copy(self)
        simulation_copy.state = copy.deepcopy(self.state)
        return simulation_copy
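A worked example of the payoff lookup in `step` (agent ids and names are hypothetical):

sim = IPD(
    agent_ids=["a0", "a1"], agent_names=["Alice", "Bob"], seed=0,
    rounds_per_game=2, reward=3.0, punishment=1.0, temptation=5.0, sucker=0.0,
    cooperate_actions=["C"], defect_actions=["D"],
)
done, log = sim.step({"a0": "C", "a1": "D"})
# log.rewards == {"a0": 0.0, "a1": 5.0}: the cooperator takes the sucker
# payoff and the defector the temptation; done stays False until round 2.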
src_code_for_reproducibility/markov_games/ipd/ipd_statistics.py
ADDED
@@ -0,0 +1,18 @@
from __future__ import annotations

from typing import Dict, Callable, List, Tuple

from mllm.markov_games.rollout_tree import SimulationStepLog


def avg_reward(sl: SimulationStepLog) -> List[Tuple[str, float]]:
    # Skip steps whose agent ids mark pure buffer agents (no live reward signal)
    for aid in sl.rewards.keys():
        if "buffer" in str(aid) and "live" not in str(aid):
            return None
    # One value per agent at each step
    rewards_dict = {f"reward-{aid}": float(v) for aid, v in (sl.rewards or {}).items()}
    return [(key, value) for key, value in rewards_dict.items() if value is not None]


stat_functs: list[Callable[[SimulationStepLog], List[Tuple[str, float]]]] = [
    avg_reward,
]
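For illustration, `avg_reward` maps a step's reward dict to named series points (the agent ids here are made up):

sl = SimulationStepLog(rewards={"a0": 3.0, "a1": 1.0}, info=None)
avg_reward(sl)  # -> [("reward-a0", 3.0), ("reward-a1", 1.0)]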
src_code_for_reproducibility/markov_games/linear_runner.py
ADDED
@@ -0,0 +1,30 @@
import asyncio
import json
import os.path

from mllm.markov_games.markov_game import MarkovGame
from mllm.markov_games.rollout_tree import RolloutTreeNode, RolloutTreeRootNode


async def LinearRunner(
    markov_game: MarkovGame, output_folder: str
) -> RolloutTreeRootNode:
    """
    Generates a single trajectory without branching.
    """
    time_step = 0
    terminated = False
    root = RolloutTreeRootNode(
        id=markov_game.get_id(),
        crn_id=markov_game.get_crn_id(),
        agent_ids=markov_game.get_agent_ids(),
    )
    previous_node = root
    while not terminated:
        terminated, step_log = await markov_game.step()
        current_node = RolloutTreeNode(step_log=step_log, time_step=time_step)
        previous_node.child = current_node
        previous_node = current_node
        time_step += 1

    return root
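`LinearRunner` returns the root of a branchless chain where each node's `child` is the next step. A small helper to iterate the per-step logs, a sketch assuming only the attributes set above:

def iter_step_logs(root):
    # Walk the linked chain from the root, yielding (time_step, step_log)
    node = getattr(root, "child", None)
    while node is not None:
        yield node.time_step, node.step_log
        node = getattr(node, "child", None)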
src_code_for_reproducibility/markov_games/negotiation/README.md
ADDED
@@ -0,0 +1,40 @@
## Negotiation Games: core mechanics and variants

This family of games features two agents who, in each round, may briefly communicate and then simultaneously propose how to split a fixed resource (most commonly 10 coins). Rewards are the amount kept multiplied by an agent’s per-unit value. The starting speaker alternates deterministically across rounds.

Communication is optional and variant-dependent: some settings encourage rich messaging to share private information, while others remove messaging entirely to focus on allocation behavior.

Proportional splitting is used when the two proposals exceed the available total: allocations are scaled proportionally rather than discarded (see the sketch after the variant list). This preserves a useful learning signal even when agents over-claim.

### Variants (in increasing difficulty)

- No‑Press Split
  - Single item type (coins)
  - No communication; agents go straight to making split proposals, with the starting player alternating deterministically.
  - Motivation: mirrors no‑communication setups (e.g., Advantage Alignment) while keeping the split decision nontrivial.
  - Deterministic Mode: values are fixed and public: one agent values coins at 10, the other at 1 (alternates each round).
  - Stochastic Mode: values are random and uncorrelated.

- Trust-and-Split RPS (TAS-RPS)
  - Single item type (coins)
  - Each round, a rock–paper–scissors hand draw creates a strong asymmetry: the winner’s per-coin value is 10, the loser’s is 1.
  - Each agent initially sees only their own hand and must communicate to coordinate an optimal split.
  - Motivation: enforce large value disparity so one’s own value reveals little about the other’s (avoiding ceiling effects) and incentivize meaningful communication.

- Trust-and-Split (TAS)
  - Single item type (coins); each round, each agent’s per-coin value is independently sampled in a broad range (e.g., 1–20).
  - Each agent observes only their own value; they may use short messages to share and negotiate.
  - Motivation: a simple blend that tests whether agents learn to exchange private information and coordinate proportional, value-aware splits.

- Deal-or-No-Deal (DOND)
  - Introduced in [Deal or No Deal? End-to-End Learning for Negotiation Dialogues](https://arxiv.org/pdf/1706.05125)
  - Multiple item types (typically "books", "hats" and "balls") with limited stocks; each agent has its own per-type values.
  - A deal pays out only if both proposals exactly agree and respect the stock; otherwise no deal (zero reward) that round.
  - Motivation: a known benchmark closer to real-world bargaining, where both parties must explicitly agree.
src_code_for_reproducibility/markov_games/negotiation/dond_agent.py
ADDED
@@ -0,0 +1,61 @@
import copy
import re
from collections.abc import Callable
from dataclasses import dataclass
from typing import Any, Dict, List, Tuple

from mllm.markov_games.agent import Agent
from mllm.markov_games.rollout_tree import AgentActLog, ChatTurn
from mllm.markov_games.negotiation.dond_simulation import (
    DealNoDealObs,
)
from mllm.markov_games.negotiation.nego_simulation import Split
from mllm.markov_games.negotiation.nego_agent import NegotiationAgent, NegotiationAgentState


class DealNoDealAgent(NegotiationAgent):
    def __init__(
        self,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.intro_prompt = (
            "You are {agent}. You are playing an iterated game. "
            "At each round, you and the other agent will try to distribute among yourselves items of types {item_types}. "
            "You only know how much you value each item type, but not the other agent's values. "
            "You can communicate with the other agent by sending up to {quota_messages_per_agent_per_round} short messages per round. "
            "Each round, after exchanging messages, you and the other agent will submit a private proposal. "
            "A deal is accepted only if both proposals match exactly and are within stock; otherwise no deal (0 points for both at that round). "
            "The values of the items of the other agent at the previous round are revealed to you after each round. "
            "Your goal is: {goal}."
        )
        self.new_round_prompt = "New round {round_nb}. Items: {quantities}. Your values: {my_values}. "
        self.last_round_prompt = "Last round, other agent's values: {previous_values_coagent}. "
        self.send_split_prompt = "Respond with <split>...</split> where you propose how many items of each type you want to keep."

    def get_message_regex(self, observation: DealNoDealObs) -> str:
        return r"<message>[\s\S]{0,400}</message>"

    def get_split_regex(self, observation: DealNoDealObs) -> str:
        # One tag per item type, each constrained to the in-stock range
        parts = []
        for t in observation.item_types:
            s = int(observation.quantities.get(t, 0))
            allowed = "|".join(str(k) for k in range(0, s + 1))
            rng = f"({allowed})"
            parts.append(fr"<{t}>{rng}</{t}>")
        items_block = "".join(parts)
        return fr"(<split>{items_block}</split>)"

    def get_split_action(self, policy_output: str, observation: DealNoDealObs) -> Split:
        allocations: Dict[str, int] = {}
        for t in observation.item_types:
            m = re.search(fr"<{t}>([0-9]+)</{t}>", policy_output)
            allocations[t] = int(m.group(1)) if m else 0
        return Split(items_given_to_self=allocations)
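For a concrete (hypothetical) observation with stock {"books": 2, "hats": 1, "balls": 0}, `get_split_regex` produces a pattern that only admits in-stock allocations; a quick sanity check:

import re

# Pattern as built above for the hypothetical stock
pattern = r"(<split><books>(0|1|2)</books><hats>(0|1)</hats><balls>(0)</balls></split>)"
assert re.fullmatch(pattern, "<split><books>1</books><hats>1</hats><balls>0</balls></split>")
assert not re.fullmatch(pattern, "<split><books>3</books><hats>0</hats><balls>0</balls></split>")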
src_code_for_reproducibility/markov_games/negotiation/dond_simulation.py
ADDED
@@ -0,0 +1,153 @@
import copy
from dataclasses import dataclass
from typing import Any, Dict, List, Tuple

from numpy.random import default_rng

from mllm.markov_games.rollout_tree import SimulationStepLog
from mllm.markov_games.negotiation.nego_simulation import Split, NegotiationState, NegotiationObs, NegotiationSimulation
from mllm.utils.get_coagent_id import get_coagent_id


AgentId = str


@dataclass
class DealNoDealState(NegotiationState):
    item_types: List[str]
    values: Dict[AgentId, Dict[str, int]]


@dataclass
class DealNoDealObs(NegotiationObs):
    my_values: Dict[str, int]
    item_types: List[str]
    previous_values_coagent: Dict[str, int] | None


def random_partition_integer(rng, total: int, parts: int) -> List[int]:
    """Partition `total` into `parts` nonnegative integers via random cut points."""
    if parts <= 0:
        return []
    if total <= 0:
        return [0 for _ in range(parts)]
    cuts = sorted(rng.integers(0, total + 1, size=parts - 1).tolist())
    vals = []
    prev = 0
    for c in cuts + [total]:
        vals.append(c - prev)
        prev = c
    return vals


class DealNoDealSimulation(NegotiationSimulation):

    def __init__(
        self,
        item_types: List[str] = ["books", "hats", "balls"],
        *args,
        **kwargs,
    ):
        super().__init__(item_types=item_types, *args, **kwargs)
        self.reset()

    def _other(self, agent_id: AgentId) -> AgentId:
        return get_coagent_id(self.agent_ids, agent_id)

    def _sample_stock(self) -> Dict[str, int]:
        # Total items between 5 and 7
        total_items = int(self.rng.integers(5, 8))
        # Nonnegative per-type counts summing to total_items (zeros allowed)
        parts = random_partition_integer(self.rng, total_items, len(self.item_types))
        return {t: int(c) for t, c in zip(self.item_types, parts)}

    def _sample_values_pair(self) -> Dict[AgentId, Dict[str, int]]:
        # Each agent has integer non-negative values that sum to 10;
        # each item type is valued by at least one agent, and
        # some item type is valued by both agents.
        while True:
            vals_a = random_partition_integer(self.rng, 10, len(self.item_types))
            vals_b = random_partition_integer(self.rng, 10, len(self.item_types))
            a = {t: int(v) for t, v in zip(self.item_types, vals_a)}
            b = {t: int(v) for t, v in zip(self.item_types, vals_b)}
            # Each item valued by at least one agent
            ok1 = all((a[t] > 0) or (b[t] > 0) for t in self.item_types)
            # Some item valued by both agents
            ok2 = any((a[t] > 0) and (b[t] > 0) for t in self.item_types)
            if ok1 and ok2:
                return {self.agent_ids[0]: a, self.agent_ids[1]: b}

    def _is_valid_allocation(self, allocation: Dict[str, int], stock: Dict[str, int]) -> bool:
        for t in self.item_types:
            v = allocation.get(t)
            if v is None:
                return False
            if not isinstance(v, int):
                return False
            if v < 0 or v > int(stock.get(t, 0)):
                return False
        return True

    def set_new_round_of_variant(self):
        # Keep the same values, resample the stock
        self.state.quantities = self._sample_stock()

    def get_info_of_variant(self, state: NegotiationState, actions: Dict[AgentId, Any]) -> Dict[str, Any]:
        return {
            "quantities": copy.deepcopy(state.quantities),
            "values": copy.deepcopy(state.values),
            "splits": copy.deepcopy(state.splits),
        }

    def get_rewards(self, splits: Dict[AgentId, Split]) -> Dict[AgentId, float]:
        """
        Returns the rewards for each agent. Proposals pay out only if they are
        complementary for every item type; otherwise both agents get 0.
        """
        split_a = splits[self.agent_ids[0]].items_given_to_self
        split_b = splits[self.agent_ids[1]].items_given_to_self
        rewards = {self.agent_ids[0]: 0.0, self.agent_ids[1]: 0.0}
        for t in self.item_types:
            # If not complementary, no deal: both get 0
            if split_a[t] + split_b[t] != self.state.quantities[t]:
                return {self.agent_ids[0]: 0.0, self.agent_ids[1]: 0.0}
            rewards[self.agent_ids[0]] += split_a[t] * self.state.values[self.agent_ids[0]][t]
            rewards[self.agent_ids[1]] += split_b[t] * self.state.values[self.agent_ids[1]][t]
        return rewards

    def get_obs(self):
        return {agent_id: self.get_obs_agent(agent_id) for agent_id in self.agent_ids}

    def get_obs_agent(self, agent_id):
        other_id = self._other(agent_id)
        obs = DealNoDealObs(
            round_nb=self.state.round_nb,
            last_message=self.state.last_message,
            current_agent=self.state.current_agent,
            quantities=copy.deepcopy(self.state.quantities),
            value=0.0,  # unused in DOND
            other_agent_split=None,  # not meaningful until split
            split_phase=self.state.split_phase,
            quota_messages_per_agent_per_round=self.quota_messages_per_agent_per_round,
            my_values=copy.deepcopy(self.state.values[agent_id]),
            item_types=list(self.item_types),
            previous_values_coagent=copy.deepcopy(self.state.values.get(other_id, {})),
        )
        return obs

    def reset(self):
        start_agent = self.agent_ids[self._starting_agent_index]
        stock = self._sample_stock()
        values = self._sample_values_pair()
        self.state = DealNoDealState(
            round_nb=0,
            last_message="",
            current_agent=start_agent,
            quantities=stock,
            values=values,
            previous_values=None,
            splits={aid: None for aid in self.agent_ids},
            nb_messages_sent={aid: 0 for aid in self.agent_ids},
            split_phase=False,
            item_types=list(self.item_types),
        )
        return self.get_obs()
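A worked example of the complementarity rule in `get_rewards` (stock, proposals, and values are hypothetical):

stock = {"books": 2, "hats": 1, "balls": 3}
keep_a = {"books": 2, "hats": 0, "balls": 1}
keep_b = {"books": 0, "hats": 1, "balls": 2}
values_a = {"books": 1, "hats": 3, "balls": 2}

is_deal = all(keep_a[t] + keep_b[t] == stock[t] for t in stock)
reward_a = sum(keep_a[t] * values_a[t] for t in stock) if is_deal else 0
# is_deal is True here, so reward_a == 2*1 + 0*3 + 1*2 == 4; any per-type
# mismatch would zero out both agents' rewards for the round.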
src_code_for_reproducibility/markov_games/negotiation/nego_agent.py
ADDED
@@ -0,0 +1,242 @@
| 1 |
+
import copy
|
| 2 |
+
from abc import abstractmethod
|
| 3 |
+
from collections.abc import Callable
|
| 4 |
+
from dataclasses import dataclass
|
| 5 |
+
from typing import Any, Dict, List, Tuple
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
|
| 9 |
+
from mllm.markov_games.agent import Agent
|
| 10 |
+
from mllm.markov_games.negotiation.nego_simulation import Message, NegotiationObs, Split
|
| 11 |
+
from mllm.markov_games.rollout_tree import AgentActLog, ChatTurn
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
@dataclass
|
| 15 |
+
class NegotiationAgentState:
|
| 16 |
+
round_nb: int
|
| 17 |
+
nb_messages_sent_this_round: int
|
| 18 |
+
chat_counter: int
|
| 19 |
+
chat_history: List[ChatTurn]
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class NegotiationAgent(Agent):
|
| 23 |
+
def __init__(
|
| 24 |
+
self,
|
| 25 |
+
seed: int,
|
| 26 |
+
agent_id: str,
|
| 27 |
+
agent_name: str,
|
| 28 |
+
policy: Callable[[List[Dict]], str],
|
| 29 |
+
goal: str,
|
| 30 |
+
exploration_prompts: List[str] = [],
|
| 31 |
+
exploration_prompt_probs: List[float] = [],
|
| 32 |
+
):
|
| 33 |
+
self.seed = seed
|
| 34 |
+
self.agent_id = agent_id
|
| 35 |
+
self.agent_name = agent_name
|
| 36 |
+
self.policy = policy
|
| 37 |
+
self.goal = goal
|
| 38 |
+
self.exploration_prompts_toggled = len(exploration_prompts) > 0
|
| 39 |
+
if self.exploration_prompts_toggled:
|
| 40 |
+
exploration_prompts = copy.deepcopy(exploration_prompts)
|
| 41 |
+
exploration_prompts.append(None)
|
| 42 |
+
self.exploration_prompts = exploration_prompts
|
| 43 |
+
self.exploration_prompt_probs = np.array(exploration_prompt_probs)
|
| 44 |
+
assert self.exploration_prompt_probs.sum() <= 1
|
| 45 |
+
assert np.all(self.exploration_prompt_probs >= 0)
|
| 46 |
+
self.exploration_prompt_probs = np.append(
|
| 47 |
+
self.exploration_prompt_probs, 1 - self.exploration_prompt_probs.sum()
|
| 48 |
+
)
|
| 49 |
+
self.state = NegotiationAgentState(
|
| 50 |
+
round_nb=0, nb_messages_sent_this_round=0, chat_counter=0, chat_history=[]
|
| 51 |
+
)
|
| 52 |
+
|
| 53 |
+
# Implemented in variants
|
| 54 |
+
self.intro_prompt = ""
|
| 55 |
+
self.new_round_prompt = ""
|
| 56 |
+
self.last_round_prompt = ""
|
| 57 |
+
self.send_split_prompt = ""
|
| 58 |
+
self.wait_for_message_prompt = ""
|
| 59 |
+
self.last_message_prompt = ""
|
| 60 |
+
self.send_message_prompt = ""
|
| 61 |
+
|
| 62 |
+
@abstractmethod
|
| 63 |
+
def get_message_regex(self, observation: NegotiationObs) -> str:
|
| 64 |
+
pass
|
| 65 |
+
|
| 66 |
+
@abstractmethod
|
| 67 |
+
def get_split_regex(self, observation: NegotiationObs) -> str:
|
| 68 |
+
pass
|
| 69 |
+
|
| 70 |
+
@abstractmethod
|
| 71 |
+
def get_split_action(
|
| 72 |
+
self, policy_output: str, observation: NegotiationObs
|
| 73 |
+
) -> Split:
|
| 74 |
+
pass
|
| 75 |
+
|
| 76 |
+
async def act(self, observation: NegotiationObs) -> Tuple[Any, AgentActLog]:
|
| 77 |
+
def dict_to_str(d: dict) -> str:
|
| 78 |
+
return ", ".join(f"{v} {k}" for k, v in d.items())
|
| 79 |
+
|
| 80 |
+
def dict_to_eq_str(d: dict) -> str:
|
| 81 |
+
return ", ".join(f"{k}={v}" for k, v in d.items())
|
| 82 |
+
|
| 83 |
+
is_our_turn = observation.current_agent == self.agent_id
|
| 84 |
+
action: Any = None
|
| 85 |
+
round_nb = observation.round_nb
|
| 86 |
+
|
| 87 |
+
prompt_parts: List[str] = []
|
| 88 |
+
obs_ctx = vars(observation)
|
| 89 |
+
obs_ctx_formmated = obs_ctx.copy()
|
| 90 |
+
for key in obs_ctx_formmated:
|
| 91 |
+
if isinstance(obs_ctx_formmated[key], dict) and "value" not in key:
|
| 92 |
+
obs_ctx_formmated[key] = dict_to_str(obs_ctx_formmated[key])
|
| 93 |
+
elif isinstance(obs_ctx_formmated[key], dict) and "value" in key:
|
| 94 |
+
obs_ctx_formmated[key] = dict_to_eq_str(obs_ctx_formmated[key])
|
| 95 |
+
|
| 96 |
+
#######################################
|
| 97 |
+
# build user prompt
|
| 98 |
+
#######################################
|
| 99 |
+
|
| 100 |
+
# First-ever call
|
| 101 |
+
is_intro = round_nb == 0 and self.state.chat_counter == 0
|
| 102 |
+
if is_intro:
|
| 103 |
+
prompt_parts.append(
|
| 104 |
+
self.intro_prompt.format(
|
| 105 |
+
goal=self.goal, agent=self.agent_name, **obs_ctx_formmated
|
| 106 |
+
)
|
| 107 |
+
)
|
| 108 |
+
|
| 109 |
+
# New round
|
| 110 |
+
is_new_round = round_nb > self.state.round_nb
|
| 111 |
+
if is_new_round or is_intro:
|
| 112 |
+
self.state.nb_messages_sent_this_round = 0
|
| 113 |
+
if not is_intro:
|
| 114 |
+
prompt_parts.append(self.last_round_prompt.format(**obs_ctx_formmated))
|
| 115 |
+
prompt_parts.append(self.new_round_prompt.format(**obs_ctx_formmated))
|
| 116 |
+
if self.exploration_prompts_toggled:
|
| 117 |
+
exploration_prompt = self.exploration_prompts[
|
| 118 |
+
np.random.choice(
|
| 119 |
+
len(self.exploration_prompts), p=self.exploration_prompt_probs
|
| 120 |
+
)
|
| 121 |
+
]
|
| 122 |
+
if exploration_prompt is not None:
|
| 123 |
+
prompt_parts.append(exploration_prompt)
|
| 124 |
+
self.state.round_nb = round_nb
|
| 125 |
+
|
| 126 |
+
# Wait for message
|
| 127 |
+
if not is_our_turn and not observation.split_phase:
|
| 128 |
+
prompt_parts.append(
|
| 129 |
+
self.wait_for_message_prompt.format(**obs_ctx_formmated)
|
| 130 |
+
)
|
| 131 |
+
|
| 132 |
+
# Get last message
|
| 133 |
+
if is_our_turn and not is_new_round and not is_intro:
|
| 134 |
+
prompt_parts.append(self.last_message_prompt.format(**obs_ctx_formmated))
|
| 135 |
+
|
| 136 |
+
# Prompt to send message
|
| 137 |
+
must_send_message = not observation.split_phase and is_our_turn
|
| 138 |
+
if must_send_message:
|
| 139 |
+
prompt_parts.append(self.send_message_prompt.format(**obs_ctx_formmated))
|
| 140 |
+
|
| 141 |
+
# Prompt to give split
|
| 142 |
+
must_send_split = not must_send_message and observation.split_phase
|
| 143 |
+
if must_send_split:
            var_names = ["x", "y", "z", "w"]  # Extend as needed
            items_str = ", ".join(
                [
                    f"{var_names[i]} {item}"
                    for i, item in enumerate(obs_ctx["quantities"].keys())
                ]
            )
            ranges_str = ", ".join(
                [
                    f"{var_names[i]}: 0-{obs_ctx['quantities'][item]} (integer)"
                    for i, item in enumerate(obs_ctx["quantities"].keys())
                ]
            )
            proposal_style = f"Proposal: {items_str} where {ranges_str}."
            proposal_style2 = (
                f"<items_to_self> {items_str} </items_to_self> where {ranges_str}."
            )
            prompt_parts.append(
                self.send_split_prompt.format(
                    proposal_style=proposal_style,
                    proposal_style2=proposal_style2,
                    **obs_ctx_formmated,
                )
            )

        # Append one ChatTurn with is_state_end=True
        user_prompt = "\n".join(prompt_parts)
        self.state.chat_history.append(
            ChatTurn(
                agent_id=self.agent_id,
                role="user",
                content=user_prompt,
                is_state_end=True,
            )
        )

        #######################################
        # Get policy action
        #######################################

        # Query policy for the appropriate format
        if must_send_message:
            return_regex = self.get_message_regex(observation)
            policy_output = await self.policy(
                state=self.state.chat_history,
                agent_id=self.agent_id,
                regex=return_regex,
            )
            self.state.chat_history.append(
                ChatTurn(
                    agent_id=self.agent_id,
                    role="assistant",
                    content=policy_output.content,
                    reasoning_content=policy_output.reasoning_content,
                    log_probs=policy_output.log_probs,
                    out_token_ids=policy_output.out_token_ids,
                    is_state_end=False,
                )
            )
            action = Message(message=policy_output.content)
            self.state.nb_messages_sent_this_round += 1

        elif must_send_split:
            return_regex = self.get_split_regex(observation)
            policy_output = await self.policy(
                state=self.state.chat_history,
                agent_id=self.agent_id,
                regex=return_regex,
            )
            self.state.chat_history.append(
                ChatTurn(
                    agent_id=self.agent_id,
                    role="assistant",
                    content=policy_output.content,
                    reasoning_content=policy_output.reasoning_content,
                    log_probs=policy_output.log_probs,
                    out_token_ids=policy_output.out_token_ids,
                    is_state_end=False,
                )
            )
            action = self.get_split_action(policy_output.content, observation)
        else:
            action = None

        agent_step_log = AgentActLog(
            chat_turns=self.state.chat_history[self.state.chat_counter :], info=None
        )
        self.state.chat_counter = len(self.state.chat_history)
        return action, agent_step_log

    def get_safe_copy(self):
        agent_copy = copy.copy(self)
        agent_copy.state = copy.deepcopy(self.state)
        return agent_copy

    def reset(self):
        self.state = NegotiationAgentState(
            round_nb=0, nb_messages_sent_this_round=0, chat_counter=0, chat_history=[]
        )
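As a quick illustration (not part of the file above), here is roughly what the proposal-format strings built above look like for a hypothetical two-item observation; the `quantities` dict here is a stand-in for the real observation context:

quantities = {"books": 3, "hats": 2}  # hypothetical
var_names = ["x", "y", "z", "w"]
items_str = ", ".join(f"{var_names[i]} {item}" for i, item in enumerate(quantities))
ranges_str = ", ".join(
    f"{var_names[i]}: 0-{quantities[item]} (integer)"
    for i, item in enumerate(quantities)
)
print(f"Proposal: {items_str} where {ranges_str}.")
# Proposal: x books, y hats where x: 0-3 (integer), y: 0-2 (integer).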
src_code_for_reproducibility/markov_games/negotiation/nego_hard_coded_policies.py
ADDED
@@ -0,0 +1,64 @@
import asyncio
from typing import Any, Optional, Tuple

from mllm.markov_games.negotiation.nego_agent import NegotiationAgent
from mllm.markov_games.negotiation.no_press_nego_agent import NoPressAgent
from mllm.markov_games.negotiation.no_press_nego_simulation import NoPressObs
from mllm.markov_games.rollout_tree import AgentActLog, ChatTurn
from mllm.markov_games.negotiation.nego_simulation import Split


class HardCodedNegoWelfareMaximizingPolicy(NoPressAgent):
    async def act(self, observation: NoPressObs) -> Tuple[Any, AgentActLog]:
        """
        Policy that gives all of the items to the agent who values them more.
        If an item is valued equally by both agents, it is split evenly.
        """
        quantities = observation.quantities
        my_values = observation.value
        other_values = observation.other_value

        items_given_to_self = {}
        for item, qty in quantities.items():
            my_v = float(my_values.get(item, 0))
            other_v = float(other_values.get(item, 0))
            if my_v == other_v:
                items_given_to_self[item] = int(qty) / 2
            else:
                items_given_to_self[item] = int(qty if my_v > other_v else 0)

        action = Split(items_given_to_self=items_given_to_self)
        act_log = AgentActLog(
            chat_turns=[
                ChatTurn(
                    agent_id=self.agent_id,
                    role="assistant",
                    content="Using welfare-maximizing split (all to higher-value agent).",
                    is_state_end=True,
                )
            ],
            info=None,
        )
        return action, act_log


class HardCodedNegoGreedyPolicy(NoPressAgent):
    async def act(self, observation: NoPressObs) -> Tuple[Any, AgentActLog]:
        """
        Always gives itself all of the items.
        """
        quantities = observation.quantities
        items_given_to_self = {item: int(qty) for item, qty in quantities.items()}

        action = Split(items_given_to_self=items_given_to_self)
        act_log = AgentActLog(
            chat_turns=[
                ChatTurn(
                    agent_id=self.agent_id,
                    role="assistant",
                    content="Using greedy split (keep all items).",
                    is_state_end=True,
                )
            ],
            info=None,
        )
        return action, act_log
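For concreteness (a standalone sketch, not part of the file above), the welfare-maximizing rule on hypothetical values gives each item entirely to the agent who values it more, splitting ties evenly:

quantities = {"coins": 10, "gems": 4}  # hypothetical round
my_values = {"coins": 10, "gems": 2}
other_values = {"coins": 1, "gems": 2}
items_given_to_self = {}
for item, qty in quantities.items():
    if my_values[item] == other_values[item]:
        items_given_to_self[item] = qty / 2  # tie: split evenly
    else:
        items_given_to_self[item] = qty if my_values[item] > other_values[item] else 0
print(items_given_to_self)  # {'coins': 10, 'gems': 2.0}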
src_code_for_reproducibility/markov_games/negotiation/nego_simulation.py
ADDED
@@ -0,0 +1,241 @@
"""
Negotiation simulation environment.
The "other" agent is fixed at the start of every round, even though the current
(acting) agent alternates across message turns within a round.
"""
import copy
from abc import abstractmethod
from dataclasses import dataclass
from typing import Any, Dict, List, Tuple

from numpy.random import default_rng

from mllm.markov_games.rollout_tree import SimulationStepLog
from mllm.markov_games.simulation import Simulation
from mllm.utils.get_coagent_id import get_coagent_id

AgentId = str


@dataclass
class Split:
    items_given_to_self: Dict[str, int]


@dataclass
class Message:
    message: str


@dataclass  # gets extended by variants
class NegotiationState:
    round_nb: int
    last_message: str
    current_agent: AgentId
    quantities: Dict[str, int]
    values: Dict[AgentId, Dict[str, float]]
    splits: Dict[AgentId, Split | None]
    nb_messages_sent: Dict[AgentId, int]
    previous_values: Dict[AgentId, Dict[str, float]] | None
    previous_splits: Dict[AgentId, Dict[str, int] | None] | None
    previous_points: Dict[AgentId, float] | None
    previous_quantities: Dict[str, int] | None
    split_phase: bool


@dataclass  # gets extended by variants
class NegotiationObs:
    round_nb: int
    last_message: str
    quota_messages_per_agent_per_round: int
    current_agent: AgentId
    other_agent: str
    quantities: Dict[str, int]
    item_types: List[str]
    value: Dict[str, int]
    split_phase: bool
    last_split_agent: Dict[str, int] | None
    last_value_agent: Dict[str, int] | None
    last_points_agent: float | None
    last_split_coagent: Dict[str, int] | None
    last_value_coagent: Dict[str, int] | None
    last_points_coagent: float | None
    last_quantities: Dict[str, int] | None


def compute_tas_style_rewards(
    agent_ids: List[AgentId],
    values: Dict[AgentId, float],
    splits: Dict[AgentId, Split],
    quantities: Dict[str, int],
) -> Dict[AgentId, float]:
    """
    TAS-like reward computation: if the sum of claimed amounts for an item
    exceeds its quantity, allocate proportionally; otherwise use the claimed
    amounts directly. Each agent's reward is quantity_kept * per-item value,
    summed over items.
    """
    a0, a1 = agent_ids[0], agent_ids[1]
    r0, r1 = 0.0, 0.0

    for item in quantities:
        max_item = quantities[item]
        item_to_self_0 = int(
            (splits[a0].items_given_to_self.get(item, 0))
            if splits[a0] is not None
            else 0
        )
        item_to_self_1 = int(
            (splits[a1].items_given_to_self.get(item, 0))
            if splits[a1] is not None
            else 0
        )
        denom = max(int(max_item), item_to_self_0 + item_to_self_1)
        q0 = float(max_item) * float(item_to_self_0) / float(denom)
        q1 = float(max_item) * float(item_to_self_1) / float(denom)
        if not isinstance(values[a0], dict):
            # Scalar per-item values (simple variants)
            r0 += q0 * float(values[a0])
            r1 += q1 * float(values[a1])
        else:
            # Per-item value dicts
            r0 += q0 * float(values[a0][item])
            r1 += q1 * float(values[a1][item])
    return {a0: r0, a1: r1}


class NegotiationSimulation(Simulation):
    def __init__(
        self,
        agent_ids: List[AgentId],
        agent_names: List[str],
        seed: int,
        nb_of_rounds: int,
        quota_messages_per_agent_per_round: int,
        item_types: List[str] | None = None,
    ):
        self.seed = seed
        self.rng = default_rng(self.seed)
        self.agent_ids = list(agent_ids)
        self.agent_names = agent_names
        self.agent_id_to_name = {
            agent_id: agent_name for agent_id, agent_name in zip(agent_ids, agent_names)
        }
        self.nb_of_rounds = int(nb_of_rounds)
        self.quota_messages_per_agent_per_round = int(
            quota_messages_per_agent_per_round
        )
        if item_types is not None:
            self.item_types = [item.lower() for item in item_types]
        else:
            self.item_types = ["coins"]
        self.state: NegotiationState | None = None
        self._starting_agent_index = self.rng.choice([0, 1])
        self.reset()

    def _other(self, agent_id: AgentId) -> AgentId:
        return get_coagent_id(self.agent_ids, agent_id)

    @abstractmethod
    def set_new_round_of_variant(self):
        pass

    @abstractmethod
    def get_info_of_variant(
        self, state: NegotiationState, actions: Dict[AgentId, Any]
    ) -> Dict[str, Any]:
        pass

    def step(self, actions: Any) -> Tuple[bool, SimulationStepLog]:
        """
        Returns (terminated, step_log).
        """
        assert self.state is not None
        current_agent = self.state.current_agent
        a0, a1 = self.agent_ids[0], self.agent_ids[1]
        action = actions.get(current_agent)

        # Split phase: require both splits in the same timestep
        if self.state.split_phase:
            action_a0 = actions.get(a0)
            action_a1 = actions.get(a1)
            have_both_splits = isinstance(action_a0, Split) and isinstance(
                action_a1, Split
            )
            if not have_both_splits:
                rewards = {agent_id: 0.0 for agent_id in self.agent_ids}
                return False, SimulationStepLog(
                    rewards=rewards, info={"type": "waiting_for_splits"}
                )

            # Record splits
            self.state.splits[a0] = action_a0
            self.state.splits[a1] = action_a1

            # Compute rewards and end round
            rewards = self.get_rewards(self.state.splits)

            # Info
            info = self.get_info_of_variant(self.state, actions)

            # Prepare next round: alternate the starting agent
            self.state.round_nb += 1
            self._starting_agent_index = 1 - self._starting_agent_index
            self.state.current_agent = self.agent_ids[self._starting_agent_index]
            self.state.previous_values = copy.deepcopy(self.state.values)
            self.state.previous_splits = copy.deepcopy(self.state.splits)
            self.state.previous_quantities = copy.deepcopy(self.state.quantities)
            self.state.previous_points = copy.deepcopy(rewards)
            self.state.last_message = ""
            self.set_new_round_of_variant()  # variant specific
            self.state.splits = {agent_id: None for agent_id in self.agent_ids}
            self.state.nb_messages_sent = {agent_id: 0 for agent_id in self.agent_ids}
            is_last_timestep_in_round = True
            done = self.state.round_nb >= self.nb_of_rounds

        # Message phase
        elif isinstance(action, Message):
            self.state.last_message = action.message
            self.state.nb_messages_sent[current_agent] += 1

            # Move turn to other agent
            self.state.current_agent = self._other(current_agent)

            # If both agents have reached their message quota, enter split phase
            if all(
                self.state.nb_messages_sent[agent_id]
                >= self.quota_messages_per_agent_per_round
                for agent_id in self.agent_ids
            ):
                self.state.split_phase = True
            is_last_timestep_in_round = False
            done = False
            rewards = {agent_id: 0.0 for agent_id in self.agent_ids}
            info = {"type": "message"}

        info[
            "is_last_timestep_in_round"
        ] = is_last_timestep_in_round  # Used later to group round timesteps if needed
        return done, SimulationStepLog(rewards=rewards, info=info)

    def get_obs(self):
        """Returns all agent observations in a dict."""
        return {agent_id: self.get_obs_agent(agent_id) for agent_id in self.agent_ids}

    @abstractmethod
    def get_rewards(self, splits: Dict[AgentId, Split]) -> Dict[AgentId, float]:
        pass

    @abstractmethod
    def get_obs_agent(self, agent_id):
        pass

    def get_state(self):
        return self.state

    def get_safe_copy(self):
        """Return a safe copy of the simulation."""
        simulation_copy = copy.copy(self)
        simulation_copy.state = copy.deepcopy(self.state)
        return simulation_copy

    @abstractmethod
    def reset(self) -> dict[AgentId, NegotiationObs]:
        pass
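To make the proportional allocation in compute_tas_style_rewards concrete, here is the arithmetic for one over-claimed item (a standalone sketch, not part of the file):

# 10 coins available; both agents claim 7, so claims are scaled down:
quantity, claim_0, claim_1 = 10, 7, 7
denom = max(quantity, claim_0 + claim_1)  # 14
kept_0 = quantity * claim_0 / denom       # 5.0 coins
kept_1 = quantity * claim_1 / denom       # 5.0 coins
reward_0 = kept_0 * 10.0                  # per-coin value 10 -> 50.0 points
reward_1 = kept_1 * 1.0                   # per-coin value 1  -> 5.0 points
# If claims sum to <= quantity, denom == quantity and each agent keeps exactly its claim.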
src_code_for_reproducibility/markov_games/negotiation/no_press_nego_simulation.py
ADDED
@@ -0,0 +1,168 @@
import copy
from collections import defaultdict
from dataclasses import dataclass
from typing import Any, Dict, List, Literal, Tuple

from mllm.markov_games.negotiation.nego_simulation import (
    NegotiationObs,
    NegotiationSimulation,
    NegotiationState,
    Split,
    compute_tas_style_rewards,
)

AgentId = str


@dataclass
class NoPressState(NegotiationState):
    pass


@dataclass
class NoPressObs(NegotiationObs):
    other_value: Dict[str, float]


class NoPressSimulation(NegotiationSimulation):
    def __init__(
        self,
        game_type: Literal["10-1-exclusive", "10-1-ties", "1-to-20"] = "1-to-20",
        same_round_value: bool = True,
        atleast_one_conflict: bool = False,
        *args,
        **kwargs,
    ):
        self.game_type = game_type
        self.same_round_value = same_round_value
        self.atleast_one_conflict = atleast_one_conflict
        super().__init__(*args, **kwargs)

    def _sample_values(self) -> Dict[AgentId, dict]:
        values = defaultdict(dict)
        if self.state is None:
            item_types = self.item_types
        else:
            item_types = list(self.state.quantities.keys())
        while True:
            for item in item_types:
                if self.game_type == "10-1-exclusive":
                    v = int(self.rng.choice([1, 10]))
                    values[self.agent_ids[0]][item] = v
                    values[self.agent_ids[1]][item] = 10 if v == 1 else 1
                elif self.game_type == "10-1-ties":
                    for aid in self.agent_ids:
                        values[aid][item] = int(self.rng.choice([1, 10]))
                elif self.game_type == "1-to-20":
                    for aid in self.agent_ids:
                        values[aid][item] = int(self.rng.integers(1, 21))
            if self.atleast_one_conflict:
                has_conflict = False
                for item in item_types:
                    agent_values_for_item = [
                        values[aid][item] for aid in self.agent_ids
                    ]
                    if len(set(agent_values_for_item)) > 1:
                        has_conflict = True
                        break
                if not has_conflict:
                    continue
            agent_values = [sum(v.values()) for v in values.values()]
            if len(set(agent_values)) == 1 or not self.same_round_value:
                break
        return values

    def _sample_quantities(self) -> Dict[str, int]:
        return {item.lower(): 10 for item in self.item_types}

    def set_new_round_of_variant(self):
        self.state.quantities = self._sample_quantities()
        self.state.values = self._sample_values()
        self.state.split_phase = True

    def get_info_of_variant(
        self, state: NegotiationState, actions: Dict[AgentId, Any]
    ) -> Dict[str, Any]:
        return {
            "quantities": copy.deepcopy(state.quantities),
            "values": copy.deepcopy(state.values),
            "splits": copy.deepcopy(state.splits),
        }

    def get_rewards(self, splits: Dict[AgentId, Split]) -> Dict[AgentId, float]:
        return compute_tas_style_rewards(
            self.agent_ids, self.state.values, splits, self.state.quantities
        )

    def get_obs(self):
        return {agent_id: self.get_obs_agent(agent_id) for agent_id in self.agent_ids}

    def get_obs_agent(self, agent_id):
        other_id = self._other(agent_id)
        last_value_coagent = (
            None
            if self.state.previous_values is None
            else self.state.previous_values.get(other_id)
        )
        last_points_coagent = (
            None
            if self.state.previous_points is None
            else round(self.state.previous_points.get(other_id), 1)
        )
        last_value_agent = (
            None
            if self.state.previous_values is None
            else self.state.previous_values.get(agent_id)
        )
        last_points_agent = (
            None
            if self.state.previous_points is None
            else round(self.state.previous_points.get(agent_id), 1)
        )
        last_split_coagent = None
        last_split_agent = None
        if self.state.previous_splits is not None:
            last_split_coagent = self.state.previous_splits[
                other_id
            ].items_given_to_self
            last_split_agent = self.state.previous_splits[agent_id].items_given_to_self
        obs = NoPressObs(
            round_nb=self.state.round_nb,
            last_message="",
            quota_messages_per_agent_per_round=self.quota_messages_per_agent_per_round,
            current_agent=self.state.current_agent,
            other_agent=self.agent_id_to_name[other_id],
            quantities=self.state.quantities,
            item_types=self.item_types,
            value=self.state.values[agent_id],
            split_phase=self.state.split_phase,
            last_split_agent=last_split_agent,
            last_value_agent=last_value_agent,
            last_points_agent=last_points_agent,
            last_split_coagent=last_split_coagent,
            last_value_coagent=last_value_coagent,
            last_points_coagent=last_points_coagent,
            other_value=self.state.values[other_id],
            last_quantities=self.state.previous_quantities,
        )
        return obs

    def reset(self):
        start_agent = self.agent_ids[self._starting_agent_index]
        quantities = self._sample_quantities()
        values = self._sample_values()
        self.state = NoPressState(
            round_nb=0,
            last_message="",
            current_agent=start_agent,
            quantities=quantities,
            values=values,
            previous_values=None,
            splits={aid: None for aid in self.agent_ids},
            nb_messages_sent={aid: 0 for aid in self.agent_ids},
            split_phase=True,
            previous_splits=None,
            previous_points=None,
            previous_quantities=None,
        )
        return self.get_obs()
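The rejection-sampling loop in _sample_values can be read as: draw per-item values, optionally reject draws with no conflicting item, and redraw until both agents' round totals match (when same_round_value is set). A minimal standalone sketch with hypothetical agent ids:

from numpy.random import default_rng

rng = default_rng(0)
agent_ids = ["agent_0", "agent_1"]  # hypothetical
items = ["coins"]
while True:
    values = {aid: {it: int(rng.integers(1, 21)) for it in items} for aid in agent_ids}
    totals = [sum(v.values()) for v in values.values()]
    if len(set(totals)) == 1:  # accept only when round totals are equal
        break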
src_code_for_reproducibility/markov_games/negotiation/tas_agent.py
ADDED
@@ -0,0 +1,108 @@
from mllm.markov_games.negotiation.nego_agent import NegotiationAgent
from mllm.markov_games.negotiation.nego_simulation import Split
from mllm.markov_games.negotiation.tas_simulation import TrustAndSplitObs


class TrustAndSplitAgent(NegotiationAgent):
    def __init__(self, num_message_chars, *args, **kwargs):
        self.num_message_chars = num_message_chars
        super().__init__(*args, **kwargs)
        self.intro_prompt = (
            "Welcome to an iterated game. You are {agent}. The other agent is {other_agent}.\n"
            "Setup:\n"
            "1. The game has multiple independent rounds.\n"
            "2. In each round, there are multiple items to split between the two agents.\n"
            "3. Both agents are assigned a per-item value between 1 and 20 (inclusive) in each round.\n"
            "4. You can only observe your own per-item values.\n"
            "5. Because assignments are random, both agents are equally likely to have same expected per-item value.\n"
            "\n"
            "Protocol:\n"
            "1. At the start of the round, one agent begins the conversation. The starting role alternates each round.\n"
            "2. Agents exchange a short chat ({quota_messages_per_agent_per_round} messages per round per agent) to negotiate how to split the item.\n"
            "   - Use this chat to communicate your private per-item value to make informed proposals.\n"
            "3. After the chat, both agents simultaneously propose the amount of each item they will keep.\n"
            "4. If the total sum of proposals is less than or equal to the item quantity, both agents receive their proposed amounts.\n"
            "5. If the total sum of proposals exceeds the item quantity, they are allocated proportionally.\n"
            "6. Your points for the round = (amount you receive per item) x (your per-item value for that round), added across all items.\n"
            "7. Points are accumulated across rounds.\n"
            "Your goal: {goal}\n"
        )
        self.new_round_prompt = (
            "A New Round Begins\n"
            "The items to split are {quantities}.\n"
            "Your per-item values are {value}."
        )
        self.last_round_prompt = (
            "Last Round Summary:\n"
            " - Items to split: {last_quantities}\n"
            " - Your per-item values: {last_value_agent}\n"
            " - {other_agent}'s per-item values: {last_value_coagent}\n"
            " - You proposed: {last_split_agent}\n"
            " - You earned: {last_points_agent} points\n"
            " - {other_agent} proposed: {last_split_coagent}\n"
            " - {other_agent} earned: {last_points_coagent} points\n"
            " - Round Complete.\n"
        )
        self.send_split_prompt = (
            "Message quota is finished for this round.\n"
            "{other_agent} has finalized their proposal.\n"
            "Submit your finalization now\n"
            "Respond with {proposal_style2}"
        )
        # self.wait_for_message_prompt = "Wait for {other_agent} to send a message..."
        self.wait_for_message_prompt = ""
        self.last_message_prompt = "{other_agent} said: {last_message}"
        # self.send_message_prompt = (
        #     f"Send your message now (max {self.num_message_chars} chars)."
        # )
        self.send_message_prompt = f"Send your message now in <message>...</message> (<={self.num_message_chars} chars)."

    def get_message_regex(self, observation: TrustAndSplitObs) -> str:
        return rf"<message>[\s\S]{{0,{self.num_message_chars}}}</message>"

    # def get_message_regex(self, observation: TrustAndSplitObs) -> str:
    #     return rf"(?s).{{0,{self.num_message_chars}}}"

    def get_split_regex(self, observation: TrustAndSplitObs) -> str:
        items = list(observation.quantities.keys())
        # Accept both singular and plural forms
        item_pattern = "|".join(
            [f"{item[:-1]}s?" if item.endswith("s") else f"{item}s?" for item in items]
        )
        regex = rf"(?i)<items_to_self> ?((?:\s*(?P<num>(10|[0-9]))\s*(?P<item>{item_pattern})\s*,?)+) ?</items_to_self>"
        return regex

    def get_split_action(
        self, policy_output: str, observation: TrustAndSplitObs
    ) -> Split:
        items = list(observation.quantities.keys())
        import re as _re

        split_regex = self.get_split_regex(observation)
        items_given_to_self = {item: 0 for item in items}
        m = _re.match(split_regex, policy_output.strip())
        if m:
            # Find all (number, item) pairs
            item_pattern = "|".join(
                [
                    f"{item[:-1]}s?" if item.endswith("s") else f"{item}s?"
                    for item in items
                ]
            )
            inner_regex = rf"(?i)(10|[0-9])\s*({item_pattern})"

            def normalize_item_name(item_str):
                for orig in items:
                    if item_str.lower() == orig.lower():
                        return orig
                    if orig.endswith("s") and item_str.lower() == orig[:-1].lower():
                        return orig
                    if (
                        not orig.endswith("s")
                        and item_str.lower() == orig.lower() + "s"
                    ):
                        return orig

            for num, item in _re.findall(inner_regex, m.group(1)):
                items_given_to_self[normalize_item_name(item)] = int(num)
        return Split(items_given_to_self=items_given_to_self)
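The split regex above constrains the model's final proposal to claims like "<items_to_self> 7 coins </items_to_self>" (0-10 per item, singular or plural item names accepted). A standalone parsing sketch for a hypothetical single-item game, with the named groups dropped for brevity:

import re

regex = r"(?i)<items_to_self> ?((?:\s*(10|[0-9])\s*(coins?)\s*,?)+) ?</items_to_self>"
m = re.match(regex, "<items_to_self> 7 coins </items_to_self>")
assert m is not None
print(re.findall(r"(?i)(10|[0-9])\s*(coins?)", m.group(1)))  # [('7', 'coins')]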
src_code_for_reproducibility/markov_games/negotiation/tas_rps_simulation.py
ADDED
@@ -0,0 +1,248 @@
"""
Trust-and-Split simulation.

This environment models a simple bargaining game over 10 coins with messaging.
Agents are assigned rock/paper/scissors hands, with the winner getting value 10 per coin
and the loser getting value 1 per coin. Agents alternate sending messages for a fixed
number of turns per round and then each submits a split proposal indicating how many
coins they keep for themselves. Rewards are proportional if the proposed totals exceed 10.
"""

import copy
from dataclasses import dataclass
from typing import Any, Dict, List, Literal, Tuple

from numpy.random import default_rng

from mllm.markov_games.negotiation.nego_simulation import (
    Message,
    NegotiationObs,
    NegotiationSimulation,
    NegotiationState,
    Split,
    compute_tas_style_rewards,
)
from mllm.markov_games.rollout_tree import SimulationStepLog

AgentId = str


def _get_rps_winner(
    hand1: Literal["rock", "paper", "scissors"],
    hand2: Literal["rock", "paper", "scissors"],
) -> Literal["rock", "paper", "scissors"]:
    """Determine the winner of rock-paper-scissors between two hands."""
    if hand1 == hand2:
        raise ValueError("Hands should be different")
    if (
        (hand1 == "rock" and hand2 == "scissors")
        or (hand1 == "paper" and hand2 == "rock")
        or (hand1 == "scissors" and hand2 == "paper")
    ):
        return hand1
    else:
        return hand2


@dataclass
class TrustAndSplitRPSState(NegotiationState):
    hands: Dict[
        AgentId, Literal["rock", "paper", "scissors"]
    ]  # rock, paper, or scissors
    previous_hands: Dict[AgentId, Literal["rock", "paper", "scissors"]] | None


@dataclass
class TrustAndSplitRPSObs(NegotiationObs):
    hand: Literal["rock", "paper", "scissors"]
    last_hand_agent: Literal["rock", "paper", "scissors"] | None
    last_hand_coagent: Literal["rock", "paper", "scissors"] | None
    last_hand_value_coagent: Literal["upper", "lower"] | None


class TrustAndSplitRPSSimulation(NegotiationSimulation):
    def __init__(
        self,
        alternating_hands: bool = False,
        alternating_mix_ratio: float | None = None,
        *args,
        **kwargs,
    ):
        self.alternating_hands = alternating_hands
        self.alternating_mix_ratio = alternating_mix_ratio
        super().__init__(*args, **kwargs)
        if self.alternating_mix_ratio is not None:
            if self.rng.random() < self.alternating_mix_ratio:
                self.alternating_hands = True
            else:
                self.alternating_hands = False

    def _sample_hands_and_values(
        self,
        alternate_hands: bool = False,
    ) -> Tuple[Dict[AgentId, str], Dict[AgentId, float]]:
        hands = ["rock", "paper", "scissors"]
        if alternate_hands:
            previous_hands = list(self.state.previous_hands.values())
            hand1, hand2 = self.rng.choice(hands, size=2, replace=False)
            winner = _get_rps_winner(hand1, hand2)
            loser = hand1 if winner == hand2 else hand2
            previous_winner = _get_rps_winner(previous_hands[0], previous_hands[1])
            agent_hands, values = {}, {}
            for agent_id in self.agent_ids:
                if self.state.previous_hands[agent_id] == previous_winner:
                    agent_hands[agent_id] = loser
                    values[agent_id] = 1.0
                else:
                    agent_hands[agent_id] = winner
                    values[agent_id] = 10.0
            return agent_hands, values
        else:
            # Assign different hands to each agent
            hand1, hand2 = self.rng.choice(hands, size=2, replace=False)

            agent_hands = {self.agent_ids[0]: hand1, self.agent_ids[1]: hand2}

            # Determine winner and assign values
            winner = _get_rps_winner(hand1, hand2)
            values = {}
            for agent_id in self.agent_ids:
                if agent_hands[agent_id] == winner:
                    values[agent_id] = 10.0  # Winner gets value 10
                else:
                    values[agent_id] = 1.0  # Loser gets value 1

            return agent_hands, values

    def set_new_round_of_variant(self):
        self.state.previous_hands = copy.deepcopy(self.state.hands)
        new_hands, new_values = self._sample_hands_and_values(
            alternate_hands=self.alternating_hands
        )
        self.state.hands = new_hands
        self.state.values = new_values
        # Quantities are constant in TAS
        self.state.quantities = {"coins": 10}
        self.state.split_phase = False

    def get_info_of_variant(
        self, state: NegotiationState, actions: Dict[AgentId, Any]
    ) -> Dict[str, Any]:
        return {
            "quantities": copy.deepcopy(state.quantities),
            "hands": copy.deepcopy(state.hands),
            "values": copy.deepcopy(state.values),
            "previous_hands": copy.deepcopy(state.previous_hands),
            "previous_values": copy.deepcopy(state.previous_values),
            "splits": copy.deepcopy(state.splits),
        }

    def get_rewards(self, splits: Dict[AgentId, Split]) -> Dict[AgentId, float]:
        return compute_tas_style_rewards(
            self.agent_ids, self.state.values, splits, self.state.quantities
        )

    def get_obs_agent(self, agent_id):
        """Returns observation for agent_id."""
        other_id = self._other(agent_id)
        last_value_coagent = (
            None
            if self.state.previous_values is None
            else self.state.previous_values.get(other_id)
        )
        last_hand_coagent = (
            None
            if self.state.previous_hands is None
            else self.state.previous_hands.get(other_id)
        )
        last_points_coagent = (
            None
            if self.state.previous_points is None
            else round(self.state.previous_points.get(other_id), 1)
        )
        last_value_agent = (
            None
            if self.state.previous_values is None
            else self.state.previous_values.get(agent_id)
        )
        last_hand_agent = (
            None
            if self.state.previous_hands is None
            else self.state.previous_hands.get(agent_id)
        )
        last_points_agent = (
            None
            if self.state.previous_points is None
            else round(self.state.previous_points.get(agent_id), 1)
        )
        last_split_coagent = None
        last_split_agent = None
        if self.state.previous_splits is not None:
            last_split_coagent = self.state.previous_splits[
                other_id
            ].items_given_to_self["coins"]
            last_split_agent = self.state.previous_splits[agent_id].items_given_to_self[
                "coins"
            ]
        if last_hand_agent is None or last_hand_coagent is None:
            last_hand_value_coagent = None
        else:
            winner = _get_rps_winner(last_hand_agent, last_hand_coagent)
            last_hand_value_coagent = (
                "upper" if winner == last_hand_coagent else "lower"
            )
        obs = TrustAndSplitRPSObs(
            round_nb=self.state.round_nb,
            last_message=self.state.last_message,
            quota_messages_per_agent_per_round=self.quota_messages_per_agent_per_round,
            current_agent=self.state.current_agent,
            other_agent=self.agent_id_to_name[other_id],
            quantities={"coins": 10},
            item_types=self.item_types,
            value=self.state.values[agent_id],
            split_phase=self.state.split_phase,
            last_split_agent=last_split_agent,
            last_value_agent=last_value_agent,
            last_points_agent=last_points_agent,
            last_split_coagent=last_split_coagent,
            last_value_coagent=last_value_coagent,
            last_points_coagent=last_points_coagent,
            hand=self.state.hands[agent_id],
            last_hand_coagent=last_hand_coagent,
            last_hand_agent=last_hand_agent,
            last_quantities=self.state.previous_quantities,
            last_hand_value_coagent=last_hand_value_coagent,
        )
        return obs

    def get_state(self):
        return self.state

    def get_safe_copy(self):
        """Return a safe copy of the simulation."""
        simulation_copy = copy.copy(self)
        simulation_copy.state = copy.deepcopy(self.state)
        return simulation_copy

    def reset(self):
        """Initialize and return initial observations."""
        # The starting agent was sampled once in __init__ and alternates each round
        start_agent = self.agent_ids[self._starting_agent_index]
        hands, values = self._sample_hands_and_values()
        self.state = TrustAndSplitRPSState(
            round_nb=0,
            last_message="",
            current_agent=start_agent,
            quantities={"coins": 10},
            values=values,
            splits={aid: None for aid in self.agent_ids},
            nb_messages_sent={aid: 0 for aid in self.agent_ids},
            previous_values=None,
            previous_splits=None,
            previous_points=None,
            split_phase=False,
            hands=hands,
            previous_hands=None,
            previous_quantities=None,
        )
        return self.get_obs()
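In other words, each round assigns asymmetric per-coin values from a rock-paper-scissors draw: the holder of the winning hand values coins at 10, the other at 1. A standalone sketch with hypothetical ids:

hands = {"agent_0": "rock", "agent_1": "scissors"}  # hypothetical draw
winner = "rock"  # rock beats scissors (see _get_rps_winner)
values = {aid: (10.0 if hand == winner else 1.0) for aid, hand in hands.items()}
print(values)  # {'agent_0': 10.0, 'agent_1': 1.0}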
src_code_for_reproducibility/markov_games/negotiation/tas_simple_simulation.py
ADDED
@@ -0,0 +1,169 @@
import copy
from collections import defaultdict
from dataclasses import dataclass
from typing import Any, Dict, List, Literal

from numpy.random import default_rng

from mllm.markov_games.negotiation.nego_simulation import (
    NegotiationObs,
    NegotiationSimulation,
    NegotiationState,
    Split,
    compute_tas_style_rewards,
)

AgentId = str


@dataclass
class TrustAndSplitSimpleState(NegotiationState):
    pass


@dataclass
class TrustAndSplitSimpleObs(NegotiationObs):
    last_value_str_coagent: str | None


class TrustAndSplitSimpleSimulation(NegotiationSimulation):
    def __init__(
        self,
        game_type: Literal["10-1-exclusive", "1-to-10"] = "1-to-10",
        dist_type: Literal["uniform", "bimodal"] = "uniform",
        beta_dist_alpha: float = 0.1,
        beta_dist_beta: float = 0.1,
        *args,
        **kwargs,
    ):
        self.game_type = game_type
        self.dist_type = dist_type
        self.beta_dist_alpha = beta_dist_alpha
        self.beta_dist_beta = beta_dist_beta
        super().__init__(*args, **kwargs)

    def _sample_values(self) -> Dict[AgentId, int]:
        values = {}
        while True:
            if self.game_type == "10-1-exclusive":
                v = int(self.rng.choice([1, 10]))
                values[self.agent_ids[0]] = v
                values[self.agent_ids[1]] = 10 if v == 1 else 1
            elif self.game_type == "1-to-10":
                for aid in self.agent_ids:
                    if self.dist_type == "uniform":
                        values[aid] = int(self.rng.integers(1, 11))
                    elif self.dist_type == "bimodal":
                        alpha, beta = self.beta_dist_alpha, self.beta_dist_beta
                        values[aid] = int(round(self.rng.beta(alpha, beta) * 9) + 1)
            if len(set(values.values())) != 1:
                break
        return values

    def _sample_quantities(self) -> Dict[str, int]:
        return {"coins": 10}

    def set_new_round_of_variant(self):
        self.state.quantities = self._sample_quantities()
        self.state.values = self._sample_values()
        self.state.split_phase = False

    def get_info_of_variant(
        self, state: NegotiationState, actions: Dict[AgentId, Any]
    ) -> Dict[str, Any]:
        return {
            "quantities": copy.deepcopy(state.quantities),
            "values": copy.deepcopy(state.values),
            # "previous_values": copy.deepcopy(state.previous_values),
            "splits": copy.deepcopy(state.splits),
        }

    def get_rewards(self, splits: Dict[AgentId, Split]) -> Dict[AgentId, float]:
        return compute_tas_style_rewards(
            self.agent_ids, self.state.values, splits, self.state.quantities
        )

    def get_obs(self):
        return {agent_id: self.get_obs_agent(agent_id) for agent_id in self.agent_ids}

    def get_obs_agent(self, agent_id):
        other_id = self._other(agent_id)
        last_value_coagent = (
            None
            if self.state.previous_values is None
            else self.state.previous_values.get(other_id)
        )
        last_points_coagent = (
            None
            if self.state.previous_points is None
            else round(self.state.previous_points.get(other_id), 1)
        )
        last_value_agent = (
            None
            if self.state.previous_values is None
            else self.state.previous_values.get(agent_id)
        )
        last_points_agent = (
            None
            if self.state.previous_points is None
            else round(self.state.previous_points.get(agent_id), 1)
        )
        last_split_coagent = None
        last_split_agent = None
        if self.state.previous_splits is not None:
            last_split_coagent = self.state.previous_splits[
                other_id
            ].items_given_to_self["coins"]
            last_split_agent = self.state.previous_splits[agent_id].items_given_to_self[
                "coins"
            ]
        if last_value_agent is None or last_value_coagent is None:
            last_value_str_coagent = None
        else:
            if last_value_coagent > last_value_agent:
                last_value_str_coagent = "higher"
            elif last_value_coagent < last_value_agent:
                last_value_str_coagent = "lower"
            else:
                raise ValueError("Should not be equal values")

        obs = TrustAndSplitSimpleObs(
            round_nb=self.state.round_nb,
            last_message=self.state.last_message,
            quota_messages_per_agent_per_round=self.quota_messages_per_agent_per_round,
            current_agent=self.state.current_agent,
            other_agent=self.agent_id_to_name[other_id],
            quantities=self.state.quantities,
            item_types=self.item_types,
            value=self.state.values[agent_id],
            split_phase=self.state.split_phase,
            last_split_agent=last_split_agent,
            last_value_agent=last_value_agent,
            last_points_agent=last_points_agent,
            last_split_coagent=last_split_coagent,
            last_value_coagent=last_value_coagent,
            last_points_coagent=last_points_coagent,
            last_quantities=self.state.previous_quantities,
            last_value_str_coagent=last_value_str_coagent,
        )
        return obs

    def reset(self):
        start_agent = self.agent_ids[self._starting_agent_index]
        quantities = self._sample_quantities()
        values = self._sample_values()
        self.state = TrustAndSplitSimpleState(
            round_nb=0,
            last_message="",
            current_agent=start_agent,
            quantities=quantities,
            values=values,
            previous_values=None,
            splits={aid: None for aid in self.agent_ids},
            nb_messages_sent={aid: 0 for aid in self.agent_ids},
            split_phase=False,
            previous_splits=None,
            previous_points=None,
            previous_quantities=None,
        )
        return self.get_obs()
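The "bimodal" option draws from a Beta(0.1, 0.1), whose mass concentrates near 0 and 1, so the mapped values cluster at 1 and 10. A standalone sketch of that mapping:

from numpy.random import default_rng

rng = default_rng(0)
alpha = beta = 0.1
draws = [int(round(rng.beta(alpha, beta) * 9) + 1) for _ in range(8)]
# Most draws land on 1 or 10; intermediate values are rare.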
src_code_for_reproducibility/markov_games/vine_ppo.py
ADDED
@@ -0,0 +1,10 @@
from anytree import Node, RenderTree
from anytree.exporter import DotExporter
import os.path
import asyncio
from mllm.markov_games.markov_game import MarkovGame


async def VinePPORunner(markov_game: MarkovGame, **kwargs):
    # Placeholder: the VinePPO runner is not implemented in this file.
    pass
src_code_for_reproducibility/models/__init__.py
ADDED
File without changes
src_code_for_reproducibility/models/adapter_training_wrapper.py
ADDED
@@ -0,0 +1,98 @@
import torch
import torch.nn as nn
import logging
from typing import Union
from peft import (
    LoraConfig,
    get_peft_model,
)

logger = logging.getLogger(__name__)


class AdapterWrapper(nn.Module):
    """
    A thin façade that
      • keeps a reference to a *shared* PEFT-wrapped model,
      • ensures `set_adapter(adapter)` is called on every forward,
      • exposes only the parameters that should be trained for that adapter
        (plus whatever extra modules you name).
    """

    def __init__(
        self,
        shared_llm: nn.Module,
        adapter_id: str,
        lora_config: dict,
        path: Union[str, None] = None,
    ):
        super().__init__()
        self.shared_llm = shared_llm
        self.adapter_id = adapter_id
        lora_config = LoraConfig(**lora_config)
        # wrap the shared llm with PEFT, registering a LoRA adapter under adapter_id
        self.shared_llm = get_peft_model(
            model=shared_llm,
            peft_config=lora_config,
            adapter_name=adapter_id,
        )
        self.shared_llm.train()
        # Load external adapter weights if provided
        loaded_from: str | None = None
        if path:
            try:
                # Supports both local filesystem paths and HF Hub repo IDs
                self.shared_llm.load_adapter(
                    is_trainable=True,
                    model_id=path,
                    adapter_name=adapter_id,
                )
                loaded_from = path
            except Exception as exc:  # noqa: BLE001 - want to log any load failure context
                logger.warning(
                    f"Adapter '{adapter_id}': failed to load from '{path}': {exc}"
                )

        if loaded_from:
            logger.info(
                f"Adapter '{adapter_id}': loaded initial weights from '{loaded_from}'."
            )
        else:
            logger.info(
                f"Adapter '{adapter_id}': initialized with fresh weights (no initial weights found)."
            )

    def parameters(self, recurse: bool = True):
        """
        "recurse" is kept only for PyTorch API compatibility.
        """
        self.shared_llm.set_adapter(self.adapter_id)
        params = [p for p in self.shared_llm.parameters() if p.requires_grad]

        return params

    def get_base_model_logits(self, contexts):
        """
        Run the base model (without adapter) in inference mode, without tracking gradients.
        This is useful to get reference logits for KL-divergence computation.
        """
        with torch.no_grad():
            with self.shared_llm.disable_adapter():
                return self.shared_llm(input_ids=contexts)[0]

    def forward(self, *args, **kwargs):
        self.shared_llm.set_adapter(self.adapter_id)
        return self.shared_llm(*args, **kwargs)

    def save_pretrained(self, save_path):
        self.shared_llm.save_pretrained(save_path)

    def gradient_checkpointing_enable(self, *args, **kwargs):
        self.shared_llm.gradient_checkpointing_enable(*args, **kwargs)

    @property
    def dtype(self):
        return self.shared_llm.dtype

    @property
    def device(self):
        return self.shared_llm.device
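The wrapper above leans on peft's multi-adapter API; a minimal standalone sketch of that routing follows (the model and target module names are illustrative, not from this repo):

from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM

base = AutoModelForCausalLM.from_pretrained("sshleifer/tiny-gpt2")  # any small causal LM
cfg = LoraConfig(r=8, lora_alpha=16, target_modules=["c_attn"])
peft_model = get_peft_model(base, cfg, adapter_name="agent_0")
peft_model.add_adapter("agent_1", cfg)
peft_model.set_adapter("agent_0")  # subsequent forwards use agent_0's LoRA weights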
src_code_for_reproducibility/models/human_policy.py
ADDED
@@ -0,0 +1,255 @@
import asyncio
import os
import re
import shutil
import sys
from typing import Callable, Dict, List, Optional

from mllm.markov_games.rollout_tree import ChatTurn

try:
    import rstr  # For generating example strings from regex
except Exception:  # pragma: no cover
    rstr = None


def _clear_terminal() -> None:
    """Clear the terminal screen in a cross-platform manner."""
    if sys.stdout.isatty():
        os.system("cls" if os.name == "nt" else "clear")


def _terminal_width(default: int = 100) -> int:
    try:
        return shutil.get_terminal_size().columns
    except Exception:
        return default


def _horizontal_rule(char: str = "─") -> str:
    width = max(20, _terminal_width() - 2)
    return char * width


class _Style:
    # ANSI colors (bright, readable)
    RESET = "\033[0m"
    BOLD = "\033[1m"
    DIM = "\033[2m"
    # Foreground colors
    FG_BLUE = "\033[94m"  # user/system headers
    FG_GREEN = "\033[92m"  # human response header
    FG_YELLOW = "\033[93m"  # notices
    FG_RED = "\033[91m"  # errors
    FG_MAGENTA = "\033[95m"  # regex
    FG_CYAN = "\033[96m"  # tips


def _render_chat(state) -> str:
    """
    Render prior messages in a compact, readable terminal format.

    Each chat turn is expected to expose ``role`` and ``content`` attributes.
    """
    lines: List[str] = []
    lines.append(_horizontal_rule())
    lines.append(f"{_Style.FG_BLUE}{_Style.BOLD} Conversation so far {_Style.RESET}")
    lines.append(_horizontal_rule())
    for chat in state:
        role = chat.role
        content = str(chat.content).strip()
        # Map roles to display names and colors/emojis
        if role == "assistant":
            header = f"{_Style.FG_GREEN}{_Style.BOLD}HUMAN--🧑‍💻{_Style.RESET}"
        elif role == "user":
            header = f"{_Style.FG_BLUE}{_Style.BOLD}USER--⚙️{_Style.RESET}"
        else:
            header = f"[{_Style.DIM}{role.upper()}{_Style.RESET}]"
        lines.append(header)
        # Indent content for readability
        for line in content.splitlines() or [""]:
            lines.append(f"  {line}")
        lines.append("")
    lines.append(_horizontal_rule())
    return "\n".join(lines)


async def _async_input(prompt_text: str) -> str:
    """Non-blocking input using a background thread."""
    return await asyncio.to_thread(input, prompt_text)


def _short_regex_example(regex: str, max_len: int = 30) -> Optional[str]:
    """
    Try to produce a short example string that matches the regex.
    Several candidates are sampled; the first one of length <= max_len wins.
    """
    if rstr is None:
        return None
    try:
        for _ in range(20):
            candidate = rstr.xeger(regex)
            if len(candidate) <= max_len:
                return candidate
        # Truncating a long candidate could break the match, so give up instead
        return None
    except Exception:
        return None


def _detect_input_type(regex: str | None) -> tuple[str, str, str]:
    """
    Detect what type of input is expected based on the regex pattern.
    Returns (input_type, start_tag, end_tag).
    """
    if regex is None:
        return "text", "", ""

    if "message_start" in regex and "message_end" in regex:
        return "message", "<<message_start>>", "<<message_end>>"
    elif "proposal_start" in regex and "proposal_end" in regex:
        return "proposal", "<<proposal_start>>", "<<proposal_end>>"
    else:
        return "text", "", ""


async def human_policy(state, agent_id, regex: str | None = None) -> ChatTurn:
    """
    Async human-in-the-loop policy.

    - Displays prior conversation context in the terminal.
    - Prompts the user for a response.
    - If a regex is provided, validates and re-prompts until it matches.
    - Automatically adds formatting tags based on the expected input type.

    Args:
        state: Chat history as a sequence of chat turns.
        agent_id: Identifier of the agent the human plays.
        regex: Optional fullmatch validation pattern.

    Returns:
        A ChatTurn carrying the user's validated response.
    """
    # Detect input type and formatting
    input_type, start_tag, end_tag = _detect_input_type(regex)

    while True:
        _clear_terminal()
        print(_render_chat(state))

        if regex:
            example = _short_regex_example(regex, max_len=30)
            print(
                f"{_Style.FG_MAGENTA}{_Style.BOLD}Expected format (regex fullmatch):{_Style.RESET}"
            )
            print(f"  {_Style.FG_MAGENTA}{regex}{_Style.RESET}")
            if example:
                print(
                    f"{_Style.FG_CYAN}Example (random, <=30 chars):{_Style.RESET} {example}"
                )
            print(_horizontal_rule("."))

            # Custom prompt based on input type
            if input_type == "message":
                print(
                    f"{_Style.FG_YELLOW}Type your message content (formatting will be added automatically):{_Style.RESET}"
                )
            elif input_type == "proposal":
                print(
                    f"{_Style.FG_YELLOW}Type your proposal (number only, formatting will be added automatically):{_Style.RESET}"
                )
            else:
                print(
                    f"{_Style.FG_YELLOW}Type your response and press Enter.{_Style.RESET}"
                )

            print(
                f"{_Style.DIM}Commands: /help to view commands, /refresh to re-render, /quit to abort{_Style.RESET}"
            )
        else:
            print(
                f"{_Style.FG_YELLOW}Type your response and press Enter.{_Style.RESET} {_Style.DIM}(/help for commands){_Style.RESET}"
            )

        user_in = (await _async_input("> ")).rstrip("\n")

        # Commands
        if user_in.strip().lower() in {"/help", "/h"}:
            print(f"\n{_Style.FG_CYAN}{_Style.BOLD}Available commands:{_Style.RESET}")
            print(
                f"  {_Style.FG_CYAN}/help{_Style.RESET} or {_Style.FG_CYAN}/h{_Style.RESET}      Show this help"
            )
            print(
                f"  {_Style.FG_CYAN}/refresh{_Style.RESET} or {_Style.FG_CYAN}/r{_Style.RESET}   Re-render the conversation and prompt"
            )
            print(
                f"  {_Style.FG_CYAN}/quit{_Style.RESET} or {_Style.FG_CYAN}/q{_Style.RESET}      Abort the run (raises KeyboardInterrupt)"
            )
            await asyncio.sleep(1.0)
            continue
        if user_in.strip().lower() in {"/refresh", "/r"}:
            continue
        if user_in.strip().lower() in {"/quit", "/q"}:
            raise KeyboardInterrupt("Human aborted run from human_policy")

        # Add formatting tags if needed
        if start_tag and end_tag:
            formatted_input = f"{start_tag}{user_in}{end_tag}"
        else:
            formatted_input = user_in

        if regex is None:
            return ChatTurn(
                role="assistant", agent_id=agent_id, content=formatted_input
            )

        # Validate against regex (fullmatch)
        try:
            pattern = re.compile(regex)
        except re.error as e:
            # If the regex is invalid, fall back to accepting any input
            print(
                f"{_Style.FG_RED}Warning:{_Style.RESET} Provided regex is invalid: {e}. Accepting input without validation."
            )
            await asyncio.sleep(0.5)
            return ChatTurn(
                role="assistant", agent_id=agent_id, content=formatted_input
            )

        if pattern.fullmatch(formatted_input):
            return ChatTurn(
                role="assistant", agent_id=agent_id, content=formatted_input
            )

        # Show validation error and re-prompt
        print("")
        print(
            f"{_Style.FG_RED}{_Style.BOLD}Input did not match the required format.{_Style.RESET} Please try again."
        )

        if input_type == "message":
            print(
                f"You entered: {_Style.FG_CYAN}{start_tag}{user_in}{end_tag}{_Style.RESET}"
            )
            print("Just type the message content without tags.")
        elif input_type == "proposal":
            print(
                f"You entered: {_Style.FG_CYAN}{start_tag}{user_in}{end_tag}{_Style.RESET}"
            )
            print("Just type the number without tags.")
        else:
            print("Expected (regex):")
            print(f"  {_Style.FG_MAGENTA}{regex}{_Style.RESET}")

        print(_horizontal_rule("."))
        print(f"{_Style.FG_YELLOW}Press Enter to retry...{_Style.RESET}")
        await _async_input("")


def get_human_policies() -> Dict[str, Callable]:
    """
    Expose the human policy in the same map shape used elsewhere.
    The value is the async callable itself; it returns ChatTurn objects.
    """
    return {"human_policy": human_policy}
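A minimal usage sketch for driving human_policy from a script. It is not part of the repository; the import path mllm.models.human_policy is assumed from the package layout, the regex is illustrative, and a TTY must be attached for the terminal rendering to make sense.

import asyncio

from mllm.markov_games.rollout_tree import ChatTurn
from mllm.models.human_policy import human_policy


async def main() -> None:
    state = [ChatTurn(role="user", agent_id="env", content="Propose a number 0-9.")]
    # The proposal_* markers in the regex switch the prompt into proposal
    # mode, so the <<proposal_start>>/<<proposal_end>> tags are added for you.
    turn = await human_policy(
        state, agent_id="human", regex=r"<<proposal_start>>\d<<proposal_end>>"
    )
    print("Collected:", turn.content)


if __name__ == "__main__":
    asyncio.run(main())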
src_code_for_reproducibility/models/inference_backend.py
ADDED
@@ -0,0 +1,39 @@
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Any, Optional


@dataclass
class LLMInferenceOutput:
    content: str
    reasoning_content: str | None = None
    log_probs: list[float] | None = None
    out_token_ids: list[int] | None = None


class LLMInferenceBackend(ABC):
    @abstractmethod
    def __init__(self, **kwargs):
        ...

    @abstractmethod
    def prepare_adapter(
        self, adapter_id: str, weights_got_updated: bool = False
    ) -> None:
        """Ensure adapter is ready/loaded for next generation call."""

    @abstractmethod
    async def generate(self, prompt: list[dict], regex: Optional[str] = None) -> str:
        ...

    @abstractmethod
    def toggle_training_mode(self) -> None:
        ...

    @abstractmethod
    def toggle_eval_mode(self) -> None:
        ...

    @abstractmethod
    def shutdown(self) -> None:
        ...
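To make the contract concrete, here is a hypothetical minimal implementation of the abstract interface above (an echo backend, not part of the repository); it shows which methods a concrete backend must provide:

from typing import Optional

from mllm.models.inference_backend import LLMInferenceBackend, LLMInferenceOutput


class EchoBackend(LLMInferenceBackend):
    """Illustrative backend that echoes the last message back."""

    def __init__(self, **kwargs):
        pass

    def prepare_adapter(self, adapter_id: str, weights_got_updated: bool = False) -> None:
        pass  # no adapters to manage

    async def generate(
        self, prompt: list[dict], regex: Optional[str] = None
    ) -> LLMInferenceOutput:
        # Echo the last message content back as the "completion"
        return LLMInferenceOutput(content=prompt[-1]["content"])

    def toggle_training_mode(self) -> None:
        pass

    def toggle_eval_mode(self) -> None:
        pass

    def shutdown(self) -> None:
        pass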
src_code_for_reproducibility/models/inference_backend_dummy.py
ADDED
@@ -0,0 +1,54 @@
import asyncio
from typing import Optional

import rstr

from mllm.models.inference_backend import LLMInferenceBackend, LLMInferenceOutput


class DummyInferenceBackend(LLMInferenceBackend):
    def __init__(self, *args, **kwargs):
        pass

    def prepare_adapter(
        self,
        adapter_id: Optional[str],
        weights_got_updated: bool,
        adapter_path: Optional[str] = None,
    ) -> None:
        pass

    async def toggle_training_mode(self) -> None:
        await asyncio.sleep(0)

    async def toggle_eval_mode(self) -> None:
        await asyncio.sleep(0)

    def shutdown(self) -> None:
        pass

    async def generate(
        self,
        prompt_text: str,
        regex: Optional[str] = None,
        extract_thinking: bool = False,
    ) -> LLMInferenceOutput:
        if regex:
            # Create a random string that respects the regex
            return LLMInferenceOutput(
                content=rstr.xeger(regex),
                reasoning_content="I don't think, I am a dummy backend.",
            )
        else:
            return LLMInferenceOutput(
                content="I am a dummy backend without a regex.",
                reasoning_content="I don't think, I am a dummy backend.",
            )
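A sketch of using the dummy backend to smoke-test rollout plumbing without a GPU; the regex here is an arbitrary example:

import asyncio

from mllm.models.inference_backend_dummy import DummyInferenceBackend


async def main() -> None:
    backend = DummyInferenceBackend()
    out = await backend.generate("ignored prompt", regex=r"[A-C]\d{2}")
    print(out.content)  # a random string matching the regex, e.g. "B07"


if __name__ == "__main__":
    asyncio.run(main())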
src_code_for_reproducibility/models/inference_backend_sglang.py
ADDED
@@ -0,0 +1,86 @@
# new_backend_sglang_offline.py
from __future__ import annotations

from typing import Any, Optional

import sglang as sgl

from mllm.models.inference_backend import LLMInferenceBackend


class SGLangOfflineBackend(LLMInferenceBackend):
    def __init__(
        self,
        model_name: str,
        tokenizer,  # unused but kept for parity with other backends
        adapter_paths: dict[str, str],
        device: str = "cuda",
        max_model_len: Optional[int] = None,
        enable_lora: bool = True,
        lora_target_modules: Optional[list[str] | str] = None,
        max_loras_per_batch: int = 8,
        engine_kwargs: Optional[dict[str, Any]] = None,
    ):
        self.model_name = model_name
        self.adapter_paths = adapter_paths
        self.current_adapter: Optional[str] = None
        engine_kwargs = dict(engine_kwargs or {})
        # Map server-style LoRA flags to the offline engine constructor
        if enable_lora and adapter_paths:
            engine_kwargs.setdefault("enable_lora", True)
            # The offline Engine mirrors the server args; pass a name->path mapping
            engine_kwargs.setdefault("lora_paths", adapter_paths)
            if lora_target_modules is not None:
                engine_kwargs.setdefault("lora_target_modules", lora_target_modules)
            engine_kwargs.setdefault("max_loras_per_batch", max_loras_per_batch)

        if max_model_len is not None:
            engine_kwargs.setdefault("context_length", max_model_len)

        # Launch the in-process engine (no HTTP server); SGLang exposes
        # generate(), async_generate(), and async streaming helpers on it.
        self.llm = sgl.Engine(model_path=model_name, **engine_kwargs)

    def is_ready(self) -> bool:
        return True

    def toggle_training_mode(self) -> None:
        # No explicit KV release API offline; typically you pause usage here.
        pass

    def toggle_eval_mode(self) -> None:
        pass

    def shutdown(self) -> None:
        # The engine cleans up on GC; an explicit close is not required.
        pass

    def prepare_adapter(self, adapter_id: Optional[str]) -> None:
        # With the offline Engine, when LoRA is enabled at init,
        # the adapter is selected per request via the input batch mapping.
        self.current_adapter = adapter_id

    async def generate(
        self, prompt_text: str, sampling_params: dict, adapter_id: Optional[str]
    ) -> str:
        # Non-streaming async (batch of 1). For batched prompts, pass a list.
        params = {
            "temperature": sampling_params.get("temperature", 1.0),
            "top_p": sampling_params.get("top_p", 1.0),
            "max_new_tokens": sampling_params.get("max_new_tokens", 128),
        }
        if (tk := sampling_params.get("top_k", -1)) and tk > 0:
            params["top_k"] = tk
        if (mn := sampling_params.get("min_new_tokens")) is not None:
            params["min_new_tokens"] = mn
        if (fp := sampling_params.get("frequency_penalty")) is not None:
            params["frequency_penalty"] = fp

        # With multi-LoRA, SGLang accepts adapter names aligned to each input.
        prompts = [prompt_text]
        adapters = [adapter_id] if adapter_id else None  # or omit for the base model
        outs = await self.llm.async_generate(prompts, params, adapters)
        return outs[0]["text"]
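A construction sketch for the offline backend. It requires a GPU with SGLang installed, and whether sgl.Engine accepts these exact LoRA kwargs depends on the SGLang version; the model name and adapter path below are placeholders:

import asyncio

from mllm.models.inference_backend_sglang import SGLangOfflineBackend


async def main() -> None:
    backend = SGLangOfflineBackend(
        model_name="Qwen/Qwen2.5-0.5B-Instruct",  # placeholder model
        tokenizer=None,  # unused by this backend
        adapter_paths={"agent_a": "/tmp/adapters/agent_a"},  # placeholder path
    )
    backend.prepare_adapter("agent_a")
    text = await backend.generate(
        "Hello!", sampling_params={"temperature": 0.7}, adapter_id="agent_a"
    )
    print(text)


if __name__ == "__main__":
    asyncio.run(main())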
src_code_for_reproducibility/models/inference_backend_sglang_local_server.py
ADDED
@@ -0,0 +1,127 @@
import os

import httpx
import requests
from sglang.utils import launch_server_cmd, wait_for_server

from mllm.models.inference_backend import LLMInferenceBackend


class HttpSGLangBackend(LLMInferenceBackend):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # The abstract base __init__ stores nothing, so read these from kwargs
        self.model_name = kwargs["model_name"]
        self.adapter_paths = kwargs.get("adapter_paths", {})
        self.port = None
        self.proc = None
        self.urls = {}
        # Track sglang adapter ids separately from the logical ids
        self.sglang_names = {aid: aid for aid in self.adapter_paths.keys()}
        self.needs_loading = {aid: True for aid in self.adapter_paths.keys()}

        # Server defaults
        self.mem_fraction = kwargs.get("mem_fraction_static", 0.6)
        self.dtype = kwargs.get("dtype", "bfloat16")
        self.extra_cli = kwargs.get("extra_cli", "")
        self.disable_radix_cache = kwargs.get("disable_radix_cache", True)

    def launch(self) -> None:
        # Find the local HF cache path for the server
        from transformers.utils import cached_file

        local_llm_path = os.path.split(cached_file(self.model_name, "config.json"))[0]

        lora_str = ""
        if self.adapter_paths:
            lora_str = "--lora-paths " + " ".join(
                f"{aid}={path}" for aid, path in self.adapter_paths.items()
            )

        cmd = f"""
        python3 -m sglang.launch_server --model-path {local_llm_path} \
        --host 0.0.0.0 {lora_str} \
        {'--disable-radix-cache' if self.disable_radix_cache else ''} \
        --mem-fraction-static {self.mem_fraction} --dtype {self.dtype} {self.extra_cli}
        """
        self.proc, self.port = launch_server_cmd(cmd)
        wait_for_server(f"http://localhost:{self.port}")
        base = f"http://localhost:{self.port}"
        self.urls = dict(
            generate=f"{base}/generate",
            release=f"{base}/release_memory_occupation",
            resume=f"{base}/resume_memory_occupation",
            load_lora=f"{base}/load_lora_adapter",
            unload_lora=f"{base}/unload_lora_adapter",
        )

    def is_ready(self) -> bool:
        try:
            requests.get(self.urls["generate"], timeout=2)
            return True
        except Exception:
            return False

    def prepare_adapter(self, adapter_id: str) -> None:
        if adapter_id is None:
            return
        if self.needs_loading.get(adapter_id, False):
            # Unload the old name if present
            try:
                requests.post(
                    self.urls["unload_lora"],
                    json={"lora_name": self.sglang_names[adapter_id]},
                    timeout=10,
                )
            except Exception:
                pass
            new_name = self._short_id()
            self.sglang_names[adapter_id] = new_name
            requests.post(
                self.urls["load_lora"],
                json={
                    "lora_name": new_name,
                    "lora_path": self.adapter_paths[adapter_id],
                },
            ).raise_for_status()
            self.needs_loading[adapter_id] = False

    async def generate(
        self, prompt_text: str, sampling_params: dict, adapter_id: str | None
    ) -> str:
        lora_name = self.sglang_names.get(adapter_id) if adapter_id else None
        payload = {
            "text": [prompt_text],
            "sampling_params": sampling_params,
        }
        if lora_name:
            payload["lora_path"] = [lora_name]

        timeout = httpx.Timeout(3600.0, connect=3600.0)
        async with httpx.AsyncClient(timeout=timeout) as client:
            resp = await client.post(self.urls["generate"], json=payload)
            resp.raise_for_status()
            return resp.json()[0]["text"]

    def toggle_training_mode(self) -> None:
        # Free KV space while training adapters
        requests.post(
            self.urls["release"], json={"tags": ["kv_cache"]}
        ).raise_for_status()

    def toggle_eval_mode(self) -> None:
        # Re-allocate KV space
        try:
            requests.post(
                self.urls["resume"], json={"tags": ["kv_cache"]}
            ).raise_for_status()
        except Exception:
            pass

    def shutdown(self) -> None:
        from sglang.utils import terminate_process

        if self.proc:
            terminate_process(self.proc)

    def _short_id(self) -> str:
        import uuid

        return str(uuid.uuid4().int)[:8]
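A lifecycle sketch for the HTTP backend: launch the server, hot-load an adapter, generate, then flip between training and eval modes to trade KV-cache memory against adapter training. The constructor kwargs mirror what __init__ reads; the model name and adapter path are placeholders:

import asyncio

from mllm.models.inference_backend_sglang_local_server import HttpSGLangBackend


async def main() -> None:
    backend = HttpSGLangBackend(
        model_name="Qwen/Qwen2.5-0.5B-Instruct",  # placeholder
        adapter_paths={"agent_a": "/tmp/adapters/agent_a"},  # placeholder
        mem_fraction_static=0.6,
    )
    backend.launch()  # starts the server and blocks until it is ready
    backend.prepare_adapter("agent_a")
    text = await backend.generate(
        "Hello!",
        sampling_params={"temperature": 0.7, "max_new_tokens": 32},
        adapter_id="agent_a",
    )
    print(text)
    backend.toggle_training_mode()  # frees the KV cache while adapters train
    backend.toggle_eval_mode()  # re-allocates it for the next rollout phase
    backend.shutdown()


if __name__ == "__main__":
    asyncio.run(main())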
src_code_for_reproducibility/models/inference_backend_vllm.py
ADDED
@@ -0,0 +1,117 @@
import asyncio
import re
from typing import Optional

import torch
from transformers import AutoTokenizer
from vllm import AsyncEngineArgs, AsyncLLMEngine, SamplingParams
from vllm.inputs import TokensPrompt
from vllm.lora.request import LoRARequest
from vllm.sampling_params import GuidedDecodingParams, RequestOutputKind

from mllm.models.inference_backend import LLMInferenceBackend, LLMInferenceOutput
from mllm.utils.short_id_gen import generate_short_id


class VLLMAsyncBackend(LLMInferenceBackend):
    def __init__(
        self,
        model_name: str,
        tokenizer: AutoTokenizer,
        engine_init_kwargs: Optional[dict] = None,
        sampling_params: Optional[dict] = None,
    ):
        self.model_name = model_name
        # LoRA bookkeeping: logical adapter id -> fresh vLLM id,
        # regenerated whenever the adapter weights change
        self.vllm_adapter_ids = {}
        self.current_lora_request = None  # None means the base model
        ea = dict(model=model_name, **(engine_init_kwargs or {}))
        self.engine = AsyncLLMEngine.from_engine_args(AsyncEngineArgs(**ea))

        self.sampling_params = sampling_params or {}

    def prepare_adapter(
        self,
        adapter_id: Optional[str],
        adapter_path: Optional[str],
        weights_got_updated: bool,
    ) -> None:
        # Issue a fresh vLLM id on first use or after a weight update so the
        # engine does not serve a stale cached adapter
        if weights_got_updated or adapter_id not in self.vllm_adapter_ids:
            self.vllm_adapter_ids[adapter_id] = generate_short_id()
        self.current_lora_request = LoRARequest(
            adapter_id,
            self.vllm_adapter_ids[adapter_id],
            adapter_path,
        )

    async def toggle_training_mode(self) -> None:
        await self.engine.sleep(level=1)

    async def toggle_eval_mode(self) -> None:
        await self.engine.wake_up()

    def shutdown(self) -> None:
        # No explicit close call; the engine stops when the process exits.
        pass

    async def generate(
        self,
        input_token_ids: list[int],
        regex: Optional[str] = None,
        extract_thinking: bool = False,
    ) -> LLMInferenceOutput:
        # Build SamplingParams, optionally with regex-guided decoding
        guided = GuidedDecodingParams(regex=regex) if regex else None
        sp = SamplingParams(
            **self.sampling_params,
            guided_decoding=guided,
            output_kind=RequestOutputKind.FINAL_ONLY,
        )

        prompt = TokensPrompt(prompt_token_ids=input_token_ids)
        request_id = f"req-{asyncio.get_running_loop().time()}"
        result_generator = self.engine.generate(
            prompt,
            sp,
            request_id,
            lora_request=self.current_lora_request,
        )

        res = None
        async for out in result_generator:  # with FINAL_ONLY this yields once
            res = out

        raw_text = res.outputs[0].text
        out_token_ids = res.outputs[0].token_ids
        # Requires logprobs to be requested in sampling_params
        log_probs = [
            logprob_dict[token_id].logprob
            for token_id, logprob_dict in zip(out_token_ids, res.outputs[0].logprobs)
        ]
        log_probs = torch.tensor(log_probs)
        out_token_ids = torch.tensor(out_token_ids, dtype=torch.long)
        content = raw_text
        reasoning_content = None

        if extract_thinking:
            m = re.match(
                r"^\n<think>\n([\s\S]*?)</think>\n\n(.*)$", raw_text, flags=re.DOTALL
            )
            if m:
                reasoning_content = m.group(1)
                content = m.group(2)
        return LLMInferenceOutput(
            content=content,
            reasoning_content=reasoning_content,
            log_probs=log_probs,
            out_token_ids=out_token_ids,
        )
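A usage sketch for the in-process vLLM backend. Unlike the HTTP backends, generate() takes token ids, so the caller applies the chat template first. The model name is a placeholder, and logprobs must be requested in sampling_params because generate() reads per-token log-probs from the output:

import asyncio

from transformers import AutoTokenizer

from mllm.models.inference_backend_vllm import VLLMAsyncBackend


async def main() -> None:
    model = "Qwen/Qwen2.5-0.5B-Instruct"  # placeholder
    tok = AutoTokenizer.from_pretrained(model)
    backend = VLLMAsyncBackend(
        model_name=model,
        tokenizer=tok,
        sampling_params={"max_tokens": 64, "logprobs": 1},
    )
    input_ids = tok.apply_chat_template(
        [{"role": "user", "content": "Say hi."}], add_generation_prompt=True
    )
    out = await backend.generate(input_ids)  # no LoRA request -> base model
    print(out.content)


if __name__ == "__main__":
    asyncio.run(main())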
src_code_for_reproducibility/models/inference_backend_vllm_local_server.py
ADDED
@@ -0,0 +1,160 @@
import subprocess
import time

import httpx
import requests

from mllm.models.inference_backend import LLMInferenceBackend


class HttpVLLMBackend(LLMInferenceBackend):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # The abstract base __init__ stores nothing, so read these from kwargs
        self.model_name = kwargs["model_name"]
        self.adapter_paths = kwargs.get("adapter_paths", {})
        self.port = kwargs.get("port", 8000)
        self.host = kwargs.get("host", "0.0.0.0")
        self.proc = None
        self.base_url = f"http://{self.host}:{self.port}"
        # vLLM memory safety knobs
        self.gpu_mem_util = kwargs.get("gpu_memory_utilization", 0.9)
        self.max_model_len = kwargs.get("max_model_len", None)
        self.max_num_seqs = kwargs.get("max_num_seqs", None)
        self.max_batched_tokens = kwargs.get("max_num_batched_tokens", None)
        self.dtype = kwargs.get("dtype", "bfloat16")
        self.trust_remote_code = kwargs.get("trust_remote_code", False)
        # LoRA strategy: "preload" (CLI) or "runtime" (endpoints), depending
        # on the vLLM build
        self.lora_mode = kwargs.get("lora_mode", "preload")
        self.runtime_lora_enabled = self.lora_mode == "runtime"

        # If preloading: build CLI args (adapter name -> path)
        self._preload_lora_args = []
        if self.adapter_paths and self.lora_mode == "preload":
            # Recent vLLM versions support multiple LoRA modules via the CLI;
            # the exact flag shape can vary by version:
            #   --lora-modules adapter_id=path
            for aid, pth in self.adapter_paths.items():
                self._preload_lora_args += ["--lora-modules", f"{aid}={pth}"]

    def launch(self):
        # Build the vLLM serve command
        cmd = [
            "python3",
            "-m",
            "vllm.entrypoints.openai.api_server",
            "--model",
            self.model_name,
            "--host",
            self.host,
            "--port",
            str(self.port),
            "--dtype",
            self.dtype,
            "--gpu-memory-utilization",
            str(self.gpu_mem_util),
        ]
        if self.trust_remote_code:
            cmd += ["--trust-remote-code"]
        if self.max_model_len:
            cmd += ["--max-model-len", str(self.max_model_len)]
        if self.max_num_seqs:
            cmd += ["--max-num-seqs", str(self.max_num_seqs)]
        if self.max_batched_tokens:
            cmd += ["--max-num-batched-tokens", str(self.max_batched_tokens)]
        cmd += self._preload_lora_args

        self.proc = subprocess.Popen(
            cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True
        )
        self._wait_ready()

    def _wait_ready(self, timeout=120):
        url = f"{self.base_url}/v1/models"
        t0 = time.time()
        while time.time() - t0 < timeout:
            try:
                r = requests.get(url, timeout=2)
                if r.status_code == 200:
                    return
            except Exception:
                pass
            time.sleep(1)
        raise RuntimeError("vLLM server did not become ready in time")

    def is_ready(self) -> bool:
        try:
            return (
                requests.get(f"{self.base_url}/v1/models", timeout=2).status_code == 200
            )
        except Exception:
            return False

    def prepare_adapter(self, adapter_id: str) -> None:
        if not adapter_id or not self.runtime_lora_enabled:
            return
        # Newer vLLM builds expose runtime LoRA endpoints. If yours differs,
        # adjust the path/body here and keep the interface stable.
        try:
            requests.post(
                f"{self.base_url}/v1/load_lora_adapter",
                json={
                    "adapter_name": adapter_id,
                    "adapter_path": self.adapter_paths[adapter_id],
                },
                timeout=10,
            ).raise_for_status()
        except Exception:
            # The adapter may already be loaded, or the endpoint may be absent
            pass

    async def generate(
        self, prompt_text: str, sampling_params: dict, adapter_id: str | None
    ) -> str:
        # Map sampling params to the OpenAI schema
        body = {
            "model": self.model_name,
            "messages": [{"role": "user", "content": prompt_text}],
            "temperature": sampling_params.get("temperature", 1.0),
            "top_p": sampling_params.get("top_p", 1.0),
            "max_tokens": sampling_params.get("max_new_tokens", 128),
        }
        # Optional knobs:
        if sampling_params.get("top_k", -1) > 0:
            # vLLM accepts top_k via extra params; put it under "extra_body"
            body.setdefault("extra_body", {})["top_k"] = sampling_params["top_k"]
        if sampling_params.get("min_new_tokens", None) is not None:
            body.setdefault("extra_body", {})["min_tokens"] = sampling_params[
                "min_new_tokens"
            ]
        if sampling_params.get("frequency_penalty", None) is not None:
            body["frequency_penalty"] = sampling_params["frequency_penalty"]

        # Select the LoRA adapter by name; most builds accept the same
        # "lora_adapter" field whether the adapter was preloaded via the CLI
        # or loaded at runtime
        if adapter_id:
            body.setdefault("extra_body", {})["lora_adapter"] = adapter_id

        url = f"{self.base_url}/v1/chat/completions"
        timeout = httpx.Timeout(3600.0, connect=3600.0)
        async with httpx.AsyncClient(timeout=timeout) as client:
            resp = await client.post(url, json=body)
            resp.raise_for_status()
            data = resp.json()
            return data["choices"][0]["message"]["content"]

    def toggle_training_mode(self) -> None:
        # vLLM doesn't expose an explicit KV "release" toggle via the API.
        # Strategy: keep the inference server idle during training, or run
        # training in a separate process.
        pass

    def toggle_eval_mode(self) -> None:
        pass

    def shutdown(self) -> None:
        if self.proc:
            self.proc.terminate()
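For reference, this is roughly the OpenAI-style request body that generate() assembles for the vLLM server, shown standalone with illustrative values; non-standard sampling knobs travel under extra_body, which the server merges into the request:

body = {
    "model": "my-model",  # placeholder
    "messages": [{"role": "user", "content": "Say hi."}],
    "temperature": 0.7,
    "top_p": 0.95,
    "max_tokens": 64,
    "frequency_penalty": 0.1,
    # Non-OpenAI knobs go under extra_body:
    "extra_body": {"top_k": 40, "min_tokens": 4, "lora_adapter": "agent_a"},
}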
src_code_for_reproducibility/models/large_language_model_api.py
ADDED
@@ -0,0 +1,171 @@
from __future__ import annotations

import asyncio
import os
import re
from typing import Any, Callable, Dict, Optional

import backoff
from openai import AsyncOpenAI

from mllm.markov_games.rollout_tree import ChatTurn
from mllm.models.inference_backend import LLMInferenceOutput

# TODO: Get this automatically from OpenAI
reasoning_models = [
    "gpt-5-nano",
    "gpt-5-mini",
    "gpt-5",
    "o1-mini",
    "o1",
    "o1-pro",
    "o3-mini",
    "o3",
    "o3-pro",
    "o4-mini",
    "o4",
    "o4-pro",
]


class LargeLanguageModelOpenAI:
    """Tiny async wrapper around the OpenAI Responses API."""

    def __init__(
        self,
        llm_id: str = "",
        model: str = "gpt-4.1-mini",
        api_key: Optional[str] = None,
        base_url: Optional[str] = None,
        timeout_s: float = 300.0,
        regex_max_attempts: int = 10,
        sampling_params: Optional[Dict[str, Any]] = None,
        init_kwargs: Optional[Dict[str, Any]] = None,
        output_directory: Optional[str] = None,
    ) -> None:
        self.llm_id = llm_id
        self.model = model
        key = api_key or os.getenv("OPENAI_API_KEY")
        if not key:
            raise RuntimeError(
                "Set OPENAI_API_KEY as a global environment variable or pass api_key."
            )
        client_kwargs: Dict[str, Any] = {"api_key": key, "timeout": timeout_s}
        if base_url:
            client_kwargs["base_url"] = base_url
        self.client = AsyncOpenAI(**client_kwargs)

        # Sampling/default request params set at init
        self.sampling_params = dict(sampling_params or {})
        self.use_reasoning = model in reasoning_models
        if self.use_reasoning:
            self.sampling_params["reasoning"] = {
                "effort": "low",
                "summary": "detailed",
            }
        self.regex_max_attempts = max(1, int(regex_max_attempts))

    def get_inference_policies(self) -> Dict[str, Callable]:
        return {
            self.llm_id: self.get_action,
        }

    # The adapter/mode hooks below are no-ops: API models have no local weights.
    async def prepare_adapter_for_inference(self, *args: Any, **kwargs: Any) -> None:
        await asyncio.sleep(0)

    async def toggle_eval_mode(self, *args: Any, **kwargs: Any) -> None:
        await asyncio.sleep(0)

    async def toggle_training_mode(self, *args: Any, **kwargs: Any) -> None:
        await asyncio.sleep(0)

    async def export_adapters(self, *args: Any, **kwargs: Any) -> None:
        await asyncio.sleep(0)

    async def checkpoint_all_adapters(self, *args: Any, **kwargs: Any) -> None:
        await asyncio.sleep(0)

    def extract_output_from_response(self, resp: Any) -> LLMInferenceOutput:
        # resp is an OpenAI Responses API response object; reasoning models
        # emit a reasoning item before the message item.
        if len(resp.output) > 1:
            summary = resp.output[0].summary
            if summary:
                reasoning_content = f"OpenAI Reasoning Summary: {summary[0].text}"
            else:
                reasoning_content = None
            content = resp.output[1].content[0].text
        else:
            reasoning_content = None
            content = resp.output[0].content[0].text

        return LLMInferenceOutput(
            content=content,
            reasoning_content=reasoning_content,
        )

    @backoff.on_exception(backoff.expo, Exception, max_time=10**10, max_tries=10**10)
    async def get_action(
        self,
        state: list[ChatTurn],
        agent_id: str,
        regex: Optional[str] = None,
    ) -> LLMInferenceOutput:
        # Keep only role/content keys in the prompt, else OpenAI will error
        prompt = [{"role": p.role, "content": p.content} for p in state]

        # If a regex is required, prime the model and validate client-side
        if regex:
            constraint_msg = {
                "role": "user",
                "content": (
                    f"Output must match this regex exactly: {regex}\n"
                    "Return only the matching string, with no quotes or extra text."
                ),
            }
            prompt = [constraint_msg, *prompt]
            pattern = re.compile(regex)
            for _ in range(self.regex_max_attempts):
                resp = await self.client.responses.create(
                    model=self.model,
                    input=prompt,
                    **self.sampling_params,
                )
                policy_output = self.extract_output_from_response(resp)
                if pattern.fullmatch(policy_output.content):
                    return policy_output
                prompt = [
                    *prompt,
                    {
                        "role": "user",
                        "content": (
                            f"Invalid response format. Expected format (regex): {regex}\n"
                            "Please try again and provide ONLY a response that matches this regex."
                        ),
                    },
                ]
            # Out of attempts: return the last (non-matching) output
            return policy_output

        # Simple, unconstrained generation
        resp = await self.client.responses.create(
            model=self.model,
            input=prompt,
            **self.sampling_params,
        )
        return self.extract_output_from_response(resp)

    def shutdown(self) -> None:
        self.client = None
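A usage sketch for the OpenAI wrapper as an inference policy. It assumes OPENAI_API_KEY is set in the environment; the regex constrains the answer client-side with retries:

import asyncio

from mllm.markov_games.rollout_tree import ChatTurn
from mllm.models.large_language_model_api import LargeLanguageModelOpenAI


async def main() -> None:
    llm = LargeLanguageModelOpenAI(llm_id="oai", model="gpt-4.1-mini")
    state = [
        ChatTurn(role="user", agent_id="env", content="Is water wet? Answer yes or no.")
    ]
    out = await llm.get_action(state, agent_id="oai", regex=r"(yes|no)")
    print(out.content)


if __name__ == "__main__":
    asyncio.run(main())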