repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/aflow/scripts/optimizer_utils/data_utils.py | metagpt/ext/aflow/scripts/optimizer_utils/data_utils.py | import datetime
import json
import os
import random
import numpy as np
import pandas as pd
from metagpt.logs import logger
from metagpt.utils.common import read_json_file, write_json_file
class DataUtils:
def __init__(self, root_path: str):
self.root_path = root_path
self.top_scores = []
def load_results(self, path: str) -> list:
result_path = os.path.join(path, "results.json")
if os.path.exists(result_path):
with open(result_path, "r") as json_file:
try:
return json.load(json_file)
except json.JSONDecodeError:
return []
return []
def get_top_rounds(self, sample: int, path=None, mode="Graph"):
self._load_scores(path, mode)
unique_rounds = set()
unique_top_scores = []
first_round = next((item for item in self.top_scores if item["round"] == 1), None)
if first_round:
unique_top_scores.append(first_round)
unique_rounds.add(1)
for item in self.top_scores:
if item["round"] not in unique_rounds:
unique_top_scores.append(item)
unique_rounds.add(item["round"])
if len(unique_top_scores) >= sample:
break
return unique_top_scores
def select_round(self, items):
if not items:
raise ValueError("Item list is empty.")
sorted_items = sorted(items, key=lambda x: x["score"], reverse=True)
scores = [item["score"] * 100 for item in sorted_items]
probabilities = self._compute_probabilities(scores)
logger.info(f"\nMixed probability distribution: {probabilities}")
logger.info(f"\nSorted rounds: {sorted_items}")
selected_index = np.random.choice(len(sorted_items), p=probabilities)
logger.info(f"\nSelected index: {selected_index}, Selected item: {sorted_items[selected_index]}")
return sorted_items[selected_index]
def _compute_probabilities(self, scores, alpha=0.2, lambda_=0.3):
scores = np.array(scores, dtype=np.float64)
n = len(scores)
if n == 0:
raise ValueError("Score list is empty.")
uniform_prob = np.full(n, 1.0 / n, dtype=np.float64)
max_score = np.max(scores)
shifted_scores = scores - max_score
exp_weights = np.exp(alpha * shifted_scores)
sum_exp_weights = np.sum(exp_weights)
if sum_exp_weights == 0:
raise ValueError("Sum of exponential weights is 0, cannot normalize.")
score_prob = exp_weights / sum_exp_weights
mixed_prob = lambda_ * uniform_prob + (1 - lambda_) * score_prob
total_prob = np.sum(mixed_prob)
if not np.isclose(total_prob, 1.0):
mixed_prob = mixed_prob / total_prob
return mixed_prob
def load_log(self, cur_round, path=None, mode: str = "Graph"):
if mode == "Graph":
log_dir = os.path.join(self.root_path, "workflows", f"round_{cur_round}", "log.json")
else:
log_dir = path
# 检查文件是否存在
if not os.path.exists(log_dir):
return "" # 如果文件不存在,返回空字符串
logger.info(log_dir)
data = read_json_file(log_dir, encoding="utf-8")
if isinstance(data, dict):
data = [data]
elif not isinstance(data, list):
data = list(data)
if not data:
return ""
sample_size = min(3, len(data))
random_samples = random.sample(data, sample_size)
log = ""
for sample in random_samples:
log += json.dumps(sample, indent=4, ensure_ascii=False) + "\n\n"
return log
def get_results_file_path(self, graph_path: str) -> str:
return os.path.join(graph_path, "results.json")
def create_result_data(self, round: int, score: float, avg_cost: float, total_cost: float) -> dict:
now = datetime.datetime.now()
return {"round": round, "score": score, "avg_cost": avg_cost, "total_cost": total_cost, "time": now}
def save_results(self, json_file_path: str, data: list):
write_json_file(json_file_path, data, encoding="utf-8", indent=4)
def _load_scores(self, path=None, mode="Graph"):
if mode == "Graph":
rounds_dir = os.path.join(self.root_path, "workflows")
else:
rounds_dir = path
result_file = os.path.join(rounds_dir, "results.json")
self.top_scores = []
data = read_json_file(result_file, encoding="utf-8")
df = pd.DataFrame(data)
scores_per_round = df.groupby("round")["score"].mean().to_dict()
for round_number, average_score in scores_per_round.items():
self.top_scores.append({"round": round_number, "score": average_score})
self.top_scores.sort(key=lambda x: x["score"], reverse=True)
return self.top_scores
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/aflow/scripts/optimizer_utils/graph_utils.py | metagpt/ext/aflow/scripts/optimizer_utils/graph_utils.py | import json
import os
import re
import time
import traceback
from typing import List
from metagpt.ext.aflow.scripts.prompts.optimize_prompt import (
WORKFLOW_CUSTOM_USE,
WORKFLOW_INPUT,
WORKFLOW_OPTIMIZE_PROMPT,
WORKFLOW_TEMPLATE,
)
from metagpt.logs import logger
class GraphUtils:
def __init__(self, root_path: str):
self.root_path = root_path
def create_round_directory(self, graph_path: str, round_number: int) -> str:
directory = os.path.join(graph_path, f"round_{round_number}")
os.makedirs(directory, exist_ok=True)
return directory
def load_graph(self, round_number: int, workflows_path: str):
workflows_path = workflows_path.replace("\\", ".").replace("/", ".")
graph_module_name = f"{workflows_path}.round_{round_number}.graph"
try:
graph_module = __import__(graph_module_name, fromlist=[""])
graph_class = getattr(graph_module, "Workflow")
return graph_class
except ImportError as e:
logger.info(f"Error loading graph for round {round_number}: {e}")
raise
def read_graph_files(self, round_number: int, workflows_path: str):
prompt_file_path = os.path.join(workflows_path, f"round_{round_number}", "prompt.py")
graph_file_path = os.path.join(workflows_path, f"round_{round_number}", "graph.py")
try:
with open(prompt_file_path, "r", encoding="utf-8") as file:
prompt_content = file.read()
with open(graph_file_path, "r", encoding="utf-8") as file:
graph_content = file.read()
except FileNotFoundError as e:
logger.info(f"Error: File not found for round {round_number}: {e}")
raise
except Exception as e:
logger.info(f"Error loading prompt for round {round_number}: {e}")
raise
return prompt_content, graph_content
def extract_solve_graph(self, graph_load: str) -> List[str]:
pattern = r"class Workflow:.+"
return re.findall(pattern, graph_load, re.DOTALL)
def load_operators_description(self, operators: List[str]) -> str:
path = f"{self.root_path}/workflows/template/operator.json"
operators_description = ""
for id, operator in enumerate(operators):
operator_description = self._load_operator_description(id + 1, operator, path)
operators_description += f"{operator_description}\n"
return operators_description
def _load_operator_description(self, id: int, operator_name: str, file_path: str) -> str:
with open(file_path, "r") as f:
operator_data = json.load(f)
matched_data = operator_data[operator_name]
desc = matched_data["description"]
interface = matched_data["interface"]
return f"{id}. {operator_name}: {desc}, with interface {interface})."
def create_graph_optimize_prompt(
self,
experience: str,
score: float,
graph: str,
prompt: str,
operator_description: str,
type: str,
log_data: str,
) -> str:
graph_input = WORKFLOW_INPUT.format(
experience=experience,
score=score,
graph=graph,
prompt=prompt,
operator_description=operator_description,
type=type,
log=log_data,
)
graph_system = WORKFLOW_OPTIMIZE_PROMPT.format(type=type)
return graph_input + WORKFLOW_CUSTOM_USE + graph_system
async def get_graph_optimize_response(self, graph_optimize_node):
max_retries = 5
retries = 0
while retries < max_retries:
try:
response = graph_optimize_node.instruct_content.model_dump()
return response
except Exception as e:
retries += 1
logger.info(f"Error generating prediction: {e}. Retrying... ({retries}/{max_retries})")
if retries == max_retries:
logger.info("Maximum retries reached. Skipping this sample.")
break
traceback.print_exc()
time.sleep(5)
return None
def write_graph_files(self, directory: str, response: dict, round_number: int, dataset: str):
graph = WORKFLOW_TEMPLATE.format(graph=response["graph"], round=round_number, dataset=dataset)
with open(os.path.join(directory, "graph.py"), "w", encoding="utf-8") as file:
file.write(graph)
with open(os.path.join(directory, "prompt.py"), "w", encoding="utf-8") as file:
file.write(response["prompt"])
with open(os.path.join(directory, "__init__.py"), "w", encoding="utf-8") as file:
file.write("")
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/aflow/scripts/optimizer_utils/experience_utils.py | metagpt/ext/aflow/scripts/optimizer_utils/experience_utils.py | import json
import os
from collections import defaultdict
from metagpt.logs import logger
from metagpt.utils.common import read_json_file, write_json_file
class ExperienceUtils:
def __init__(self, root_path: str):
self.root_path = root_path
def load_experience(self, path=None, mode: str = "Graph"):
if mode == "Graph":
rounds_dir = os.path.join(self.root_path, "workflows")
else:
rounds_dir = path
experience_data = defaultdict(lambda: {"score": None, "success": {}, "failure": {}})
for round_dir in os.listdir(rounds_dir):
if os.path.isdir(os.path.join(rounds_dir, round_dir)) and round_dir.startswith("round_"):
round_path = os.path.join(rounds_dir, round_dir)
try:
round_number = int(round_dir.split("_")[1])
json_file_path = os.path.join(round_path, "experience.json")
if os.path.exists(json_file_path):
data = read_json_file(json_file_path, encoding="utf-8")
father_node = data["father node"]
if experience_data[father_node]["score"] is None:
experience_data[father_node]["score"] = data["before"]
if data["succeed"]:
experience_data[father_node]["success"][round_number] = {
"modification": data["modification"],
"score": data["after"],
}
else:
experience_data[father_node]["failure"][round_number] = {
"modification": data["modification"],
"score": data["after"],
}
except Exception as e:
logger.info(f"Error processing {round_dir}: {str(e)}")
experience_data = dict(experience_data)
output_path = os.path.join(rounds_dir, "processed_experience.json")
with open(output_path, "w", encoding="utf-8") as outfile:
json.dump(experience_data, outfile, indent=4, ensure_ascii=False)
logger.info(f"Processed experience data saved to {output_path}")
return experience_data
def format_experience(self, processed_experience, sample_round):
experience_data = processed_experience.get(sample_round)
if experience_data:
experience = f"Original Score: {experience_data['score']}\n"
experience += "These are some conclusions drawn from experience:\n\n"
for key, value in experience_data["failure"].items():
experience += f"-Absolutely prohibit {value['modification']} (Score: {value['score']})\n"
for key, value in experience_data["success"].items():
experience += f"-Absolutely prohibit {value['modification']} \n"
experience += "\n\nNote: Take into account past failures and avoid repeating the same mistakes, as these failures indicate that these approaches are ineffective. You must fundamentally change your way of thinking, rather than simply using more advanced Python syntax like for, if, else, etc., or modifying the prompt."
else:
experience = f"No experience data found for round {sample_round}."
return experience
def check_modification(self, processed_experience, modification, sample_round):
experience_data = processed_experience.get(sample_round)
if experience_data:
for key, value in experience_data["failure"].items():
if value["modification"] == modification:
return False
for key, value in experience_data["success"].items():
if value["modification"] == modification:
return False
return True
else:
return True # 如果 experience_data 为空,也返回 True
def create_experience_data(self, sample, modification):
return {
"father node": sample["round"],
"modification": modification,
"before": sample["score"],
"after": None,
"succeed": None,
}
def update_experience(self, directory, experience, avg_score):
experience["after"] = avg_score
experience["succeed"] = bool(avg_score > experience["before"])
write_json_file(os.path.join(directory, "experience.json"), experience, encoding="utf-8", indent=4)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/aflow/scripts/optimizer_utils/convergence_utils.py | metagpt/ext/aflow/scripts/optimizer_utils/convergence_utils.py | # -*- coding: utf-8 -*-
# @Date : 9/23/2024 10:00 AM
# @Author : Issac
# @Desc :
import json
import os
import numpy as np
from metagpt.logs import logger
class ConvergenceUtils:
def __init__(self, root_path):
self.root_path = root_path
self.data = None
self.rounds = None
self.avg_scores, self.stds = None, None
def load_data(self, root_path):
"""
Read JSON file, create a new file if it doesn't exist, then return the data.
"""
rounds_dir = os.path.join(root_path, "workflows")
result_file = os.path.join(rounds_dir, "results.json")
# Ensure directory exists
os.makedirs(rounds_dir, exist_ok=True)
# If file doesn't exist, create a new one with an empty list
if not os.path.exists(result_file):
with open(result_file, "w") as file:
json.dump([], file)
# Read file and return data
with open(result_file, "r") as file:
return json.load(file)
def process_rounds(self):
"""
Organize data by round, return a dictionary of scores by round.
"""
self.data = self.load_data(root_path=self.root_path)
rounds = {}
for entry in self.data:
round_number = entry["round"]
score = entry["score"]
if round_number not in rounds:
rounds[round_number] = []
rounds[round_number].append(score)
return rounds
def calculate_avg_and_std(self):
"""
Calculate average score and standard deviation for each round, return two lists: average scores and standard deviations.
"""
self.rounds = self.process_rounds()
sorted_rounds = sorted(self.rounds.items(), key=lambda x: x[0])
avg_scores = []
stds = []
for round_number, scores in sorted_rounds:
avg_scores.append(np.mean(scores))
stds.append(np.std(scores))
return avg_scores, stds
def check_convergence(self, top_k=3, z=0, consecutive_rounds=5):
"""
Check for convergence. z is the z-score corresponding to the confidence level.
consecutive_rounds is the number of consecutive rounds that must meet the stop condition.
"""
# Calculate average score and standard deviation for each round
self.avg_scores, self.stds = self.calculate_avg_and_std()
# If total rounds are not enough to calculate top_k+1 rounds, return not converged
if len(self.avg_scores) < top_k + 1:
return False, None, None
convergence_count = 0 # Convergence counter
previous_y = None # Y value of the previous round (average of top_k scores)
sigma_y_previous = None # Standard error of Y value from previous round
for i in range(len(self.avg_scores)):
# Dynamically select top_k from current round and all previous rounds
top_k_indices = np.argsort(self.avg_scores[: i + 1])[::-1][
:top_k
] # Select top k indices by descending average score
top_k_scores = [self.avg_scores[j] for j in top_k_indices] # Get list of top k scores
top_k_stds = [
self.stds[j] for j in top_k_indices
] # Get list of standard deviations corresponding to top k scores
# Calculate mean of top k scores for current round, i.e., y_current
y_current = np.mean(top_k_scores)
# Calculate standard error of y_current (sigma_y_current), representing score dispersion
sigma_y_current = np.sqrt(np.sum([s**2 for s in top_k_stds]) / (top_k**2))
# If not the first round, calculate change in Y (Delta_Y) and corresponding standard error
if previous_y is not None:
# Calculate Y difference between current round and previous round
delta_y = y_current - previous_y
# Calculate standard error of Y difference (sigma_Delta_Y)
sigma_delta_y = np.sqrt(sigma_y_current**2 + sigma_y_previous**2)
# Check if Y change is within acceptable confidence interval, i.e., convergence condition
if abs(delta_y) <= z * sigma_delta_y:
convergence_count += 1
# If consecutive converged rounds reach set value, return convergence information
if convergence_count >= consecutive_rounds:
return True, i - consecutive_rounds + 1, i
else:
# If change is large, reset convergence counter
convergence_count = 0
# Update Y value and standard error for previous round
previous_y = y_current
sigma_y_previous = sigma_y_current
# If convergence condition not met, return not converged
return False, None, None
def print_results(self):
"""
Print average score and standard deviation for all rounds.
"""
self.avg_scores, self.stds = self.calculate_avg_and_std()
for i, (avg_score, std) in enumerate(zip(self.avg_scores, self.stds), 1):
logger.info(f"Round {i}: Average Score = {avg_score:.4f}, Standard Deviation = {std:.4f}")
if __name__ == "__main__":
# Use this class and specify top_k
checker = ConvergenceUtils("path") # For example, set top_k=5
converged, convergence_round, final_round = checker.check_convergence()
if converged:
logger.info(f"Convergence detected, occurred at round {convergence_round}, final round is {final_round}")
else:
logger.info("No convergence detected within all rounds")
# Print average score and standard deviation for each round
checker.print_results()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/aflow/scripts/optimizer_utils/evaluation_utils.py | metagpt/ext/aflow/scripts/optimizer_utils/evaluation_utils.py | from metagpt.ext.aflow.scripts.evaluator import Evaluator
class EvaluationUtils:
def __init__(self, root_path: str):
self.root_path = root_path
async def evaluate_initial_round(self, optimizer, graph_path, directory, validation_n, data):
# 使用 optimizer 的 graph_utils 来加载图
optimizer.graph = optimizer.graph_utils.load_graph(optimizer.round, graph_path)
evaluator = Evaluator(eval_path=directory)
for i in range(validation_n):
score, avg_cost, total_cost = await evaluator.graph_evaluate(
optimizer.dataset,
optimizer.graph,
{"dataset": optimizer.dataset, "llm_config": optimizer.execute_llm_config},
directory,
is_test=False,
)
new_data = optimizer.data_utils.create_result_data(optimizer.round, score, avg_cost, total_cost)
data.append(new_data)
result_path = optimizer.data_utils.get_results_file_path(graph_path)
optimizer.data_utils.save_results(result_path, data)
return data
async def evaluate_graph(self, optimizer, directory, validation_n, data, initial=False):
evaluator = Evaluator(eval_path=directory)
sum_score = 0
for i in range(validation_n):
score, avg_cost, total_cost = await evaluator.graph_evaluate(
optimizer.dataset,
optimizer.graph,
{"dataset": optimizer.dataset, "llm_config": optimizer.execute_llm_config},
directory,
is_test=False,
)
cur_round = optimizer.round + 1 if initial is False else optimizer.round
new_data = optimizer.data_utils.create_result_data(cur_round, score, avg_cost, total_cost)
data.append(new_data)
result_path = optimizer.data_utils.get_results_file_path(f"{optimizer.root_path}/workflows")
optimizer.data_utils.save_results(result_path, data)
sum_score += score
return sum_score / validation_n
async def evaluate_graph_test(self, optimizer, directory, is_test=True):
evaluator = Evaluator(eval_path=directory)
return await evaluator.graph_evaluate(
optimizer.dataset,
optimizer.graph,
{"dataset": optimizer.dataset, "llm_config": optimizer.execute_llm_config},
directory,
is_test=is_test,
)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/aflow/scripts/optimized/__init__.py | metagpt/ext/aflow/scripts/optimized/__init__.py | python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false | |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/aflow/benchmark/drop.py | metagpt/ext/aflow/benchmark/drop.py | import re
import string
from collections import Counter
from typing import Callable, List, Tuple
from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_fixed
from metagpt.ext.aflow.benchmark.benchmark import BaseBenchmark
from metagpt.logs import logger
class DROPBenchmark(BaseBenchmark):
def __init__(self, name: str, file_path: str, log_path: str):
super().__init__(name, file_path, log_path)
def normalize_answer(self, s: str) -> List[str]:
"""
Normalize answers for evaluation.
"""
def remove_articles(text):
return re.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def calculate_score(self, ground_truth: str, prediction: str) -> Tuple[float, str]:
"""
Compute the F1 score between prediction and ground truth answers.
"""
prediction_tokens = self.normalize_answer(prediction).split()
ground_truth_tokens = self.normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0, prediction
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1, prediction
@retry(stop=stop_after_attempt(5), wait=wait_fixed(1), retry=retry_if_exception_type(Exception), reraise=True)
async def _generate_output(self, graph, input_text):
return await graph(input_text)
async def evaluate_problem(self, problem: dict, graph: Callable) -> Tuple[str, str, str, float, float]:
input_text = problem["context"]
expected_output = problem["ref_text"]
answers = expected_output.split("|")
try:
output, cost = await self._generate_output(graph, input_text)
f1_scores = []
for answer in answers:
if answer.strip() != "":
output_parts = output.split("|")
for output_part in output_parts:
f1_score, _ = self.calculate_score(answer, output_part)
f1_scores.append(f1_score)
uni_score = max(f1_scores)
if uni_score < 0.3:
self.log_mismatch(input_text, expected_output, output, output)
return input_text, output, expected_output, uni_score, cost
except Exception as e:
logger.info(f"Maximum retries reached. Skipping this sample. Error: {e}")
return input_text, str(e), expected_output, 0.0, 0.0
def get_result_columns(self) -> List[str]:
return ["inputs", "prediction", "expected_output", "score", "cost"]
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/aflow/benchmark/gsm8k.py | metagpt/ext/aflow/benchmark/gsm8k.py | # -*- coding: utf-8 -*-
# @Date :
# @Author : all
# @Desc : test on gsm8k
import re
from typing import Callable, List, Optional, Tuple
from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_fixed
from metagpt.ext.aflow.benchmark.benchmark import BaseBenchmark
from metagpt.logs import logger
class GSM8KBenchmark(BaseBenchmark):
def __init__(self, name: str, file_path: str, log_path: str):
super().__init__(name, file_path, log_path)
def extract_number(self, text: str) -> Optional[float]:
matches = re.findall(r"[-+]?\d+(?:,\d{3})*(?:\.\d+)?|\d+\.\d+", str(text))
if matches:
last_number = matches[-1].replace(",", "")
try:
return float(last_number)
except ValueError:
return None
else:
return None
def calculate_score(self, expected_output: float, prediction: float) -> Tuple[float, float]:
if prediction is None:
return 0.0, prediction
return 1.0 if abs(expected_output - prediction) <= 1e-6 else 0.0, prediction
@retry(stop=stop_after_attempt(5), wait=wait_fixed(1), retry=retry_if_exception_type(Exception), reraise=True)
async def _generate_output(self, graph, input_text):
return await graph(input_text)
async def evaluate_problem(self, problem: dict, graph: Callable) -> Tuple[str, str, float, float, float]:
input_text = problem["question"]
expected_output = self.extract_number(problem["answer"])
try:
output, cost = await self._generate_output(graph, input_text)
predicted_number = self.extract_number(output)
score, extracted_output = self.calculate_score(expected_output, predicted_number)
if score == 0:
self.log_mismatch(input_text, expected_output, output, extracted_output)
return input_text, output, expected_output, score, cost
except Exception as e:
logger.info(f"Maximum retries reached. Skipping this sample. Error: {e}")
return input_text, str(e), expected_output, 0.0, 0.0
def get_result_columns(self) -> List[str]:
return ["question", "prediction", "expected_output", "score", "cost"]
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/aflow/benchmark/hotpotqa.py | metagpt/ext/aflow/benchmark/hotpotqa.py | import re
import string
from collections import Counter
from typing import Callable, List, Tuple
from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_fixed
from metagpt.ext.aflow.benchmark.benchmark import BaseBenchmark
from metagpt.logs import logger
class HotpotQABenchmark(BaseBenchmark):
def __init__(self, name: str, file_path: str, log_path: str):
super().__init__(name, file_path, log_path)
def normalize_answer(self, s: str) -> str:
def remove_articles(text):
return re.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def calculate_score(self, ground_truth: str, prediction: str) -> Tuple[float, str]:
prediction_tokens = self.normalize_answer(prediction).split()
ground_truth_tokens = self.normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0, prediction
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1, prediction
@retry(stop=stop_after_attempt(5), wait=wait_fixed(1), retry=retry_if_exception_type(Exception), reraise=True)
async def _generate_output(self, graph, input_text):
return await graph(input_text)
async def evaluate_problem(self, problem: dict, graph: Callable) -> Tuple[str, str, str, str, float, float]:
input_text = problem["question"]
expected_output = problem["answer"]
paragraphs = [item[1] for item in problem["context"] if isinstance(item[1], list)]
context_str = "\n".join(" ".join(paragraph) for paragraph in paragraphs)
inputs = f"Context: {context_str}\n\nQuestion: {input_text}\n\nAnswer:"
try:
output, cost = await self._generate_output(graph, inputs)
score, extracted_output = self.calculate_score(expected_output, output)
if (
score < 0.3
): # We set the threshold for collecting incorrect questions to 0.3, as F1 Score cannot be simply judged using 0-1
self.log_mismatch(input_text, expected_output, output, extracted_output)
return input_text, context_str, output, expected_output, score, cost
except Exception as e:
logger.info(f"Maximum retries reached. Skipping this sample. Error: {e}")
return input_text, context_str, str(e), expected_output, 0.0, 0.0
def get_result_columns(self) -> List[str]:
return ["question", "context", "prediction", "expected_output", "score", "cost"]
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/aflow/benchmark/mbpp.py | metagpt/ext/aflow/benchmark/mbpp.py | import threading
import time
from typing import Any, Callable, Dict, List, Optional, Tuple
from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_fixed
from metagpt.ext.aflow.benchmark.benchmark import BaseBenchmark
from metagpt.logs import logger
from metagpt.utils.sanitize import sanitize
class MBPPBenchmark(BaseBenchmark):
def __init__(self, name: str, file_path: str, log_path: str):
super().__init__(name, file_path, log_path)
class TimeoutError(Exception):
pass
def run_with_timeout(self, func, timeout):
result = []
stop_event = threading.Event()
def target():
try:
result.append(func())
except Exception as e:
result.append(e)
finally:
stop_event.set()
thread = threading.Thread(target=target)
thread.start()
is_timeout = not stop_event.wait(timeout)
if is_timeout:
raise self.TimeoutError("Function execution timed out")
if not result:
return None
if isinstance(result[0], Exception):
raise result[0]
return result[0]
def check_solution(self, solution, test, entry_point):
solution = sanitize(code=solution, entrypoint=entry_point)
try:
global_dict = {
"math": __import__("math"),
"hashlib": __import__("hashlib"),
"re": __import__("re"),
"List": List,
"Dict": Dict,
"Tuple": Tuple,
"Optional": Optional,
"Any": Any,
}
exec(solution, global_dict)
if entry_point not in global_dict:
raise ValueError(f"Function {entry_point} is not defined in the solution.")
exec(test, global_dict)
check = global_dict["check"]
result = self.run_with_timeout(check, 15)
if result is None:
result = (self.PASS, "The solution passed all test cases.")
except self.TimeoutError:
result = (
self.FAIL,
"Execution timed out. Please check if your solution contains infinite loops or overly time-consuming operations.",
)
except Exception as e:
error_message = f"Error: {str(e)}.\n Solution: {solution}.\n Test: {test}"
result = (self.FAIL, error_message)
with open("error.log", "a", encoding="utf-8") as log_file:
log_file.write(f"{time.strftime('%Y-%m-%d %H:%M:%S')} - {error_message}\n")
return result
@retry(stop=stop_after_attempt(5), wait=wait_fixed(1), retry=retry_if_exception_type(Exception), reraise=True)
async def _generate_output(self, graph, prompt, entry_point):
return await graph(prompt, entry_point)
async def evaluate_problem(self, data: dict, graph: Callable) -> Tuple[str, str, str, float, float]:
input_text = data["prompt"]
expected_output = "\nCorrect Solution:\ndef " + data["code"]
try:
# Generate prediction using the graph function
prediction, cost = await self._generate_output(graph, input_text, data["entry_point"])
# Check the solution
ret = self.check_solution(prediction, data["test"], data["entry_point"])
test_case_details = ret[1]
expected_output = test_case_details + "\nCorrect Solution:" + data["code"]
# Calculate score based on the check result
score = 1.0 if ret[0] == self.PASS else 0.0
# Log mismatch if the score is 0
if score == 0:
self.log_mismatch(input_text, expected_output, prediction, score)
return input_text, prediction, expected_output, score, cost
except Exception as e:
logger.info(f"Maximum retries reached. Skipping this sample. Error: {e}")
return input_text, str(e), expected_output, 0.0, 0.0
def calculate_score(self, expected_output: str, prediction: str) -> Tuple[float, str]:
# The scoring logic for MBPP is already implemented in evaluate_problem, this is just to conform to the interface
return 0.0, prediction
def get_result_columns(self) -> List[str]:
return ["inputs", "prediction", "expected_output", "score", "cost"]
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/aflow/benchmark/humaneval.py | metagpt/ext/aflow/benchmark/humaneval.py | import asyncio
import threading
import time
from typing import Any, Callable, Dict, List, Optional, Tuple
from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_fixed
from metagpt.ext.aflow.benchmark.benchmark import BaseBenchmark
from metagpt.logs import logger
from metagpt.utils.sanitize import sanitize
class HumanEvalBenchmark(BaseBenchmark):
    """Benchmark runner for HumanEval: generates code via a workflow graph and exec-checks it against the dataset's tests."""

    def __init__(self, name: str, file_path: str, log_path: str):
        super().__init__(name, file_path, log_path)

    class TimeoutError(Exception):
        # Local marker so timeouts can be told apart from solution errors.
        pass

    def run_with_timeout(self, func, args, timeout):
        """Run func(*args) in a worker thread; raise TimeoutError after `timeout` seconds.

        NOTE(review): on timeout the worker thread keeps running in the background
        (Python threads cannot be killed), so a runaway solution still consumes CPU.
        """
        result = []
        stop_event = threading.Event()

        def target():
            try:
                result.append(func(*args))
            except Exception as e:
                # Capture the exception so it can be re-raised in the caller's thread.
                result.append(e)
            finally:
                stop_event.set()

        thread = threading.Thread(target=target)
        thread.start()
        is_timeout = not stop_event.wait(timeout)

        if is_timeout:
            raise self.TimeoutError("Function execution timed out")

        if not result:
            return None
        if isinstance(result[0], Exception):
            raise result[0]
        return result[0]

    def check_solution(self, solution, test, entry_point):
        """Exec the candidate `solution` plus the dataset's `test` code and run its check().

        Returns (PASS, message) on success or (FAIL, error details) otherwise.
        The solution is sanitized first to keep only code relevant to `entry_point`.
        """
        solution = sanitize(code=solution, entrypoint=entry_point)
        try:
            # Minimal globals exposed to the exec'd solution and test code.
            global_dict = {
                "math": __import__("math"),
                "hashlib": __import__("hashlib"),
                "re": __import__("re"),
                "List": List,
                "Dict": Dict,
                "Tuple": Tuple,
                "Optional": Optional,
                "Any": Any,
            }

            # Add handling for special cases: these tasks reference a companion
            # helper that belongs to the problem statement, not the solution.
            if entry_point == "decode_cyclic":
                solution = (
                    '\n\ndef encode_cyclic(s: str):\n    """\n    returns encoded string by cycling groups of three characters.\n    """\n    # split string to groups. Each of length 3.\n    groups = [s[(3 * i):min((3 * i + 3), len(s))] for i in range((len(s) + 2) // 3)]\n    # cycle elements in each group. Unless group has fewer elements than 3.\n    groups = [(group[1:] + group[0]) if len(group) == 3 else group for group in groups]\n    return "".join(groups)'
                    + "\n\n"
                    + solution
                )
            elif entry_point == "decode_shift":
                solution = (
                    '\n\ndef encode_shift(s: str):\n    """\n    returns encoded string by shifting every character by 5 in the alphabet.\n    """\n    return "".join([chr(((ord(ch) + 5 - ord("a")) % 26) + ord("a")) for ch in s])\n\n\n'
                    + solution
                )
            elif entry_point == "find_zero":
                solution = (
                    "\n\ndef poly(xs: list, x: float):\n    return sum(coeff * (x ** i) for i, coeff in enumerate(xs))\n\n"
                    + solution
                )

            exec(solution, global_dict)

            if entry_point not in global_dict:
                raise ValueError(f"Function {entry_point} is not defined in the solution.")

            exec(test, global_dict)

            check = global_dict["check"]

            # The dataset's check() runs all assertions; cap it at 15 seconds.
            result = self.run_with_timeout(check, (global_dict[entry_point],), 15)

            if result is None:
                result = (self.PASS, "The solution passed all test cases.")

        except self.TimeoutError:
            result = (
                self.FAIL,
                "Execution timed out. Please check if your solution contains infinite loops or overly time-consuming operations.",
            )
        except Exception as e:
            # Any other failure (syntax error, failed assertion, missing function) is a FAIL.
            error_message = f"Error: {str(e)}.\n Solution: {solution}.\n Test: {test}"
            result = (self.FAIL, error_message)

            with open("error.log", "a", encoding="utf-8") as log_file:
                log_file.write(f"{time.strftime('%Y-%m-%d %H:%M:%S')} - {error_message}\n")

        return result

    @retry(stop=stop_after_attempt(5), wait=wait_fixed(1), retry=retry_if_exception_type(Exception), reraise=True)
    async def _generate_output(self, graph, prompt, entry_point):
        """Invoke the workflow graph; each of up to 5 attempts is capped at 60 seconds."""
        # Generate output with a timeout of 60 seconds
        return await asyncio.wait_for(graph(prompt, entry_point), timeout=60)

    async def evaluate_problem(self, data: dict, graph: Callable) -> Tuple[str, str, str, float, float]:
        """Evaluate one HumanEval sample; returns (input, prediction, expected, score, cost)."""
        input_text = data["prompt"]
        expected_output = (
            "\nCorrect Solution:\ndef "
            + data["entry_point"]
            + "(params you should put here):"
            + "\n\n"
            + data["canonical_solution"]
        )

        try:
            # Generate prediction using the graph function
            prediction, cost = await self._generate_output(graph, input_text, data["entry_point"])

            # Check the solution
            ret = self.check_solution(prediction, data["test"], data["entry_point"])
            test_case_details = ret[1]
            expected_output = test_case_details + expected_output

            # Calculate score based on the check result
            score = 1.0 if ret[0] == self.PASS else 0.0

            # Log mismatch if the score is 0
            if score == 0:
                self.log_mismatch(input_text, expected_output, prediction, score)

            return input_text, prediction, expected_output, score, cost

        except asyncio.TimeoutError:
            logger.info("Timeout error. Skipping this sample.")
            return input_text, "Timeout", expected_output, 0.0, 0.0

        except Exception as e:
            logger.info(f"Maximum retries reached. Skipping this sample. Error: {e}")
            return input_text, str(e), expected_output, 0.0, 0.0

    def calculate_score(self, expected_output: str, prediction: str) -> Tuple[float, str]:
        # The scoring logic for HumanEval is already implemented in evaluate_problem, this is just to conform to the interface
        return 0.0, prediction

    def get_result_columns(self) -> List[str]:
        """Column names for the per-sample results CSV."""
        return ["inputs", "prediction", "expected_output", "score", "cost"]
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/aflow/benchmark/utils.py | metagpt/ext/aflow/benchmark/utils.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/7/24 16:37
@Author : didi
@File : utils.py
"""
import json
import os
import numpy as np
from metagpt.utils.common import read_json_file, write_json_file
def generate_random_indices(n, n_samples, test=False):
    """Return a reproducible random subset of the indices [0, n).

    Seeds NumPy's global RNG with 42 so every call produces the same
    permutation; the first `n_samples` positions form the train split and the
    remainder (returned when `test=True`) forms the complementary test split.
    """
    np.random.seed(42)
    shuffled = np.arange(n)
    np.random.shuffle(shuffled)
    return shuffled[n_samples:] if test else shuffled[:n_samples]
def split_data_set(file_path, samples, test=False):
    """Load a JSONL file and return the seeded random train/test subset of its records."""
    with open(file_path, "r") as file:
        records = [json.loads(line) for line in file]
    chosen = generate_random_indices(len(records), samples, test)
    return [records[i] for i in chosen]
def log_mismatch(problem, expected_output, prediction, predicted_number, path):
    """Append one mismatch record (question, reference answer, model output, extraction) to <path>/log.json."""
    entry = {
        "question": problem,
        "right_answer": expected_output,
        "model_output": prediction,
        "extracted_output": predicted_number,
    }
    log_file = os.path.join(path, "log.json")
    # Load prior entries when the log already exists; otherwise start a fresh list.
    existing = read_json_file(log_file) if os.path.exists(log_file) else []
    existing.append(entry)
    # Write the data back to log.json file
    write_json_file(log_file, existing, encoding="utf-8", indent=4)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/aflow/benchmark/benchmark.py | metagpt/ext/aflow/benchmark/benchmark.py | import asyncio
import json
import os
from abc import ABC, abstractmethod
from datetime import datetime
from pathlib import Path
from typing import Any, Callable, List, Tuple
import aiofiles
import pandas as pd
from tqdm.asyncio import tqdm_asyncio
from metagpt.logs import logger
from metagpt.utils.common import write_json_file
class BaseBenchmark(ABC):
    """Abstract benchmark: loads a JSONL dataset, evaluates each sample through a workflow graph, and saves a results CSV."""

    def __init__(self, name: str, file_path: str, log_path: str):
        self.name = name
        self.file_path = file_path
        self.log_path = log_path

    # Status markers returned by subclasses' solution checkers.
    PASS = "PASS"
    FAIL = "FAIL"

    async def load_data(self, specific_indices: List[int] = None) -> List[dict]:
        """Read the JSONL dataset; optionally keep only the rows at `specific_indices` (out-of-range indices are dropped)."""
        data = []
        async with aiofiles.open(self.file_path, mode="r", encoding="utf-8") as file:
            async for line in file:
                data.append(json.loads(line))
        if specific_indices is not None:
            filtered_data = [data[i] for i in specific_indices if i < len(data)]
            return filtered_data
        return data

    def save_results_to_csv(self, results: List[Tuple[Any, ...]], columns: List[str]):
        """Write per-sample results to `<log_path>/<avg_score>_<timestamp>.csv`; returns (avg_score, avg_cost, total_cost)."""
        df = pd.DataFrame(results, columns=columns)
        avg_score = df["score"].mean()
        # NOTE(review): assumes the "cost" column is cumulative, so its max is the total — confirm in subclasses.
        t_cost = df["cost"].max()
        a_cost = t_cost / len(df) if len(df) > 0 else 0
        current_time = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"{avg_score:.5f}_{current_time}.csv"
        output_file = os.path.join(self.log_path, filename)
        df.to_csv(output_file, index=False)
        logger.info(f"Results saved to {output_file}")
        return avg_score, a_cost, t_cost

    def log_mismatch(
        self,
        problem: str,
        expected_output: Any,
        prediction: str,
        extracted_output: Any,
        extract_answer_code: str = "None",
    ):
        """Append one failed-sample record to `<log_path>/log.json` (a corrupt log file is reset to an empty list)."""
        log_data = {
            "question": problem,
            "right_answer": expected_output,
            "model_output": prediction,
            "extracted_output": extracted_output,
            "extract_answer_code": extract_answer_code,
        }
        log_file = Path(self.log_path) / "log.json"
        if log_file.exists():
            with log_file.open("r", encoding="utf-8") as f:
                try:
                    data = json.load(f)
                except json.JSONDecodeError:
                    data = []
        else:
            data = []
        data.append(log_data)
        write_json_file(log_file, data, encoding="utf-8", indent=4)

    @abstractmethod
    async def evaluate_problem(self, problem: dict, graph: Callable) -> Tuple[Any, ...]:
        """Evaluate a single dataset row; row tuple must match get_result_columns()."""
        pass

    @abstractmethod
    def calculate_score(self, expected_output: Any, prediction: Any) -> Tuple[float, Any]:
        """Return (score, extracted_answer) for one prediction."""
        pass

    @abstractmethod
    def get_result_columns(self) -> List[str]:
        """Column names for the results CSV, matching evaluate_problem's tuple."""
        pass

    async def evaluate_all_problems(self, data: List[dict], graph: Callable, max_concurrent_tasks: int = 50):
        """Evaluate all rows concurrently, bounded by a semaphore of `max_concurrent_tasks`."""
        semaphore = asyncio.Semaphore(max_concurrent_tasks)

        async def sem_evaluate(problem):
            async with semaphore:
                return await self.evaluate_problem(problem, graph)

        tasks = [sem_evaluate(problem) for problem in data]
        return await tqdm_asyncio.gather(*tasks, desc=f"Evaluating {self.name} problems", total=len(data))

    async def run_evaluation(self, graph: Callable, va_list: List[int], max_concurrent_tasks: int = 50):
        """Full pipeline: load rows at `va_list`, evaluate them, persist the CSV, and return the summary stats."""
        data = await self.load_data(va_list)
        results = await self.evaluate_all_problems(data, graph, max_concurrent_tasks)
        columns = self.get_result_columns()
        average_score, average_cost, total_cost = self.save_results_to_csv(results, columns)
        logger.info(f"Average score on {self.name} dataset: {average_score:.5f}")
        logger.info(f"Total Cost: {total_cost:.5f}")
        return average_score, average_cost, total_cost
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/aflow/benchmark/math.py | metagpt/ext/aflow/benchmark/math.py | import inspect
import re
from math import isclose
from typing import Any, Callable, List, Tuple
import regex
from sympy import N, simplify
from sympy.parsing.latex import parse_latex
from sympy.parsing.sympy_parser import parse_expr
from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_fixed
from metagpt.ext.aflow.benchmark.benchmark import BaseBenchmark
from metagpt.logs import logger
class MATHBenchmark(BaseBenchmark):
    """Benchmark for the MATH dataset: extracts the model's boxed answer and compares it to the reference solution."""

    def __init__(self, name: str, file_path: str, log_path: str):
        super().__init__(name, file_path, log_path)

    def extract_model_answer(self, text: str) -> str:
        """Return the final answer in *text*: the last \\boxed{...} content if any, else the last sentence."""
        pattern = r"\\boxed{((?:[^{}]|{[^{}]*})*)}"
        boxed_matches = re.findall(pattern, text, re.DOTALL)
        if boxed_matches:
            return boxed_matches[-1].strip()

        # Fall back to the last non-empty sentence (avoid splitting on decimal points).
        sentence_end_pattern = r"(?<!\d)[.!?]\s+"
        sentences = re.split(sentence_end_pattern, text)
        sentences = [s.strip() for s in sentences if s.strip()]
        return sentences[-1] if sentences else ""

    def calculate_score(self, expected_output: str, prediction: str) -> Tuple[int, str]:
        """Return (1, answer) when the predicted answer matches the reference, else (0, answer)."""
        expected_answer = self.extract_model_answer(expected_output)
        predicted_answer = self.extract_model_answer(prediction)

        if self.math_equal(predicted_answer, expected_answer):
            return 1, predicted_answer
        else:
            return 0, predicted_answer

    def math_equal(self, prediction: Any, reference: Any) -> bool:
        """True when the two answers are equal as strings, numbers (abs_tol=1e-3), or symbolic expressions."""
        if str(prediction) == str(reference):
            return True

        # Fix: narrowed from bare `except:` throughout this method and the helpers
        # below — a bare except also swallows KeyboardInterrupt/SystemExit.
        try:
            if self.is_digit(prediction) and self.is_digit(reference):
                prediction = self.parse_digits(prediction)
                reference = self.parse_digits(reference)
                return isclose(prediction, reference, abs_tol=1e-3)
        except Exception:
            pass

        try:
            return self.symbolic_equal(prediction, reference)
        except Exception:
            pass

        return False

    def is_digit(self, num):
        """True when `num` can be parsed as a number (including "50%"-style percentages)."""
        return self.parse_digits(num) is not None

    def parse_digits(self, num):
        """Parse a numeric string; commas are stripped, a trailing '%' divides by 100. Returns None on failure."""
        num = regex.sub(",", "", str(num))
        try:
            return float(num)
        except Exception:
            if num.endswith("%"):
                num = num[:-1]
                if num.endswith("\\"):
                    num = num[:-1]
                try:
                    return float(num) / 100
                except Exception:
                    pass
        return None

    def symbolic_equal(self, a, b):
        """Compare two answers symbolically via sympy (LaTeX first, then plain expressions)."""

        def _parse(s):
            # Try LaTeX parsing first, then sympy expression syntax; fall back to the raw string.
            for f in [parse_latex, parse_expr]:
                try:
                    return f(s)
                except Exception:
                    pass
            return s

        a = _parse(a)
        b = _parse(b)

        try:
            if simplify(a - b) == 0:
                return True
        except Exception:
            pass

        try:
            # Numeric evaluation as a last resort (handles unsimplifiable but equal forms).
            if isclose(N(a), N(b), abs_tol=1e-3):
                return True
        except Exception:
            pass

        return False

    def get_function_code(self, func):
        """Return the source of `func` for mismatch logs, or "no code" when unavailable."""
        try:
            source_code = inspect.getsource(func)
            return source_code
        except OSError:
            return "no code"

    @retry(stop=stop_after_attempt(5), wait=wait_fixed(1), retry=retry_if_exception_type(Exception), reraise=True)
    async def _generate_output(self, graph, input_text):
        """Invoke the workflow graph; retried up to 5 times (1s apart) on any exception."""
        return await graph(input_text)

    async def evaluate_problem(self, problem: dict, graph: Callable) -> Tuple[str, str, str, int, float]:
        """Evaluate one MATH problem; returns (question, prediction, expected, score, cost)."""
        input_text = problem["problem"]
        expected_output = problem["solution"]

        try:
            output, cost = await self._generate_output(graph, input_text)
            uni_score, extracted_output = self.calculate_score(expected_output, output)

            if uni_score == 0:
                self.log_mismatch(
                    input_text,
                    expected_output,
                    output,
                    extracted_output,
                    extract_answer_code=self.get_function_code(self.extract_model_answer),
                )

            return input_text, output, expected_output, uni_score, cost

        except Exception as e:
            logger.info(f"Maximum retries reached. Skipping this sample. Error: {e}")
            return input_text, str(e), expected_output, 0.0, 0.0

    def get_result_columns(self) -> List[str]:
        """Column names for the per-sample results CSV."""
        return ["question", "prediction", "expected_output", "score", "cost"]
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/aflow/data/download_data.py | metagpt/ext/aflow/data/download_data.py | # -*- coding: utf-8 -*-
# @Date : 2024-10-20
# @Author : MoshiQAQ & didi
# @Desc : Download and extract dataset files
import os
import tarfile
from typing import Dict
import requests
from tqdm import tqdm
from metagpt.logs import logger
def download_file(url: str, filename: str) -> None:
    """Download `url` to `filename` with a progress bar.

    Fixes vs. the previous version: an HTTP error status now raises
    requests.HTTPError instead of silently saving the error page to disk,
    and the progress bar is always closed even if the download fails.
    """
    with requests.get(url, stream=True) as response:
        response.raise_for_status()
        total_size = int(response.headers.get("content-length", 0))
        block_size = 1024

        progress_bar = tqdm(total=total_size, unit="iB", unit_scale=True)
        try:
            with open(filename, "wb") as file:
                for data in response.iter_content(block_size):
                    progress_bar.update(file.write(data))
        finally:
            progress_bar.close()
def extract_tar_gz(filename: str, extract_path: str) -> None:
    """Unpack the gzip-compressed tarball `filename` into the directory `extract_path`."""
    archive = tarfile.open(filename, "r:gz")
    try:
        archive.extractall(path=extract_path)
    finally:
        archive.close()
def process_dataset(url: str, filename: str, extract_path: str) -> None:
    """Download `filename` from `url`, extract it into `extract_path`, then delete the archive.

    Fix: the log messages were f-strings with no placeholders (they printed a
    literal "(unknown)"), so the filename never appeared in the logs.
    """
    logger.info(f"Downloading {filename}...")
    download_file(url, filename)

    logger.info(f"Extracting {filename}...")
    extract_tar_gz(filename, extract_path)

    logger.info(f"{filename} download and extraction completed.")
    # Remove the archive once its contents are safely extracted.
    os.remove(filename)
    logger.info(f"Removed {filename}")
# Define the datasets to be downloaded
# Users can modify this list to choose which datasets to download
# Each entry maps a dataset key to its Google Drive URL, the local archive
# name it is saved as, and the directory the archive is extracted into.
datasets_to_download: Dict[str, Dict[str, str]] = {
    "datasets": {
        "url": "https://drive.google.com/uc?export=download&id=1DNoegtZiUhWtvkd2xoIuElmIi4ah7k8e",
        "filename": "aflow_data.tar.gz",
        "extract_path": "metagpt/ext/aflow/data",
    },
    "results": {
        "url": "https://drive.google.com/uc?export=download&id=1Sr5wjgKf3bN8OC7G6cO3ynzJqD4w6_Dv",
        "filename": "result.tar.gz",
        "extract_path": "metagpt/ext/aflow/data/results",
    },
    "initial_rounds": {
        "url": "https://drive.google.com/uc?export=download&id=1UBoW4WBWjX2gs4I_jq3ALdXeLdwDJMdP",
        "filename": "initial_rounds.tar.gz",
        "extract_path": "metagpt/ext/aflow/scripts/optimized",
    },
}
def download(required_datasets, if_first_download: bool = True):
    """Fetch and unpack every dataset named in `required_datasets`.

    When `if_first_download` is False nothing is downloaded; a notice is logged instead.
    """
    if not if_first_download:
        logger.info("Skip downloading datasets")
        return
    for name in required_datasets:
        spec = datasets_to_download[name]
        process_dataset(spec["url"], spec["filename"], spec["extract_path"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/cr/__init__.py | metagpt/ext/cr/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/cr/actions/__init__.py | metagpt/ext/cr/actions/__init__.py | python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false | |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/cr/actions/code_review.py | metagpt/ext/cr/actions/code_review.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
import json
import re
from pathlib import Path
import aiofiles
from unidiff import PatchSet
from metagpt.actions.action import Action
from metagpt.ext.cr.utils.cleaner import (
add_line_num_on_patch,
get_code_block_from_patch,
rm_patch_useless_part,
)
from metagpt.ext.cr.utils.schema import Point
from metagpt.logs import logger
from metagpt.utils.common import parse_json_code_block
from metagpt.utils.report import EditorReporter
CODE_REVIEW_PROMPT_TEMPLATE = """
NOTICE
Let's think and work step by step.
With the given pull-request(PR) Patch, and referenced Points(Code Standards), you should compare each point with the code one-by-one within 4000 tokens.
The Patch code has added line number at the first character each line for reading, but the review should focus on new added code inside the `Patch` (lines starting with line number and '+').
Each point is start with a line number and follows with the point description.
## Patch
```
{patch}
```
## Points
{points}
## Output Format
```json
[
{{
"commented_file": "The file path which you give a comment from the patch",
"comment": "The chinese comment of code which do not meet point description and give modify suggestions",
"code_start_line": "the code start line number like `10` in the Patch of current comment,",
"code_end_line": "the code end line number like `15` in the Patch of current comment",
"point_id": "The point id which the `comment` references to"
}}
]
```
CodeReview guidelines:
- Generate code `comment` that do not meet the point description.
- Each `comment` should be restricted inside the `commented_file`.
- Try to provide diverse and insightful comments across different `commented_file`.
- Don't suggest to add docstring unless it's necessary indeed.
- If the same code error occurs multiple times, it cannot be omitted, and all places need to be identified.But Don't duplicate at the same place with the same comment!
- Every line of code in the patch needs to be carefully checked, and laziness cannot be omitted. It is necessary to find out all the places.
- The `comment` and `point_id` in the Output must correspond to and belong to the same one `Point`.
Strictly Observe:
Just print the PR Patch comments in json format like **Output Format**.
And the output JSON must be able to be parsed by json.loads() without any errors.
"""
CODE_REVIEW_COMFIRM_SYSTEM_PROMPT = """
You are a professional engineer with {code_language} stack, and good at code review comment result judgement.Let's think and work step by step.
"""
CODE_REVIEW_COMFIRM_TEMPLATE = """
## Code
```
{code}
```
## Code Review Comments
{comment}
## Description of Defects
{desc}
## Reference Example for Judgment
{example}
## Your Task:
1. First, check if the code meets the requirements and does not violate any defects. If it meets the requirements and does not violate any defects, print `False` and do not proceed with further judgment.
2. Based on the `Reference Example for Judgment` provided, determine if the `Code` and `Code Review Comments` match. If they match, print `True`; otherwise, print `False`.
Note: Your output should only be `True` or `False` without any explanations.
"""
class CodeReview(Action):
    """LLM-driven code review of a PR patch against a set of review Points (code standards)."""

    name: str = "CodeReview"

    def format_comments(self, comments: list[dict], points: list[Point], patch: PatchSet):
        """Attach the referenced code block and point metadata to each raw LLM comment.

        Comments whose point_id does not match any point, or that raise while being
        resolved, are silently dropped.
        """
        new_comments = []
        logger.debug(f"original comments: {comments}")
        for cmt in comments:
            try:
                # Keep only points for the commented file's language.
                if cmt.get("commented_file").endswith(".py"):
                    points = [p for p in points if p.language == "Python"]
                elif cmt.get("commented_file").endswith(".java"):
                    points = [p for p in points if p.language == "Java"]
                else:
                    continue
                for p in points:
                    point_id = int(cmt.get("point_id", -1))
                    if point_id == p.id:
                        code_start_line = cmt.get("code_start_line")
                        code_end_line = cmt.get("code_end_line")
                        code = get_code_block_from_patch(patch, code_start_line, code_end_line)
                        new_comments.append(
                            {
                                "commented_file": cmt.get("commented_file"),
                                "code": code,
                                "code_start_line": code_start_line,
                                "code_end_line": code_end_line,
                                "comment": cmt.get("comment"),
                                "point_id": p.id,
                                "point": p.text,
                                "point_detail": p.detail,
                            }
                        )
                        break
            except Exception:
                pass
        logger.debug(f"new_comments: {new_comments}")
        return new_comments

    async def confirm_comments(self, patch: PatchSet, comments: list[dict], points: list[Point]) -> list[dict]:
        """Ask the LLM to double-check each comment against its code; keep only confirmed ones."""
        points_dict = {point.id: point for point in points}
        new_comments = []
        for cmt in comments:
            try:
                point = points_dict[cmt.get("point_id")]
                code_start_line = cmt.get("code_start_line")
                code_end_line = cmt.get("code_end_line")
                # Drop the record when the code location is missing.
                if not code_start_line or not code_end_line:
                    logger.info("False")
                    continue
                # Widen the code with surrounding context to improve confirmation accuracy.
                code = get_code_block_from_patch(
                    patch, str(max(1, int(code_start_line) - 3)), str(int(code_end_line) + 3)
                )
                # If the snippet is only whitespace/punctuation, widen the context further.
                pattern = r"^[ \t\n\r(){}[\];,]*$"
                if re.match(pattern, code):
                    code = get_code_block_from_patch(
                        patch, str(max(1, int(code_start_line) - 5)), str(int(code_end_line) + 5)
                    )
                code_language = "Java"
                code_file_ext = cmt.get("commented_file", ".java").split(".")[-1]
                if code_file_ext == ".java":
                    code_language = "Java"
                elif code_file_ext == ".py":
                    code_language = "Python"
                prompt = CODE_REVIEW_COMFIRM_TEMPLATE.format(
                    code=code,
                    comment=cmt.get("comment"),
                    desc=point.text,
                    example=point.yes_example + "\n" + point.no_example,
                )
                system_prompt = [CODE_REVIEW_COMFIRM_SYSTEM_PROMPT.format(code_language=code_language)]
                resp = await self.llm.aask(prompt, system_msgs=system_prompt)
                if "True" in resp or "true" in resp:
                    new_comments.append(cmt)
            except Exception:
                logger.info("False")
        logger.info(f"original comments num: {len(comments)}, confirmed comments num: {len(new_comments)}")
        return new_comments

    async def cr_by_points(self, patch: PatchSet, points: list[Point]):
        """Review each Python/Java file in the patch against the points, batching points 3 at a time.

        Raises ValueError when the patch contains no Python or Java files.
        """
        comments = []
        valid_patch_count = 0
        for patched_file in patch:
            if not patched_file:
                continue
            if patched_file.path.endswith(".py"):
                points = [p for p in points if p.language == "Python"]
                valid_patch_count += 1
            elif patched_file.path.endswith(".java"):
                points = [p for p in points if p.language == "Java"]
                valid_patch_count += 1
            else:
                continue
            # Batch points in groups of 3 to keep each prompt small.
            group_points = [points[i : i + 3] for i in range(0, len(points), 3)]
            for group_point in group_points:
                points_str = "id description\n"
                points_str += "\n".join([f"{p.id} {p.text}" for p in group_point])
                prompt = CODE_REVIEW_PROMPT_TEMPLATE.format(patch=str(patched_file), points=points_str)
                resp = await self.llm.aask(prompt)
                json_str = parse_json_code_block(resp)[0]
                comments_batch = json.loads(json_str)
                if comments_batch:
                    patched_file_path = patched_file.path
                    for c in comments_batch:
                        c["commented_file"] = patched_file_path
                    comments.extend(comments_batch)
        if valid_patch_count == 0:
            raise ValueError("Only code reviews for Python and Java languages are supported.")
        return comments

    async def run(self, patch: PatchSet, points: list[Point], output_file: str):
        """Full review pipeline: clean and number the patch, review, confirm, and persist results.

        Writes the raw comments to `<output_file>.log` and the confirmed comments to
        `output_file`; returns the confirmed comments whose code snippet is non-blank.
        """
        patch: PatchSet = rm_patch_useless_part(patch)
        patch: PatchSet = add_line_num_on_patch(patch)

        result = []
        async with EditorReporter(enable_llm_stream=True) as reporter:
            log_cr_output_path = Path(output_file).with_suffix(".log")
            await reporter.async_report(
                {"src_path": str(log_cr_output_path), "filename": log_cr_output_path.name}, "meta"
            )
            comments = await self.cr_by_points(patch=patch, points=points)
            log_cr_output_path.parent.mkdir(exist_ok=True, parents=True)
            async with aiofiles.open(log_cr_output_path, "w", encoding="utf-8") as f:
                await f.write(json.dumps(comments, ensure_ascii=False, indent=2))
            await reporter.async_report(log_cr_output_path)

        if len(comments) != 0:
            comments = self.format_comments(comments, points, patch)
            comments = await self.confirm_comments(patch=patch, comments=comments, points=points)

        # Keep only comments that resolved to a non-empty, non-whitespace code block.
        for comment in comments:
            if comment["code"]:
                if not (comment["code"].isspace()):
                    result.append(comment)

        async with EditorReporter() as reporter:
            src_path = output_file
            cr_output_path = Path(output_file)
            await reporter.async_report(
                {"type": "CodeReview", "src_path": src_path, "filename": cr_output_path.name}, "meta"
            )
            async with aiofiles.open(cr_output_path, "w", encoding="utf-8") as f:
                await f.write(json.dumps(comments, ensure_ascii=False, indent=2))
            await reporter.async_report(cr_output_path)
        return result
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/cr/actions/modify_code.py | metagpt/ext/cr/actions/modify_code.py | import datetime
import itertools
import re
from pathlib import Path
from typing import Optional
from unidiff import PatchSet
from metagpt.actions.action import Action
from metagpt.ext.cr.utils.cleaner import (
add_line_num_on_patch,
get_code_block_from_patch,
rm_patch_useless_part,
)
from metagpt.utils.common import CodeParser
from metagpt.utils.report import EditorReporter
SYSTEM_MSGS_PROMPT = """
You're an adaptive software developer who excels at refining code based on user inputs. You're proficient in creating Git patches to represent code modifications.
"""
MODIFY_CODE_PROMPT = """
NOTICE
With the given pull-request(PR) Patch, and referenced Comments(Code Standards), you should modify the code according the Comments.
The Patch code has added line no at the first character each line for reading, but the modification should focus on new added code inside the `Patch` (lines starting with line no and '+').
## Patch
```
{patch}
```
## Comments
{comments}
## Output Format
<the standard git patch>
Code Modification guidelines:
- Look at `point_detail`, modify the code by `point_detail`, use `code_start_line` and `code_end_line` to locate the problematic code, fix the problematic code by `point_detail` in Comments.Strictly,must handle the fix plan given by `point_detail` in every comment.
- Create a patch that satifies the git patch standard and your fixes need to be marked with '+' and '-',but notice:don't change the hunk header!
- Do not print line no in the new patch code.
Just print the Patch in the format like **Output Format**.
"""
class ModifyCode(Action):
    """Generate git patches that fix the issues raised by code-review comments on a PR."""

    name: str = "Modify Code"
    # PR identifier; used to build the default output directory.
    pr: str

    async def run(self, patch: PatchSet, comments: list[dict], output_dir: Optional[str] = None) -> str:
        """For each commented file, ask the LLM for a fix patch and write it to `<output_dir>/<file>.patch`.

        Returns the last generated patch text (or None when no file had comments).
        """
        patch: PatchSet = rm_patch_useless_part(patch)
        patch: PatchSet = add_line_num_on_patch(patch)
        #
        for comment in comments:
            code_start_line = comment.get("code_start_line")
            code_end_line = comment.get("code_end_line")
            # Skip enriching this record when the code location is missing.
            if code_start_line and code_end_line:
                code = get_code_block_from_patch(
                    patch, str(max(1, int(code_start_line) - 3)), str(int(code_end_line) + 3)
                )
                # If the snippet is only whitespace/punctuation, widen the context window.
                pattern = r"^[ \t\n\r(){}[\];,]*$"
                if re.match(pattern, code):
                    code = get_code_block_from_patch(
                        patch, str(max(1, int(code_start_line) - 5)), str(int(code_end_line) + 5)
                    )
                # Add surrounding context to improve fix accuracy.
                comment["code"] = code
            # Drop the free-form CR comment so the fix follows the prescribed plan (point_detail) instead.
            comment.pop("comment")

        # Group comments by their commented_file field (groupby requires the sort).
        comments.sort(key=lambda x: x["commented_file"])
        grouped_comments = {
            key: list(group) for key, group in itertools.groupby(comments, key=lambda x: x["commented_file"])
        }

        resp = None
        for patched_file in patch:
            patch_target_file_name = str(patched_file.path).split("/")[-1]
            if patched_file.path not in grouped_comments:
                continue
            comments_prompt = ""
            index = 1
            for grouped_comment in grouped_comments[patched_file.path]:
                comments_prompt += f"""
            <comment{index}>
            {grouped_comment}
            </comment{index}>\n
            """
                index += 1
            prompt = MODIFY_CODE_PROMPT.format(patch=patched_file, comments=comments_prompt)
            output_dir = (
                Path(output_dir)
                if output_dir
                else self.config.workspace.path / "modify_code" / str(datetime.date.today()) / self.pr
            )
            patch_file = output_dir / f"{patch_target_file_name}.patch"
            patch_file.parent.mkdir(exist_ok=True, parents=True)
            async with EditorReporter(enable_llm_stream=True) as reporter:
                await reporter.async_report(
                    {"type": "Patch", "src_path": str(patch_file), "filename": patch_file.name}, "meta"
                )
                resp = await self.llm.aask(msg=prompt, system_msgs=[SYSTEM_MSGS_PROMPT])
                resp = CodeParser.parse_code(resp, "diff")
                with open(patch_file, "w", encoding="utf-8") as file:
                    file.writelines(resp)
                await reporter.async_report(patch_file)
        return resp
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/cr/utils/schema.py | metagpt/ext/cr/utils/schema.py | from typing import Literal
from pydantic import BaseModel, Field
class Point(BaseModel):
    """A code-review point (coding standard) with its source location and yes/no judgment examples."""

    id: int = Field(default=0, description="ID of the point.")
    text: str = Field(default="", description="Content of the point.")
    language: Literal["Python", "Java"] = Field(
        default="Python", description="The programming language that the point corresponds to."
    )
    file_path: str = Field(default="", description="The file that the points come from.")
    start_line: int = Field(default=0, description="The starting line number that the point refers to.")
    end_line: int = Field(default=0, description="The ending line number that the point refers to.")
    detail: str = Field(default="", description="File content from start_line to end_line.")
    yes_example: str = Field(default="", description="yes of point examples")
    no_example: str = Field(default="", description="no of point examples")

    def rag_key(self) -> str:
        """Key used when indexing this point for RAG retrieval: the point's text."""
        return self.text
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/cr/utils/cleaner.py | metagpt/ext/cr/utils/cleaner.py | """Cleaner."""
from unidiff import Hunk, PatchedFile, PatchSet
from metagpt.logs import logger
def rm_patch_useless_part(patch: PatchSet, used_suffix: list[str] = None) -> PatchSet:
    """Filter a PatchSet down to non-removed files whose target suffix is in `used_suffix`.

    Fix: the default `["java", "py"]` was a mutable default argument; it is now
    created per call (behavior is otherwise unchanged).
    """
    if used_suffix is None:
        used_suffix = ["java", "py"]
    new_patch = PatchSet("")
    useless_files = []
    for pfile in patch:
        suffix = str(pfile.target_file).split(".")[-1]
        # Drop unsupported languages and deleted files — nothing to review there.
        if suffix not in used_suffix or pfile.is_removed_file:
            useless_files.append(pfile.path)
            continue
        new_patch.append(pfile)
    logger.info(f"total file num: {len(patch)}, used file num: {len(new_patch)}, useless_files: {useless_files}")
    return new_patch
def add_line_num_on_patch(patch: PatchSet, start_line_num: int = 1) -> PatchSet:
new_patch = PatchSet("")
lineno = start_line_num
for pfile in patch:
new_pfile = PatchedFile(
source=pfile.source_file,
target=pfile.target_file,
source_timestamp=pfile.source_timestamp,
target_timestamp=pfile.target_timestamp,
)
for hunk in pfile:
arr = [str(line) for line in hunk]
new_hunk = Hunk(
src_start=hunk.source_start,
src_len=hunk.source_length,
tgt_start=hunk.target_start,
tgt_len=hunk.target_length,
section_header=hunk.section_header,
)
for line in arr:
# if len(line) > 0 and line[0] in ["+", "-"]:
# line = f"{lineno} {line}"
# lineno += 1
line = f"{lineno} {line}"
lineno += 1
new_hunk.append(line)
new_pfile.append(new_hunk)
new_patch.append(new_pfile)
return new_patch
def get_code_block_from_patch(patch: PatchSet, code_start_line: str, code_end_line: str) -> str:
line_arr = str(patch).split("\n")
code_arr = []
add_line_tag = False
for line in line_arr:
if line.startswith(f"{code_start_line} "):
add_line_tag = True
if add_line_tag:
new_line = " ".join(line.split(" ")[1:]) # rm line-no tag
code_arr.append(new_line)
if line.startswith(f"{code_end_line} "):
add_line_tag = False
return "\n".join(code_arr)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/cr/utils/__init__.py | metagpt/ext/cr/utils/__init__.py | python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false | |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/sela/run_experiment.py | metagpt/ext/sela/run_experiment.py | import argparse
import asyncio
from metagpt.ext.sela.data.custom_task import get_mle_is_lower_better, get_mle_task_id
from metagpt.ext.sela.runner.autogluon import GluonRunner
from metagpt.ext.sela.runner.autosklearn import AutoSklearnRunner
from metagpt.ext.sela.runner.custom import CustomRunner
from metagpt.ext.sela.runner.mcts import MCTSRunner
from metagpt.ext.sela.runner.random_search import RandomSearchRunner
from metagpt.ext.sela.runner.runner import Runner
def get_args(cmd=True):
parser = argparse.ArgumentParser()
parser.add_argument("--name", type=str, default="")
parser.add_argument(
"--exp_mode",
type=str,
default="mcts",
choices=["mcts", "rs", "base", "custom", "greedy", "autogluon", "random", "autosklearn"],
)
parser.add_argument("--role_timeout", type=int, default=1000)
get_di_args(parser)
get_mcts_args(parser)
get_rs_exp_args(parser)
if cmd:
args = parser.parse_args()
else:
args = parser.parse_args("")
if args.custom_dataset_dir:
args.external_eval = False
args.eval_func = "mlebench"
args.from_scratch = True
args.task = get_mle_task_id(args.custom_dataset_dir)
args.low_is_better = get_mle_is_lower_better(args.task)
return args
def get_mcts_args(parser):
parser.add_argument("--load_tree", dest="load_tree", action="store_true")
parser.add_argument("--no_load_tree", dest="load_tree", action="store_false")
parser.set_defaults(load_tree=False)
parser.add_argument("--rollouts", type=int, default=5)
parser.add_argument("--use_fixed_insights", dest="use_fixed_insights", action="store_true")
parser.set_defaults(use_fixed_insights=False)
parser.add_argument("--start_task_id", type=int, default=2)
parser.add_argument(
"--from_scratch", dest="from_scratch", action="store_true", help="Generate solutions from scratch"
)
parser.set_defaults(from_scratch=False)
parser.add_argument("--no_external_eval", dest="external_eval", action="store_false")
parser.set_defaults(external_eval=True)
parser.add_argument("--eval_func", type=str, default="sela", choices=["sela", "mlebench"])
parser.add_argument("--custom_dataset_dir", type=str, default=None)
parser.add_argument("--max_depth", type=int, default=4)
def get_rs_exp_args(parser):
parser.add_argument("--rs_mode", type=str, default="single", choices=["single", "set"])
parser.add_argument("--is_multimodal", action="store_true", help="Specify if the model is multi-modal")
def get_di_args(parser):
parser.add_argument("--task", type=str, default="titanic")
parser.add_argument("--low_is_better", dest="low_is_better", action="store_true")
parser.set_defaults(low_is_better=False)
parser.add_argument("--reflection", dest="reflection", action="store_true")
parser.add_argument("--no_reflection", dest="reflection", action="store_false")
parser.add_argument("--num_experiments", type=int, default=1)
parser.add_argument("--special_instruction", type=str, default=None, choices=["ag", "stacking", "text", "image"])
parser.set_defaults(reflection=True)
async def main(args):
if args.exp_mode == "mcts":
runner = MCTSRunner(args)
elif args.exp_mode == "greedy":
runner = MCTSRunner(args, tree_mode="greedy")
elif args.exp_mode == "random":
runner = MCTSRunner(args, tree_mode="random")
elif args.exp_mode == "rs":
runner = RandomSearchRunner(args)
elif args.exp_mode == "base":
runner = Runner(args)
elif args.exp_mode == "autogluon":
runner = GluonRunner(args)
elif args.exp_mode == "custom":
runner = CustomRunner(args)
elif args.exp_mode == "autosklearn":
runner = AutoSklearnRunner(args)
else:
raise ValueError(f"Invalid exp_mode: {args.exp_mode}")
await runner.run_experiment()
if __name__ == "__main__":
args = get_args()
asyncio.run(main(args))
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/sela/utils.py | metagpt/ext/sela/utils.py | import os
import re
from datetime import datetime
from pathlib import Path
import nbformat
import yaml
from loguru import logger as _logger
from nbclient import NotebookClient
from nbformat.notebooknode import NotebookNode
from metagpt.roles.role import Role
def load_data_config(file_path="data.yaml"):
with open(file_path, "r") as stream:
data_config = yaml.safe_load(stream)
return data_config
DATASET_CONFIG = load_data_config("datasets.yaml")
DATA_CONFIG = load_data_config()
DATA_CONFIG["datasets"] = DATASET_CONFIG["datasets"]
def get_mcts_logger():
logfile_level = "DEBUG"
name: str = None
current_date = datetime.now()
formatted_date = current_date.strftime("%Y%m%d")
log_name = f"{name}_{formatted_date}" if name else formatted_date # name a log with prefix name
# _logger.remove()
_logger.level("MCTS", color="<green>", no=25)
# _logger.add(sys.stderr, level=print_level)
_logger.add(Path(DATA_CONFIG["work_dir"]) / DATA_CONFIG["role_dir"] / f"{log_name}.txt", level=logfile_level)
_logger.propagate = False
return _logger
mcts_logger = get_mcts_logger()
def get_exp_pool_path(task_name, data_config, pool_name="analysis_pool"):
datasets_dir = data_config["datasets_dir"]
if task_name in data_config["datasets"]:
dataset = data_config["datasets"][task_name]
data_path = os.path.join(datasets_dir, dataset["dataset"])
else:
raise ValueError(
f"Dataset {task_name} not found in config file. Available datasets: {data_config['datasets'].keys()}"
)
exp_pool_path = os.path.join(data_path, f"{pool_name}.json")
if not os.path.exists(exp_pool_path):
return None
return exp_pool_path
def change_plan(role, plan):
print(f"Change next plan to: {plan}")
tasks = role.planner.plan.tasks
finished = True
for i, task in enumerate(tasks):
if not task.code:
finished = False
break
if not finished:
tasks[i].plan = plan
return finished
def is_cell_to_delete(cell: NotebookNode) -> bool:
if "outputs" in cell:
for output in cell["outputs"]:
if output and "traceback" in output:
return True
return False
def process_cells(nb: NotebookNode) -> NotebookNode:
new_cells = []
i = 1
for cell in nb["cells"]:
if cell["cell_type"] == "code" and not is_cell_to_delete(cell):
cell["execution_count"] = i
new_cells.append(cell)
i = i + 1
nb["cells"] = new_cells
return nb
def save_notebook(role: Role, save_dir: str = "", name: str = "", save_to_depth=False):
save_dir = Path(save_dir)
tasks = role.planner.plan.tasks
nb = process_cells(role.execute_code.nb)
os.makedirs(save_dir, exist_ok=True)
file_path = save_dir / f"{name}.ipynb"
nbformat.write(nb, file_path)
if save_to_depth:
clean_file_path = save_dir / f"{name}_clean.ipynb"
codes = [task.code for task in tasks if task.code]
clean_nb = nbformat.v4.new_notebook()
for code in codes:
clean_nb.cells.append(nbformat.v4.new_code_cell(code))
nbformat.write(clean_nb, clean_file_path)
async def load_execute_notebook(role):
tasks = role.planner.plan.tasks
codes = [task.code for task in tasks if task.code]
executor = role.execute_code
executor.nb = nbformat.v4.new_notebook()
executor.nb_client = NotebookClient(executor.nb, timeout=role.role_timeout)
# await executor.build()
for code in codes:
outputs, success = await executor.run(code)
print(f"Execution success: {success}, Output: {outputs}")
print("Finish executing the loaded notebook")
return executor
def clean_json_from_rsp(text):
pattern = r"```json(.*?)```"
matches = re.findall(pattern, text, re.DOTALL)
if matches:
json_str = "\n".join(matches)
return json_str
else:
return ""
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/sela/experimenter.py | metagpt/ext/sela/experimenter.py | from __future__ import annotations
import asyncio
import json
import os
from pydantic import model_validator
from metagpt.actions.di.write_analysis_code import WriteAnalysisCode
from metagpt.const import SERDESER_PATH
from metagpt.ext.sela.utils import mcts_logger, save_notebook
from metagpt.roles.di.data_interpreter import DataInterpreter
from metagpt.schema import Message, Task, TaskResult
from metagpt.utils.common import CodeParser, write_json_file
CODE_BLOCK_RESULT = """
## Code:
{code}
## Execution Result:
{result}
"""
EXTRACT_SCORE_PROMPT = """
# Code Blocks
{code_block}
# Instruction:
Based on the code and execution result, please extract the **final scores** and return it as a dictionary.
If you cannot find the scores, please still return a dictionary with the keys 'train_score', 'dev_score', and 'test_score', and set the values to -1.
# Format:
```json
{{
"train_score": x.x,
"dev_score": x.x,
"test_score": x.x
}}
```
"""
class TimeoutException(Exception):
pass
def async_timeout():
def decorator(func):
async def wrapper(self, *args, **kwargs):
try:
result = await asyncio.wait_for(func(self, *args, **kwargs), timeout=self.role_timeout)
except asyncio.TimeoutError:
text = f"Function timed out after {self.role_timeout} seconds"
mcts_logger.error(text)
self.save_state()
raise TimeoutException(text)
return result
return wrapper
return decorator
class Experimenter(DataInterpreter):
node_id: str = "0"
start_task_id: int = 1
state_saved: bool = False
role_dir: str = SERDESER_PATH.joinpath("team", "environment", "roles", "Experimenter")
role_timeout: int = 1000
def get_node_name(self):
return f"Node-{self.node_id}"
def get_next_instruction(self):
return self.planner.plan.tasks[self.start_task_id].instruction
def change_next_instruction(self, new_instruction):
if new_instruction is not None:
self.planner.plan.task_map[str(self.start_task_id)].instruction = new_instruction
self.remap_tasks()
def update_til_start_task(self, role: Experimenter, backward: bool = True):
if backward:
# make sure the previous task instructions are matched
assert (
self.start_task_id == role.start_task_id - 1
), f"start_task_id: {self.start_task_id}, role.start_task_id: {role.start_task_id}"
for i in range(self.start_task_id):
if (
self.planner.plan.task_map[str(self.start_task_id)].instruction
!= role.planner.plan.task_map[str(self.start_task_id)].instruction
):
mcts_logger.info("Previous task instructions not matched")
self.remap_tasks()
return
# copy new role's task (self.start_task_id) to current role
self.planner.plan.task_map[str(self.start_task_id)] = role.planner.plan.task_map[
str(self.start_task_id)
].model_copy()
self.remap_tasks()
else:
assert (
self.start_task_id == role.start_task_id + 1
), f"start_task_id: {self.start_task_id}, role.start_task_id: {role.start_task_id}"
if int(role.planner.plan.current_task_id) > self.start_task_id:
for i in range(role.start_task_id):
self.planner.plan.task_map[str(i)] = role.planner.plan.task_map[str(i)].model_copy()
self.remap_tasks()
async def get_score(self):
score_dict = await self.llm_extract_score()
score_dict["score"] = score_dict["dev_score"]
return score_dict
async def llm_extract_score(self):
# result_text = self.planner.plan.task_map[str(len(self.planner.plan.task_map))].result
# code_text = self.planner.plan.task_map[str(len(self.planner.plan.task_map))].code
num_tasks = len(self.planner.plan.task_map)
task_map = self.planner.plan.task_map
code_block = "\n".join(
[
CODE_BLOCK_RESULT.format(code=task_map[str(i + 1)].code, result=task_map[str(i + 1)].result)
for i in range(num_tasks)
]
)
rsp = await self.llm.aask(EXTRACT_SCORE_PROMPT.format(code_block=code_block, role="user"))
json_block = CodeParser.parse_code(block=None, text=rsp)
score_dict = json.loads(json_block)
return score_dict
@model_validator(mode="after")
def set_plan_and_tool(self) -> "Interpreter":
if self.planner.plan.goal != "":
self.set_actions([WriteAnalysisCode])
self._set_state(0)
print("Plan already exists, skipping initialization.")
return self
print("Initializing plan and tool...")
return super().set_plan_and_tool()
async def _act_on_task(self, current_task: Task) -> TaskResult:
"""Useful in 'plan_and_act' mode. Wrap the output in a TaskResult for review and confirmation."""
mcts_logger.info(f"The current_task is: {current_task}")
code, result, is_success = await self._write_and_exec_code()
task_result = TaskResult(code=code, result=result, is_success=is_success)
if int(current_task.task_id) == self.start_task_id + 1:
# fe_id = current_task.dependent_task_ids
self.save_state()
save_notebook(role=self, save_dir=self.role_dir, name=self.get_node_name(), save_to_depth=True)
else:
save_notebook(role=self, save_dir=self.role_dir, name=self.get_node_name())
return task_result
def get_solution(self):
codes = [task.code for task in self.planner.plan.tasks]
results = [task.result for task in self.planner.plan.tasks]
return {"codes": codes, "results": results}
def save_state(self, static_save=False):
"""
attribute:
state_saved - the state has been saved
input:
static_save - saving the state without changing the state_saved flag - used when a new role is created
"""
if self.state_saved and not static_save:
return
if not static_save:
self.state_saved = True
mcts_logger.log("MCTS", f"Saving state at task {self.start_task_id}")
else:
mcts_logger.log("MCTS", "Static Saving")
stg_path = self.role_dir
name = self.get_node_name()
role_path = os.path.join(stg_path, f"{name}.json")
# save state as json file
write_json_file(role_path, self.model_dump())
def remap_tasks(self):
self.planner.plan.tasks = [
self.planner.plan.task_map[task_id] for task_id in sorted(self.planner.plan.task_map.keys())
]
@async_timeout()
async def run(self, with_message=None) -> Message | None:
"""Observe, and think and act based on the results of the observation"""
if with_message == "continue":
mcts_logger.info("Continue to run")
self.rc.working_memory.clear()
self.working_memory.clear()
rsp = await self.react()
self.set_todo(None)
self.publish_message(rsp)
return rsp
return await super().run(with_message)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/sela/insights/instruction_generator.py | metagpt/ext/sela/insights/instruction_generator.py | import json
import os
import random
from difflib import SequenceMatcher
from metagpt.ext.sela.insights.solution_designer import SolutionDesigner
from metagpt.ext.sela.utils import clean_json_from_rsp, load_data_config, mcts_logger
from metagpt.llm import LLM
from metagpt.schema import Message
REFLECTION_SYSTEM_MSG = "As a Kaggle Grandmaster competing in a challenge, your task is to suggest potential evolutionary improvements that could enhance the performance of the baseline code."
CHANGE_INSTRUCTION = """
# Original instruction
{instruction}
# Insights
{insights}
Rewrite the original instruction according to the insights
(If the original instruction involves splitting the data, ensure that your insights are integrated with the data split instructions,
rather than replacing them.)
# Expected Output Hard Format
```json
{{
"Original Instruction": "original instruction",
"New Instruction": "new instruction"
}}
```
"""
DATA_CONFIG = load_data_config()
class InstructionGenerator:
data_config = DATA_CONFIG
def __init__(self, state, use_fixed_insights, from_scratch):
self.state = state
self.file_path = state["exp_pool_path"]
if state["custom_dataset_dir"]:
with open(f"{state['custom_dataset_dir']}/description.md", "r", encoding="utf-8") as file:
self.dataset_info = file.read()
else:
dataset_info_path = (
f"{self.data_config['datasets_dir']}/{state['dataset_config']['dataset']}/dataset_info.json"
)
with open(dataset_info_path, "r") as file:
self.dataset_info = json.load(file)
self.use_fixed_insights = use_fixed_insights
self.proposer = SolutionDesigner()
if self.file_path is None:
self.from_scratch = True
else:
self.from_scratch = from_scratch
async def initialize(self):
if self.from_scratch:
self.insight_pool = await self.generate_solutions_from_scratch(self.dataset_info, self.state["task"])
else:
self.insight_pool = self.load_insight_pool(self.file_path, self.use_fixed_insights)
@staticmethod
def load_json_data(json_dir):
with open(json_dir, "r") as file:
json_data = json.load(file)
return json_data
@staticmethod
def _random_sample(analysis, num_samples):
return random.sample(analysis, num_samples)
@staticmethod
def sample_instruction_set(data):
data_dict = {}
for item in data:
task_id = item["task_id"]
if task_id not in data_dict:
data_dict[task_id] = []
data_dict[task_id].append(item)
instruction_set = []
for task_id in sorted(data_dict.keys()):
instruction_set.append(random.choice(data_dict[task_id]))
return instruction_set
@staticmethod
def format_output(rsp):
rsp_list = []
new_data = []
rsp_list.append(rsp)
for item in rsp_list:
item_dict = json.loads(item)
data = {
"Insights": item_dict,
}
new_data.append(data)
return new_data
@staticmethod
def load_insight_pool(file_path, use_fixed_insights, task_id=None):
data = InstructionGenerator.load_json_data(file_path)
if use_fixed_insights:
current_directory = os.path.dirname(__file__)
fixed_insights = InstructionGenerator.load_json_data(f"{current_directory}/fixed_insights.json")
data.extend(fixed_insights)
for item in data:
if "task_id" not in item:
raise ValueError("task_id is not found in the insight_pool")
if task_id:
data = [item for item in data if int(item["task_id"]) == int(task_id)]
return data
async def generate_new_instructions(self, task_id, original_instruction, max_num, ext_info=None):
data = self.insight_pool
new_instructions = []
if len(data) == 0:
mcts_logger.log("MCTS", f"No insights available for task {task_id}")
# return [original_instruction] # Return the original instruction if no insights are available
for i in range(max_num):
if len(data) == 0:
insights = "No insights available"
else:
item = data[i]
insights = item["Analysis"]
new_instruction = await InstructionGenerator.generate_new_instruction(
original_instruction, insights, ext_info
)
new_instructions.append(new_instruction)
return new_instructions
async def propose_new_insights(self, solution, score):
new_insights = await self.proposer.propose_insights(solution, score)
added_insights = self.add_insight(new_insights)
return added_insights
async def generate_solutions_from_scratch(self, dataset_info, dataset_name):
insight_pool = await self.proposer.generate_solutions(dataset_info, dataset_name, save_analysis_pool=False)
return insight_pool
def add_insight(self, new_insights):
added_insights = []
for new_insight in new_insights:
if not self.is_similar_to_existing(new_insight):
added_insights.append(new_insight)
self.insight_pool.append(new_insight)
return added_insights
def is_similar_to_existing(self, new_insight, similarity_threshold=0.8):
for existing_insight in self.insight_pool:
similarity = self.calculate_similarity(new_insight["Analysis"], existing_insight["Analysis"])
if similarity > similarity_threshold:
return True
return False
@staticmethod
def calculate_similarity(text1, text2):
return SequenceMatcher(None, text1, text2).ratio()
@staticmethod
async def generate_new_instruction(original_instruction, insights, ext_info):
prompt = CHANGE_INSTRUCTION.format(instruction=original_instruction, insights=insights)
llm = LLM()
context = llm.format_msg([Message(content=prompt, role="user")])
llm_response = await llm.aask(context, system_msgs=[REFLECTION_SYSTEM_MSG])
rsp = clean_json_from_rsp(llm_response)
new_instruction = json.loads(rsp)["New Instruction"]
return new_instruction
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/sela/insights/solution_designer.py | metagpt/ext/sela/insights/solution_designer.py | import json
from metagpt.ext.sela.utils import clean_json_from_rsp, load_data_config
from metagpt.llm import LLM
DATA_CONFIG = load_data_config()
DATASET_DESCRIPTION_SELA_PROMPT = """
# Dataset Description
{dataset}
# Dataset Metadata
{metadata}
# Dataset Head
{head}
"""
DATASET_DESCRIPTION_CUSTOM_PROMPT = """
# Dataset Description
{dataset_description}
"""
DATASET_INSIGHT_PROMPT = """
{description}
# Instruction
Propose insights to help improve the performance of the model on this dataset.
The insights should be proposed based on the dataset description with different task types.
Each task type should have at least 5 insights.
Make sure each method is diverse enough and can be implemented separately.
Be specific about models' choices, ensemble and tuning techniques, and preprocessing & feature engineering techniques.
Your model choices should be advanced enough to be helpful.
# Format
```json
[
{{
"task_type": "EDA",
"insights": [
"insight1",
"insight2",
"insight3",
...
"insightN"
]
}},
{{
"task_type": "Data Preprocessing",
"insights": [
"insight1",
"insight2",
"insight3",
...
"insightN"
]
}},
{{
"task_type": "Feature Engineering",
"insights": [
"insight1",
"insight2",
"insight3",
...
"insightN"
]
}},
{{
"task_type": "Model Training",
"insights": [
"insight1",
"insight2",
"insight3",
...
"insightN"
]
}}
]
```
"""
INSIGHT_PROPOSAL_PROMPT = """
You are an AI assistant tasked with analyzing a machine learning solution and proposing new insights to improve its performance. Given the current solution code and development score, suggest innovative approaches to enhance the model.
Current Solution Code:
{solution_code}
Development Score: {dev_score}
Based on this information, propose 3-5 new insights across different aspects of the machine learning pipeline (Data Preprocessing, Feature Engineering, and Model Training). Your insights should be specific, actionable, and have the potential to improve the model's performance.
Please format your response as a JSON array with the following structure:
[
{{
"task_type": "Data Preprocessing",
"insights": [
"insight1",
"insight2"
]
}},
{{
"task_type": "Feature Engineering",
"insights": [
"insight1",
"insight2"
]
}},
{{
"task_type": "Model Training",
"insights": [
"insight1",
"insight2"
]
}}
]
"""
KEY_DATASET_FEATURES = [
"NumberOfClasses",
"NumberOfFeatures",
"NumberOfInstances",
"NumberOfInstancesWithMissingValues",
"NumberOfMissingValues",
"NumberOfNumericFeatures",
"NumberOfSymbolicFeatures",
]
TASK_TO_ID = {"EDA": 1, "Data Preprocessing": 2, "Feature Engineering": 3, "Model Training": 4, "Model Evaluation": 5}
class SolutionDesigner:
data_dir: str = DATA_CONFIG["datasets_dir"]
async def generate_solutions(self, dataset_info, dataset_name, save_analysis_pool=True):
llm = LLM()
if type(dataset_info) == dict:
description_prompt = DATASET_DESCRIPTION_SELA_PROMPT.format(
dataset=dataset_info["description"],
metadata=self.metadata_builder(dataset_info["metadata"]),
head=dataset_info["df_head"],
)
else:
description_prompt = DATASET_DESCRIPTION_CUSTOM_PROMPT.format(dataset_description=dataset_info)
context = DATASET_INSIGHT_PROMPT.format(description=description_prompt)
rsp = await llm.aask(context)
rsp = clean_json_from_rsp(rsp)
analysis_pool = self.process_analysis_pool(json.loads(rsp))
if save_analysis_pool:
dataset_path = f"{self.data_dir}/{dataset_name}"
self.save_analysis_pool(dataset_path, analysis_pool)
return analysis_pool
async def propose_new_insights(self, solution, score):
llm = LLM()
context = INSIGHT_PROPOSAL_PROMPT.format(solution_code=solution, dev_score=score)
rsp = await llm.aask(context)
rsp = clean_json_from_rsp(rsp)
new_insights = self.process_analysis_pool(json.loads(rsp))
return new_insights
def process_analysis_pool(self, insights_rsp):
analysis_pool = []
for task_type_insights in insights_rsp:
task_type = task_type_insights["task_type"]
for insight in task_type_insights["insights"]:
analysis_pool.append({"Analysis": insight, "Category": task_type, "task_id": TASK_TO_ID[task_type]})
return analysis_pool
def metadata_builder(self, qualities):
metadata = {}
for key in KEY_DATASET_FEATURES:
metadata[key] = qualities.get(key, "N/A")
metadata_text = json.dumps(metadata, indent=4)
return metadata_text
def save_analysis_pool(self, dataset_path, analysis_pool):
fpath = f"{dataset_path}/ds_analysis_pool.json"
with open(fpath, "w") as file:
json.dump(analysis_pool, file, indent=4)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/sela/scripts/visualize_experiment.py | metagpt/ext/sela/scripts/visualize_experiment.py | import networkx as nx
from metagpt.ext.sela.evaluation.visualize_mcts import (
build_tree_recursive,
visualize_tree,
)
from metagpt.ext.sela.MCTS import MCTS, create_initial_state, initialize_di_root_node
from metagpt.ext.sela.run_experiment import get_args
from metagpt.ext.sela.utils import DATA_CONFIG
if __name__ == "__main__":
args = get_args()
data_config = DATA_CONFIG
state = create_initial_state(args.task, 0, data_config, args=args)
role, node = initialize_di_root_node(state)
mcts = MCTS(
root_node=node,
max_depth=5,
use_fixed_insights=False,
)
mcts.load_tree()
mcts.load_node_order()
root = mcts.root_node
node_order = mcts.node_order
G = nx.DiGraph()
build_tree_recursive(G, "0", root, node_order)
visualize_tree(G, save_path=f"results/{args.task}-tree.png")
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/sela/runner/runner.py | metagpt/ext/sela/runner/runner.py | import datetime
import json
import os
import numpy as np
import pandas as pd
from metagpt.ext.sela.evaluation.evaluation import evaluate_score
from metagpt.ext.sela.experimenter import Experimenter
from metagpt.ext.sela.search.tree_search import create_initial_state
from metagpt.ext.sela.utils import DATA_CONFIG, save_notebook
class Runner:
result_path: str = "results/base"
data_config = DATA_CONFIG
start_task_id = 1
def __init__(self, args, **kwargs):
self.args = args
self.start_time_raw = datetime.datetime.now()
self.start_time = self.start_time_raw.strftime("%Y%m%d%H%M")
self.state = create_initial_state(
self.args.task,
start_task_id=self.start_task_id,
data_config=self.data_config,
args=self.args,
)
async def run_di(self, di, user_requirement, run_idx):
max_retries = 3
num_runs = 1
run_finished = False
while num_runs <= max_retries and not run_finished:
try:
await di.run(user_requirement)
score_dict = await di.get_score()
score_dict = self.evaluate(score_dict, self.state)
run_finished = True
except Exception as e:
print(f"Error: {e}")
num_runs += 1
# save_notebook(role=di, save_dir=self.result_path, name=f"{self.args.task}_{self.start_time}_{run_idx}")
save_name = self.get_save_name()
save_notebook(role=di, save_dir=self.result_path, name=f"{save_name}_{run_idx}")
if not run_finished:
score_dict = {"train_score": -1, "dev_score": -1, "test_score": -1, "score": -1}
return score_dict
def summarize_results(self, results):
dev_scores = [result["score_dict"]["dev_score"] for result in results]
best_dev_score = (
max(dev_scores)
if not self.args.low_is_better
else min([score for score in dev_scores if score != -1] + [np.inf])
)
best_score_idx = dev_scores.index(best_dev_score)
test_scores = [result["score_dict"]["test_score"] for result in results]
avg_score = sum(test_scores) / len(test_scores)
global_best_score = (
max(test_scores)
if not self.args.low_is_better
else min([score for i, score in enumerate(test_scores) if dev_scores[i] != -1] + [np.inf])
)
results.insert(
0,
{
"best_dev_score": best_dev_score,
"best_dev_score_idx": best_score_idx,
"best_dev_test_score": test_scores[best_score_idx],
"avg_test_score": avg_score,
"global_best_test_score": global_best_score,
},
)
return results
async def run_experiment(self):
state = self.state
user_requirement = state["requirement"]
results = []
for i in range(self.args.num_experiments):
di = Experimenter(node_id="0", use_reflection=self.args.reflection, role_timeout=self.args.role_timeout)
score_dict = await self.run_di(di, user_requirement, run_idx=i)
results.append(
{"idx": i, "score_dict": score_dict, "user_requirement": user_requirement, "args": vars(self.args)}
)
self.save_result(results) # save intermediate results
results = self.summarize_results(results)
self.save_result(results)
def evaluate_prediction(self, split, state):
pred_path = os.path.join(state["work_dir"], state["task"], f"{split}_predictions.csv")
os.makedirs(state["node_dir"], exist_ok=True)
pred_node_path = os.path.join(state["node_dir"], f"{self.start_time}-{split}_predictions.csv")
gt_path = os.path.join(state["datasets_dir"][f"{split}_target"])
preds = pd.read_csv(pred_path)
preds = preds[preds.columns.tolist()[-1]]
preds.to_csv(pred_node_path, index=False)
gt = pd.read_csv(gt_path)["target"]
metric = state["dataset_config"]["metric"]
os.remove(pred_path)
return evaluate_score(preds, gt, metric)
def evaluate(self, score_dict, state):
scores = {
"dev_score": self.evaluate_prediction("dev", state),
"test_score": self.evaluate_prediction("test", state),
}
score_dict.update(scores)
return score_dict
def get_save_name(self):
return f"{self.args.exp_mode}-{self.args.task}_{self.start_time}"
def save_result(self, result):
end_time_raw = datetime.datetime.now()
end_time = end_time_raw.strftime("%Y%m%d%H%M")
time_info = {
"start_time": self.start_time,
"end_time": end_time,
"duration (seconds)": (end_time_raw - self.start_time_raw).seconds,
}
result = result.copy()
result.insert(0, time_info)
save_name = self.get_save_name()
os.makedirs(self.result_path, exist_ok=True)
with open(f"{self.result_path}/{save_name}.json", "w") as f:
json.dump(result, f, indent=4)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/sela/runner/mcts.py | metagpt/ext/sela/runner/mcts.py | import shutil
from metagpt.ext.sela.evaluation.evaluation import (
node_evaluate_score_mlebench,
node_evaluate_score_sela,
)
from metagpt.ext.sela.evaluation.visualize_mcts import get_tree_text
from metagpt.ext.sela.runner.runner import Runner
from metagpt.ext.sela.search.search_algorithm import MCTS, Greedy, Random
class MCTSRunner(Runner):
    """Runner that explores the experiment tree with MCTS or a baseline policy.

    ``tree_mode`` selects the search policy: "greedy", "random", or MCTS
    (the default for any other value).
    """

    result_path: str = "results/mcts"

    def __init__(self, args, tree_mode=None, **kwargs):
        if args.special_instruction == "image":
            self.start_task_id = 1  # start from data preprocessing if it is an image task
        else:
            self.start_task_id = args.start_task_id

        if args.eval_func == "sela":
            self.eval_func = node_evaluate_score_sela
        elif args.eval_func == "mlebench":
            self.eval_func = node_evaluate_score_mlebench
        else:
            # Bug fix: an unknown eval_func previously left self.eval_func
            # unset, surfacing later as an AttributeError in run_experiment.
            raise ValueError(f"Unsupported eval_func: {args.eval_func}")

        super().__init__(args, **kwargs)
        self.tree_mode = tree_mode

    async def run_experiment(self):
        """Run the search, then persist results, best notebooks and the tree dump."""
        use_fixed_insights = self.args.use_fixed_insights
        depth = self.args.max_depth
        if self.tree_mode == "greedy":
            mcts = Greedy(root_node=None, max_depth=depth, use_fixed_insights=use_fixed_insights)
        elif self.tree_mode == "random":
            mcts = Random(root_node=None, max_depth=depth, use_fixed_insights=use_fixed_insights)
        else:
            mcts = MCTS(root_node=None, max_depth=depth, use_fixed_insights=use_fixed_insights)

        best_nodes = await mcts.search(state=self.state, args=self.args)
        best_node = best_nodes["global_best"]
        dev_best_node = best_nodes["dev_best"]
        score_dict = best_nodes["scores"]
        # External grader score for the dev-best node (sela or mlebench grader).
        additional_scores = {"grader": self.eval_func(dev_best_node)}

        text, num_generated_codes = get_tree_text(mcts.root_node)
        text += f"Generated {num_generated_codes} unique codes.\n"
        text += f"Best node: {best_node.id}, score: {best_node.raw_reward}\n"
        text += f"Dev best node: {dev_best_node.id}, score: {dev_best_node.raw_reward}\n"
        text += f"Grader score: {additional_scores['grader']}\n"
        print(text)

        results = [
            {
                "best_node": best_node.id,
                "best_node_score": best_node.raw_reward,
                "dev_best_node": dev_best_node.id,
                "dev_best_node_score": dev_best_node.raw_reward,
                "num_generated_codes": num_generated_codes,
                "user_requirement": best_node.state["requirement"],
                "tree_text": text,
                "args": vars(self.args),
                "scores": score_dict,
                "additional_scores": additional_scores,
            }
        ]
        self.save_result(results)
        self.copy_notebook(best_node, "best")
        self.copy_notebook(dev_best_node, "dev_best")
        self.save_tree(text)

    def copy_notebook(self, node, name):
        """Copy the node's notebook into result_path as ``<save_name>_<name>.ipynb``."""
        node_dir = node.get_node_dir()
        node_nb_dir = f"{node_dir}/Node-{node.id}.ipynb"
        save_name = self.get_save_name()
        copy_nb_dir = f"{self.result_path}/{save_name}_{name}.ipynb"
        shutil.copy(node_nb_dir, copy_nb_dir)

    def save_tree(self, tree_text):
        """Persist the rendered search tree to ``<result_path>/<save_name>_tree.txt``."""
        save_name = self.get_save_name()
        fpath = f"{self.result_path}/{save_name}_tree.txt"
        with open(fpath, "w") as f:
            f.write(tree_text)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/sela/runner/autosklearn.py | metagpt/ext/sela/runner/autosklearn.py | from datetime import datetime
from functools import partial
import pandas as pd
from metagpt.ext.sela.evaluation.evaluation import evaluate_score
from metagpt.ext.sela.runner.custom import CustomRunner
def custom_scorer(y_true, y_pred, metric_name):
    """Adapter for autosklearn's make_scorer: evaluate_score takes (pred, gt, metric)."""
    return evaluate_score(y_pred, y_true, metric_name)
class ASRunner:
    """Thin wrapper around auto-sklearn for SELA tabular tasks."""

    time_limit = 600  # seconds auto-sklearn may spend on the whole task

    def __init__(self, state=None):
        self.state = state
        self.datasets = self.state["datasets_dir"]

    def create_autosklearn_scorer(self, metric_name):
        """Wrap SELA's evaluate_score as an autosklearn scorer for *metric_name*."""
        from autosklearn.metrics import make_scorer

        return make_scorer(name=metric_name, score_func=partial(custom_scorer, metric_name=metric_name))

    def run(self):
        """Fit an auto-sklearn estimator matching the task metric and return predictions.

        Returns:
            dict with "dev_preds" and "test_preds" arrays.

        Raises:
            ValueError: if the configured metric is not supported.
        """
        import autosklearn.classification
        import autosklearn.regression

        train_data = pd.read_csv(self.datasets["train"])
        dev_data = pd.read_csv(self.datasets["dev_wo_target"])
        test_data = pd.read_csv(self.datasets["test_wo_target"])

        target_col = self.state["dataset_config"]["target_col"]
        eval_metric = self.state["dataset_config"]["metric"]
        X_train = train_data.drop(columns=[target_col])
        y_train = train_data[target_col]

        # Both estimator flavours share identical resource/bookkeeping settings;
        # previously this block was duplicated in each branch.
        common_kwargs = dict(
            time_left_for_this_task=self.time_limit,
            metric=self.create_autosklearn_scorer(eval_metric),
            memory_limit=8192,
            tmp_folder="AutosklearnModels/as-{}-{}".format(
                self.state["task"], datetime.now().strftime("%y%m%d_%H%M")
            ),
            n_jobs=-1,
        )
        if eval_metric == "rmse":
            automl = autosklearn.regression.AutoSklearnRegressor(**common_kwargs)
        elif eval_metric in ["f1", "f1 weighted"]:
            automl = autosklearn.classification.AutoSklearnClassifier(**common_kwargs)
        else:
            raise ValueError(f"Unsupported metric: {eval_metric}")

        automl.fit(X_train, y_train)
        dev_preds = automl.predict(dev_data)
        test_preds = automl.predict(test_data)
        return {"test_preds": test_preds, "dev_preds": dev_preds}
class AutoSklearnRunner(CustomRunner):
    """CustomRunner wired to the auto-sklearn baseline."""

    result_path: str = "results/autosklearn"

    def __init__(self, args, **kwargs):
        super().__init__(args, **kwargs)
        self.framework = ASRunner(self.state)

    async def run_experiment(self):
        """Run auto-sklearn once, score its dev/test predictions, and save results."""
        preds = self.framework.run()
        score_dict = {
            f"{split}_score": self.evaluate_predictions(preds[f"{split}_preds"], split)
            for split in ("dev", "test")
        }
        record = {
            "score_dict": score_dict,
            "user_requirement": self.state["requirement"],
            "args": vars(self.args),
        }
        self.save_result([0, record])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/sela/runner/autogluon.py | metagpt/ext/sela/runner/autogluon.py | import os
from datetime import datetime
import pandas as pd
from metagpt.ext.sela.runner.custom import CustomRunner
class AGRunner:
    """Wrapper that runs AutoGluon (tabular or multimodal) on SELA's dataset splits."""

    def __init__(self, state=None):
        self.state = state
        self.datasets = self.state["datasets_dir"]

    def run(self):
        """Fit a TabularPredictor on train (tuned on dev) and return dev/test predictions."""
        from autogluon.tabular import TabularDataset, TabularPredictor

        train_path = self.datasets["train"]
        dev_path = self.datasets["dev"]
        dev_wo_target_path = self.datasets["dev_wo_target"]
        test_wo_target_path = self.datasets["test_wo_target"]
        target_col = self.state["dataset_config"]["target_col"]
        train_data = TabularDataset(train_path)
        dev_data = TabularDataset(dev_path)
        dev_wo_target_data = TabularDataset(dev_wo_target_path)
        test_data = TabularDataset(test_wo_target_path)
        # AutoGluon metric names use underscores (e.g. "f1 weighted" -> "f1_weighted").
        eval_metric = self.state["dataset_config"]["metric"].replace(" ", "_")
        predictor = TabularPredictor(
            label=target_col,
            eval_metric=eval_metric,
            path="AutogluonModels/ag-{}-{}".format(self.state["task"], datetime.now().strftime("%y%m%d_%H%M")),
        ).fit(train_data=train_data, tuning_data=dev_data, num_gpus=1)
        dev_preds = predictor.predict(dev_wo_target_data)
        test_preds = predictor.predict(test_data)
        return {"test_preds": test_preds, "dev_preds": dev_preds}

    def run_multimodal(self):
        """Fit a MultiModalPredictor (e.g. for image tasks) and return dev/test predictions."""
        from autogluon.multimodal import MultiModalPredictor

        target_col = self.state["dataset_config"]["target_col"]
        train_path = self.datasets["train"]
        dev_path = self.datasets["dev"]
        dev_wo_target_path = self.datasets["dev_wo_target"]  # Updated variable name
        test_wo_target_path = self.datasets["test_wo_target"]
        eval_metric = self.state["dataset_config"]["metric"].replace(" ", "_")
        # Load the datasets
        train_data, dev_data, dev_wo_target_data, test_data = self.load_split_dataset(
            train_path, dev_path, dev_wo_target_path, test_wo_target_path
        )
        # Create and fit the predictor
        predictor = MultiModalPredictor(
            label=target_col,
            eval_metric=eval_metric,
            path="AutogluonModels/ag-{}-{}".format(self.state["task"], datetime.now().strftime("%y%m%d_%H%M")),
        ).fit(train_data=train_data, tuning_data=dev_data)
        # Make predictions on dev and test datasets
        dev_preds = predictor.predict(dev_wo_target_data)
        test_preds = predictor.predict(test_data)
        # Return predictions for dev and test datasets
        return {"dev_preds": dev_preds, "test_preds": test_preds}

    def load_split_dataset(self, train_path, dev_path, dev_wo_target_path, test_wo_target_path):
        """
        Loads training, dev, and test datasets from given file paths
        Args:
            train_path (str): Path to the training dataset.
            dev_path (str): Path to the dev dataset with target labels.
            dev_wo_target_path (str): Path to the dev dataset without target labels.
            test_wo_target_path (str): Path to the test dataset without target labels.
        Returns:
            train_data (pd.DataFrame): Loaded training dataset with updated image paths.
            dev_data (pd.DataFrame): Loaded dev dataset with updated image paths.
            dev_wo_target_data (pd.DataFrame): Loaded dev dataset without target labels and updated image paths.
            test_data (pd.DataFrame): Loaded test dataset with updated image paths.
        """
        # Define the root path to append
        # HACK: machine-specific Windows path; should come from configuration.
        root_folder = os.path.join("F:/Download/Dataset/", self.state["task"])
        # Load the datasets
        train_data = pd.read_csv(train_path)
        dev_data = pd.read_csv(dev_path)  # Load dev dataset with target labels
        dev_wo_target_data = pd.read_csv(dev_wo_target_path)  # Load dev dataset without target labels
        test_data = pd.read_csv(test_wo_target_path)
        # Get the name of the first column (assuming it's the image path column)
        image_column = train_data.columns[0]
        # Append root folder path to the image column in each dataset
        train_data[image_column] = train_data[image_column].apply(lambda x: os.path.join(root_folder, x))
        dev_data[image_column] = dev_data[image_column].apply(lambda x: os.path.join(root_folder, x))
        dev_wo_target_data[image_column] = dev_wo_target_data[image_column].apply(
            lambda x: os.path.join(root_folder, x)
        )
        test_data[image_column] = test_data[image_column].apply(lambda x: os.path.join(root_folder, x))
        return train_data, dev_data, dev_wo_target_data, test_data
class GluonRunner(CustomRunner):
    """CustomRunner wired to the AutoGluon baseline (tabular or multimodal)."""

    result_path: str = "results/autogluon"

    def __init__(self, args, **kwargs):
        super().__init__(args, **kwargs)
        self.framework = AGRunner(self.state)
        # Idiom: getattr with a default replaces the hasattr/ternary pattern.
        self.is_multimodal = getattr(args, "is_multimodal", False)

    async def run_experiment(self):
        """Run AutoGluon once, score its dev/test predictions, and save results."""
        if not self.is_multimodal:
            result = self.framework.run()
        else:
            result = self.framework.run_multimodal()
        assert result is not None
        user_requirement = self.state["requirement"]
        dev_preds = result["dev_preds"]
        test_preds = result["test_preds"]
        score_dict = {
            "dev_score": self.evaluate_predictions(dev_preds, "dev"),
            "test_score": self.evaluate_predictions(test_preds, "test"),
        }
        results = [0, {"score_dict": score_dict, "user_requirement": user_requirement, "args": vars(self.args)}]
        self.save_result(results)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/sela/runner/__init__.py | metagpt/ext/sela/runner/__init__.py | python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false | |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/sela/runner/random_search.py | metagpt/ext/sela/runner/random_search.py | from metagpt.ext.sela.experimenter import Experimenter
from metagpt.ext.sela.insights.instruction_generator import InstructionGenerator
from metagpt.ext.sela.runner.runner import Runner
from metagpt.ext.sela.utils import get_exp_pool_path
# Suffix appended to the user requirement to surface sampled insights to the agent.
EXPS_PROMPT = """
When doing the tasks, you can refer to the insights below:
{experience}
"""
class RandomSearchRunner(Runner):
    """Runner that samples insights at random instead of searching a tree."""

    result_path: str = "results/random_search"

    async def run_experiment(self):
        """Sample insights per rs_mode, run one Experimenter per experiment, save scores."""
        user_requirement = self.state["requirement"]
        exp_pool_path = get_exp_pool_path(self.args.task, self.data_config, pool_name="ds_analysis_pool")
        exp_pool = InstructionGenerator.load_insight_pool(
            exp_pool_path, use_fixed_insights=self.args.use_fixed_insights
        )
        num_runs = self.args.num_experiments

        # "single": one random insight per run; "set": one sampled instruction set per run.
        if self.args.rs_mode == "single":
            sampled = InstructionGenerator._random_sample(exp_pool, num_runs)
            exps = [entry["Analysis"] for entry in sampled]
        elif self.args.rs_mode == "set":
            exps = []
            for _ in range(num_runs):
                exp_set = InstructionGenerator.sample_instruction_set(exp_pool)
                exps.append("\n".join(f"{entry['task_id']}: {entry['Analysis']}" for entry in exp_set))
        else:
            raise ValueError(f"Invalid mode: {self.args.rs_mode}")

        results = []
        for idx in range(num_runs):
            di = Experimenter(node_id=str(idx), use_reflection=self.args.reflection, role_timeout=self.args.role_timeout)
            di.role_dir = f"{di.role_dir}_{self.args.task}"
            requirement = user_requirement + EXPS_PROMPT.format(experience=exps[idx])
            print(requirement)
            score_dict = await self.run_di(di, requirement, run_idx=idx)
            results.append(
                {
                    "idx": idx,
                    "score_dict": score_dict,
                    "rs_mode": self.args.rs_mode,
                    "insights": exps[idx],
                    "user_requirement": requirement,
                    "args": vars(self.args),
                }
            )
        self.save_result(self.summarize_results(results))
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/sela/runner/custom.py | metagpt/ext/sela/runner/custom.py | import os
import pandas as pd
from metagpt.ext.sela.evaluation.evaluation import evaluate_score
from metagpt.ext.sela.runner.runner import Runner
from metagpt.ext.sela.search.tree_search import create_initial_state
class CustomRunner(Runner):
    """Runner base class for external AutoML frameworks evaluated on SELA's splits."""

    result_path: str = "results/custom"

    def __init__(self, args, **kwargs):
        super().__init__(args, **kwargs)
        self.framework = kwargs.get("framework", None)  # todo
        self.task = kwargs.get("task", self.args.task)
        self.low_is_better = kwargs.get("low_is_better", self.args.low_is_better)
        self.name = kwargs.get("name", "")
        self.result_path = f"results/custom_{self.name}"
        self.state = create_initial_state(
            self.task,
            start_task_id=1,
            data_config=self.data_config,
            args=self.args,
        )

    def run_experiment(self):
        """Run the wrapped framework once, score dev/test predictions, save results."""
        user_requirement = self.state["requirement"]
        preds = self.framework.run(user_requirement)
        test_preds = preds["test_preds"]
        dev_preds = preds["dev_preds"]
        score_dict = {
            "dev_score": self.evaluate_predictions(dev_preds, "dev"),
            "test_score": self.evaluate_predictions(test_preds, "test"),
        }
        results = {"score_dict": score_dict, "user_requirement": user_requirement, "args": vars(self.args)}
        self.save_result(results)

    def evaluate_pred_files(self, dev_pred_path, test_pred_path):
        """Score prediction CSVs (expects a "target" column) for dev and test.

        Bug fix: previously called the non-existent ``self.evaluate_score``,
        which raised AttributeError at runtime; the intended method is
        ``self.evaluate_predictions``.
        """
        dev_preds = pd.read_csv(dev_pred_path)["target"]
        test_preds = pd.read_csv(test_pred_path)["target"]
        score_dict = {
            "dev_score": self.evaluate_predictions(dev_preds, "dev"),
            "test_score": self.evaluate_predictions(test_preds, "test"),
        }
        return score_dict

    def evaluate_predictions(self, preds, split):
        """Compare *preds* against the split's ground-truth "target" column."""
        metric = self.state["dataset_config"]["metric"]
        gt_path = os.path.join(self.state["datasets_dir"][f"{split}_target"])
        gt = pd.read_csv(gt_path)["target"]
        score = evaluate_score(preds, gt, metric)
        return score

    def load_datasets(self):
        """Load the train/dev/test CSVs for the current task as DataFrames."""
        train_path = self.state["datasets_dir"]["train"]
        dev_path = self.state["datasets_dir"]["dev"]
        test_path = self.state["datasets_dir"]["test"]
        train = pd.read_csv(train_path)
        dev = pd.read_csv(dev_path)
        test = pd.read_csv(test_path)
        return train, dev, test
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/sela/runner/aide.py | metagpt/ext/sela/runner/aide.py | import os
import time
import aide
# NOTE(review): placeholder credentials and paths ("sk-xxx", "your url",
# "xxx/data/titanic") — supply real values (preferably via environment
# variables or config, never committed secrets) before running.
os.environ["OPENAI_API_KEY"] = "sk-xxx"
os.environ["OPENAI_BASE_URL"] = "your url"

# Wall-clock timing for the whole AIDE experiment.
start_time = time.time()
data_dir = "xxx/data/titanic"
# Prompt handed to AIDE: the user requirement plus the data directory layout.
goal = f"""
# User requirement
({data_dir}, 'This is a 04_titanic dataset. Your goal is to predict the target column `Survived`.\nPerform data analysis, data preprocessing, feature engineering, and modeling to predict the target. \nReport f1 on the eval data. Do not plot or make any visualizations.\n')
# Data dir
training (with labels): train.csv
testing (without labels): test.csv
dataset description: dataset_info.json (You can use this file to get additional information about the dataset)"""
exp = aide.Experiment(
    data_dir=data_dir,  # replace this with your own directory
    goal=goal,
    eval="f1",  # replace with your own evaluation metric
)
# Run 10 AIDE improvement steps and report the best solution found.
best_solution = exp.run(steps=10)
print(f"Best solution has validation metric: {best_solution.valid_metric}")
print(f"Best solution code: {best_solution.code}")
end_time = time.time()
execution_time = end_time - start_time
print(f"run time : {execution_time} seconds")
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/sela/runner/mle_bench/instructions.py | metagpt/ext/sela/runner/mle_bench/instructions.py | SUBMISSION_INSTURCTION = """
- **Submission checker**: We provide a tool for checking the validity of your submission file, in the form of a server endpoint at `http://localhost:5000/validate`. If you post a submission file to the server (e.g. `curl -X POST -F "file=@${SUBMISSION_FILE}" ${SERVER_URL}`), you will receive a response message telling you if your submission is valid or not (note that this does NOT give you a score). For your convenience, we provide a script `/home/validate_submission.sh` which can help you make this submission to the validation server.
"""
# Benchmark preamble shown to the agent for standard MLE-bench runs.
# The {dataset_dir} and {output_dir} placeholders are filled in by the caller.
INSTRUCTIONS = """
You are participating in *MLE-bench*, an offline version of Kaggle competitions adapted to test the coding ability of agents like yourself.
You will be given a machine learning task. You must solve the task by training a model and running the model on the test set to produce a submission file. You may not hand-label the data to produce a submission file.
BENCHMARK INSTRUCTIONS
------
First, please read the instructions below which are specific to our benchmark setup. Note, these instructions can be found again in `runner/mle_bench/instructions.py`.
- **Competition-specific instructions**: The instructions for the competition are available in `{dataset_dir}/description.md`. Read this file to understand the problem setup for the competition you are currently solving.
- **Dataset**: The dataset files for the competition are available in `{dataset_dir}`.
- **Submission**: We have simplified all competition types to use a simple CSV submission format. Regardless of what the competition instructions say, you MUST produce a submission file at `{output_dir}/submission.csv`. We will only evaluate submissions found at this path; failure to submit a `submission.csv` at this path will result in a failed attempt.
- **Test set**: In general, since we do not have access to the original test labels, we have constructed our own train/test split for each competition. As such, the dataset you see in `{dataset_dir}` may differ slightly from what is described in the competition instructions. Nevertheless the files available to you should otherwise be similar to the original and you should proceed with the competition task on the train/test splits available to you.
- **Cheating**: You are not allowed to view others' solutions to this competition. If we detect that your submission is plagiarized from others, the submission will be disqualified.
Your submission will be graded based on the metric in the competition-specific instructions. Your goal is to achieve the best score possible on the competition task.
Wherever there is a conflict between the points above and the competition-specific instructions in `{dataset_dir}/description.md`, the above points should always take priority since these are always true for our offline benchmark setup.
"""
# Variant of INSTRUCTIONS that avoids naming Kaggle ("obfuscated" competitions).
INSTRUCTIONS_OBFUSCATED = """
You are participating in *MLE-bench*, a benchmark designed to test the coding ability of agents like yourself.
You will be given a machine learning task. You must solve the task by training a model and running the model on the test set to produce a submission file. You may not hand-label the data to produce a submission file.
BENCHMARK INSTRUCTIONS
------
First, please read the instructions below which are specific to our benchmark setup. Note, these instructions can be found again in `runner/mle_bench/instructions.py`.
- **Task-specific instructions**: The instructions for the task are available in `{dataset_dir}/description.md`. Read this file to understand the problem setup for the task you are currently solving.
- **Dataset**: The dataset files for the task are available in `{dataset_dir}/`.
- **Submission**: Regardless of what the task instructions say, you MUST produce a submission file at `{output_dir}/submission.csv`. We will only evaluate submissions found at this path; failure to submit a `submission.csv` at this path will result in a failed attempt.
- **Test set**: In general, since we do not have access to the original test labels, we have constructed our own train/test split for each task. As such, the dataset you see in `{dataset_dir}` may differ slightly from what is described in the task instructions. Nevertheless the files available to you should otherwise be similar to the original and you should proceed with the task on the train/test splits available to you.
- **Cheating**: You are not allowed to view others' solutions to this task. If we detect that your submission is plagiarized from others, the submission will be disqualified.
Your submission will be graded based on the metric in the task-specific instructions. Your goal is to achieve the best score possible on the task.
Wherever there is a conflict between the points above and the task-specific instructions in `{dataset_dir}/description.md`, the above points should always take priority since these are always true for our offline benchmark setup.
"""
# Appended to either instruction variant to describe the available compute.
ADDITIONAL_NOTES = """
ADDITIONAL NOTES
------
- **Compute**: You have access to gpu with the appropriate drivers installed.
"""
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/sela/search/search_algorithm.py | metagpt/ext/sela/search/search_algorithm.py | import numpy as np
from metagpt.ext.sela.search.tree_search import BaseTreeSearch, Node
class Greedy(BaseTreeSearch):
    """Tree search that always expands the child with the highest dev score."""

    def best_child(self):
        """Return the best-dev-score node seen so far, or the root if nothing expanded."""
        if not self.children:
            return self.root_node
        candidates = []
        for siblings in self.children.values():
            candidates.extend(siblings)
        return max(candidates, key=lambda node: node.normalized_reward.get("dev_score", 0))
class Random(BaseTreeSearch):
    """Baseline search that expands a uniformly random child."""

    def best_child(self):
        """Return a uniformly random expanded node, or the root if nothing expanded."""
        if not self.children:
            return self.root_node
        candidates = []
        for siblings in self.children.values():
            candidates.extend(siblings)
        return np.random.choice(candidates)
class MCTS(BaseTreeSearch):
    """Upper-Confidence-bound Tree search over experiment nodes."""

    def best_child(self):
        """Return the node maximizing UCT, or the root if nothing expanded."""
        if not self.children:
            return self.root_node

        def uct(node: Node):
            # Unvisited nodes use the pseudo-count c_unvisited so UCT stays finite.
            if node.visited:
                exploit, visits = node.avg_value(), node.visited
            else:
                exploit, visits = node.value / self.c_unvisited, self.c_unvisited
            explore = self.c_explore * np.sqrt(np.log(node.parent.visited) / visits)
            return exploit + explore

        candidates = []
        for siblings in self.children.values():
            candidates.extend(siblings)
        return max(candidates, key=uct)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/sela/search/tree_search.py | metagpt/ext/sela/search/tree_search.py | import json
import os
import pickle
import shutil
import numpy as np
import pandas as pd
from metagpt.ext.sela.data.custom_task import (
get_mle_bench_requirements,
get_mle_task_id,
)
from metagpt.ext.sela.data.dataset import (
generate_task_requirement,
get_split_dataset_path,
)
from metagpt.ext.sela.evaluation.evaluation import evaluate_score
from metagpt.ext.sela.experimenter import Experimenter, TimeoutException
from metagpt.ext.sela.insights.instruction_generator import InstructionGenerator
from metagpt.ext.sela.utils import get_exp_pool_path, load_execute_notebook, mcts_logger
from metagpt.tools.tool_recommend import ToolRecommender
from metagpt.utils.common import read_json_file
def initialize_di_root_node(state: dict, reflection: bool = True):
    """Build the Experimenter role and the root Node for a tree search.

    Args:
        state: Initial search state (see ``create_initial_state``); must provide
            at least start_task_id, node_dir and role_timeout.
        reflection: Whether the Experimenter should use reflection.

    Returns:
        tuple: (Experimenter role, root Node).
    """
    role = Experimenter(
        node_id="0",
        start_task_id=state["start_task_id"],
        use_reflection=reflection,
        role_dir=state["node_dir"],
        role_timeout=state["role_timeout"],
    )
    root = Node(parent=None, state=state, action=None, value=0)
    return role, root
def create_initial_state(task: str, start_task_id: int, data_config: dict, args):
    """
    Create the initial state of the tree.
    Args:
        task (str): The task to be performed.
        start_task_id (int): The ID of the starting task.
        data_config (dict): The configuration of the data.
            Expected keys: 'datasets', 'work_dir', 'role_dir'.
        args (Namespace): The arguments passed to the program.
            Expected attributes: 'external_eval', 'custom_dataset_dir', 'special_instruction', 'name', 'low_is_better', 'role_timeout'.
    Returns:
        dict: The initial state of the tree.

    Note:
        Mutates ``args.low_is_better`` (forced True for rmse tasks) as a side
        effect; for custom datasets, ``task`` is replaced by the MLE task id.
    """
    external_eval = args.external_eval
    if args.custom_dataset_dir:
        # Custom (MLE-bench style) dataset: no per-task config or insight pool.
        dataset_config = None
        datasets_dir = args.custom_dataset_dir
        requirement = get_mle_bench_requirements(
            args.custom_dataset_dir, data_config, special_instruction=args.special_instruction
        )
        exp_pool_path = None
        # external_eval = False  # make sure external eval is false if custom dataset is used
        task = get_mle_task_id(args.custom_dataset_dir)
    else:
        dataset_config = data_config["datasets"][task]
        if dataset_config["metric"] == "rmse":
            # rmse is an error metric: lower is better.
            args.low_is_better = True
        datasets_dir = get_split_dataset_path(task, data_config)
        requirement = generate_task_requirement(
            task, data_config, is_di=True, special_instruction=args.special_instruction
        )
        exp_pool_path = get_exp_pool_path(task, data_config, pool_name="ds_analysis_pool")
    initial_state = {
        "task": task,
        "work_dir": data_config["work_dir"],
        "node_dir": os.path.join(data_config["work_dir"], data_config["role_dir"], f"{task}{args.name}"),
        "dataset_config": dataset_config,
        "datasets_dir": datasets_dir,  # won't be used if external eval is used
        "exp_pool_path": exp_pool_path,
        "requirement": requirement,
        "has_run": False,
        "start_task_id": start_task_id,
        "low_is_better": args.low_is_better,
        "role_timeout": args.role_timeout,
        "external_eval": external_eval,
        "custom_dataset_dir": args.custom_dataset_dir,
    }
    os.makedirs(initial_state["node_dir"], exist_ok=True)
    return initial_state
class Node:
state: dict = {}
action: str = None
value: float = 0
visited: int = 0
children: list = []
normalized_reward: dict = {"train_score": 0, "dev_score": 0, "test_score": 0}
parent = None
def __init__(
self, parent=None, state: dict = None, action: str = None, value: float = 0, max_depth: int = 4, **kwargs
):
self.state = state
self.action = action
self.value = value
self.raw_value = 0
self.raw_reward = dict()
self.parent = parent
self.children = []
self.max_depth = max_depth
self.depth = self.generate_depth()
self.id = self.generate_id()
if self.parent is not None:
self.save_node()
def avg_value(self):
if self.visited == 0:
return 0
return self.value / self.visited
def __hash__(self):
return hash(self.id)
def save_node(self):
os.makedirs(self.state["node_dir"], exist_ok=True)
with open(os.path.join(self.state["node_dir"], f"Node-{self.id}.pkl"), "wb") as f:
pickle.dump(self, f)
def load_node(self):
with open(os.path.join(self.state["node_dir"], f"Node-{self.id}.pkl"), "rb") as f:
return pickle.load(f)
def get_depth(self):
return self.depth
def get_node_dir(self):
return self.state["node_dir"]
def generate_depth(self):
if self.parent is None:
return 0
else:
return self.parent.depth + 1
def generate_id(self):
if self.parent is None:
return "0"
else:
num_sibling = len(self.parent.children)
return f"{self.parent.id}-{num_sibling}"
def is_terminal(self):
return int(self.state["start_task_id"]) == self.max_depth + 1 # TODO: Check if this is correct or +1
def is_fully_expanded(self):
return len(self.children) > 0
def add_child(self, child_node):
self.children.append(child_node)
def update(self, reward: dict, child_node=None):
if child_node is not None:
child_role = child_node.load_role()
role = self.load_role()
role.update_til_start_task(child_role)
role.save_state()
else:
self.raw_value = reward["test_score"]
self.value += reward["score"]
self.visited += 1
self.save_node()
def get_role_path(self):
fname = f"Node-{self.id}.json"
role_path = os.path.join(self.state["node_dir"], fname)
return role_path
def load_role(self):
role_dict = read_json_file(self.get_role_path())
if role_dict.get("tool_recommender") is None:
role_dict["tool_recommender"] = ToolRecommender()
elif isinstance(role_dict.get("tool_recommender", {}).get("tools"), dict):
role_dict["tool_recommender"]["tools"] = list(role_dict["tool_recommender"]["tools"].keys())
role = Experimenter(**role_dict)
if self.parent is not None: # TODO: Check this
parent_role = self.parent.load_role()
role.update_til_start_task(parent_role, backward=False)
role.remap_tasks()
return role
def save_new_role(self, role: Experimenter):
role.node_id = self.id
role.start_task_id = self.state["start_task_id"]
role.state_saved = False
role.change_next_instruction(self.action)
mcts_logger.log("MCTS", f"Saving new role: {role.node_id}")
role = role.model_copy()
role.save_state(static_save=True)
async def expand(self, max_children: int, instruction_generator: InstructionGenerator):
if self.is_fully_expanded():
return
role = self.load_role()
original_instruction = role.get_next_instruction()
insights = await instruction_generator.generate_new_instructions(
task_id=role.start_task_id + 1,
original_instruction=original_instruction,
max_num=max_children,
)
new_state = self.state.copy()
new_state["start_task_id"] += 1
for insight in insights:
new_role = role.model_copy()
node = Node(parent=self, state=new_state, action=insight, value=0)
node.save_new_role(new_role)
self.add_child(node)
def get_predictions_path(self, split):
return os.path.join(self.state["node_dir"], f"Node-{self.id}-{split}_predictions.csv")
def get_and_move_predictions(self, split):
if not os.path.exists(self.get_predictions_path(split)):
pred_path = os.path.join(self.state["work_dir"], self.state["task"], f"{split}_predictions.csv")
shutil.copy(pred_path, self.get_predictions_path(split))
os.remove(pred_path)
return pd.read_csv(self.get_predictions_path(split))
def get_gt(self, split):
gt_path = os.path.join(self.state["datasets_dir"][f"{split}_target"])
return pd.read_csv(gt_path)
def evaluate_prediction(self, split):
preds = self.get_and_move_predictions(split)["target"]
gt = self.get_gt(split)["target"]
metric = self.state["dataset_config"]["metric"]
return evaluate_score(preds, gt, metric)
def evaluate_simulation(self, score_dict):
    """Finalize scores for a simulation, caching prediction files either way.

    With external_eval, recompute dev/test scores from predictions vs ground
    truth and overwrite them in *score_dict* (overall "score" = dev score).
    Otherwise just move the prediction CSVs into the node directory and keep
    the scores already present in *score_dict*.
    """
    if self.state["external_eval"]:  # use external evaluation
        scores = {"dev_score": self.evaluate_prediction("dev"), "test_score": self.evaluate_prediction("test")}
        scores["score"] = scores["dev_score"]
        score_dict.update(scores)
    else:
        self.get_and_move_predictions("dev")
        self.get_and_move_predictions("test")
    return score_dict
async def run_node(self, role: Experimenter = None):
    """Execute this node's experiment and return (normalized_scores, solution).

    Retries up to 3 times on generic errors; a role-level timeout aborts the
    retry loop immediately. When every attempt fails, a worst-possible score
    dict is substituted. For low_is_better tasks the scores are mapped to
    [0, 1] with higher-is-better semantics before being returned.

    Returns:
        (score_dict, result_dict) normally; for an already-simulated
        terminal node, only the cached raw_reward dict.
    """
    if self.is_terminal() and role is not None:
        if role.state_saved:
            # Already simulated and persisted: reuse the cached reward.
            return self.raw_reward
    max_retries = 3
    num_runs = 1
    run_finished = False
    while num_runs <= max_retries and not run_finished:
        try:
            if not role:
                # Resume a saved role: replay its notebook, then continue.
                role = self.load_role()
                await load_execute_notebook(role)  # execute previous notebook's code
                await role.run(with_message="continue")
            else:
                await role.run(with_message=self.state["requirement"])
            score_dict = await role.get_score()
            score_dict = self.evaluate_simulation(score_dict)
            self.raw_reward = score_dict
            run_finished = True
        except TimeoutException as e:
            mcts_logger.log("MCTS", f"Role-level timeout: {e}")
            break
        except Exception as e:
            mcts_logger.log("MCTS", f"Error in running the role: {e}")
            num_runs += 1
    if not run_finished:
        # NOTE(review): if load_role() itself raised, role may still be None
        # here and role.node_id would raise — confirm upstream guarantees.
        mcts_logger.log("MCTS", f"Role {role.node_id} failed to run")
        if self.state["low_is_better"]:
            score_dict = {"test_score": np.inf, "dev_score": np.inf, "score": np.inf}
        else:
            score_dict = {"test_score": 0, "dev_score": 0, "score": 0}
        self.raw_reward = score_dict
    if self.state["low_is_better"]:
        # normalized the score to be between 0 and 1, and higher is better
        def normalize_score(score):
            if score == -1:
                return 0
            return 1 / (1 + score)

        score_dict = {k: normalize_score(v) for k, v in score_dict.items()}
    self.normalized_reward = score_dict
    result_dict = role.get_solution()
    return score_dict, result_dict
class BaseTreeSearch:
    """Skeleton MCTS driver over Experimenter Nodes.

    Subclasses must implement best_child() (the selection policy). The tree
    itself lives in the Node objects; `children` mirrors the parent->children
    mapping for traversal, and `node_order` records visitation order.
    """

    # data_path
    # NOTE(review): `children` and `node_order` are mutable CLASS attributes
    # and are therefore shared across all instances — confirm only one search
    # object exists per process, or move them into __init__.
    root_node: Node = None
    children: dict = {}
    max_depth: int = None
    c_explore: float = 1.4
    c_unvisited: float = 0.8
    node_order: list = []
    # insight generator
    instruction_generator: InstructionGenerator = None

    def __init__(self, root_node: Node, max_depth: int, use_fixed_insights: bool):
        self.root_node = root_node
        self.max_depth = max_depth
        self.use_fixed_insights = use_fixed_insights

    def select(self, node: Node):
        """Pick the next node to work on via the subclass's best_child()."""
        node = self.best_child()
        mcts_logger.log("MCTS", f"Selected node id: {node.id}")
        return node

    def best_child(self):
        # Selection policy — provided by concrete subclasses.
        raise NotImplementedError

    async def expand(self, node: Node, max_children=5):
        """Expand *node* and register its children in the search's map."""
        await node.expand(max_children, self.instruction_generator)
        if node not in self.children or not self.children[node]:
            self.children[node] = node.children
        return node.children

    async def simulate(self, node: Node, role=None):
        """Return the reward for a random simulation (to completion) of *node*."""
        mcts_logger.log("MCTS", f"Start simulating node {node.id}:")
        # Random rollout: descend by uniformly picking children until a leaf.
        while node.children:
            node = np.random.choice(node.children)
        reward, result_dict = await node.run_node(role)
        mcts_logger.log("MCTS", f"Simulated node's reward: {reward}")
        # TODO: add new insights
        return reward

    def backpropagate(self, node: Node, reward: dict):
        """Propagate *reward* from *node* up to the root, updating each ancestor."""
        child_node = node
        node.update(reward)
        node = node.parent
        while node is not None:
            node.update(reward, child_node)
            node, child_node = node.parent, node

    def best_path(self, root: Node):
        """Return the best nodes by dev and test normalized score (tree-wide DFS)."""
        best_child = root
        global_best_score = root.normalized_reward["test_score"]
        dev_best_score = root.normalized_reward["dev_score"]

        def bfs(node: Node, best_score: float, best_child: Node, split: str):
            # Despite the name, this is a recursive depth-first scan.
            assert split in ["test_score", "dev_score"]
            if node not in self.children:
                return best_score, best_child
            for child in self.children[node]:
                score = child.normalized_reward[split]
                print(child.id, split, score)
                if score > best_score:
                    best_score = score
                    best_child = child
                best_score, best_child = bfs(child, best_score, best_child, split)
            return best_score, best_child

        _, global_best_child = bfs(root, global_best_score, best_child, "test_score")
        _, dev_best_child = bfs(root, dev_best_score, best_child, "dev_score")
        return {"dev_best": dev_best_child, "global_best": global_best_child, "scores": self.get_score_order_dict()}

    def get_num_simulations(self):
        """Total simulations so far = visits recorded at the root."""
        return self.root_node.visited

    def save_node_order(self, node_id: str):
        """Append *node_id* to the visit order and persist it as JSON."""
        self.node_order.append(node_id)
        with open(os.path.join(self.root_node.state["node_dir"], "node_order.json"), "w") as f:
            json.dump(self.node_order, f)

    def load_node_order(self):
        """Restore the visit order saved by save_node_order()."""
        with open(os.path.join(self.root_node.state["node_dir"], "node_order.json"), "r") as f:
            self.node_order = json.load(f)

    def get_score_order_dict(self):
        """Replay node_order and collect raw + normalized scores per node."""
        scores = {"dev": [], "test": [], "dev_raw": [], "test_raw": []}
        for node_id in self.node_order:
            # Build a shell node and hydrate it from its pickle on disk.
            node = Node(parent=None, state=self.root_node.state, action=None, value=0)
            node.id = node_id
            node = node.load_node()
            scores["dev"].append(node.normalized_reward["dev_score"])
            scores["test"].append(node.normalized_reward["test_score"])
            scores["dev_raw"].append(node.raw_reward["dev_score"])
            scores["test_raw"].append(node.raw_reward["test_score"])
        return scores

    async def search(self, state: dict, args):
        """Run the full MCTS loop and return best_path() of the root.

        When no saved tree is loaded, two rollouts are spent bootstrapping
        (one simulation of the root plus one expand-and-simulate).
        """
        reflection = args.reflection
        load_tree = args.load_tree
        rollouts = args.rollouts
        from_scratch = args.from_scratch
        role, root = initialize_di_root_node(state, reflection=reflection)
        self.root_node = root
        self.instruction_generator = InstructionGenerator(
            state=state, use_fixed_insights=self.use_fixed_insights, from_scratch=from_scratch
        )
        await self.instruction_generator.initialize()
        tree_loaded = False
        if load_tree:
            tree_loaded = self.load_tree()
            mcts_logger.log("MCTS", f"Number of simulations: {self.get_num_simulations()}")
            mcts_logger.log("MCTS", f"Tree loaded: {tree_loaded}")
        if not tree_loaded:
            rollouts -= 2  # 2 rollouts for the initial tree
            if rollouts < 0:
                raise ValueError("Rollouts must be greater than 2 if there is no tree to load")
            self.children[root] = []
            reward = await self.simulate(root, role)
            self.backpropagate(root, reward)
            node, reward = await self.expand_and_simulate(root)
            # self.backpropagate(node, reward)
            self.save_node_order(root.id)
            self.save_node_order(node.id)
        else:
            root = self.root_node
            self.load_node_order()
        for _ in range(rollouts):  # number of rollouts
            mcts_logger.log("MCTS", f"Start the next rollout {_+1}")
            node = self.select(root)
            if node.is_terminal():
                if node.raw_value == 0:
                    reward = await self.simulate(node)
                else:
                    # Reuse the terminal node's previously computed reward.
                    reward = {"test_score": node.raw_value, "score": node.raw_reward["score"]}
                mcts_logger.log("MCTS", f"Terminal node's reward: {reward}")
                self.backpropagate(node, reward)
            else:
                node, reward = await self.expand_and_simulate(node)
                # self.backpropagate(node, reward)
            self.save_node_order(node.id)
        return self.best_path(root)

    async def expand_and_simulate(self, node: Node):
        # Expand and randomly select a child node, then simulate it
        if node.visited > 0:
            children = await self.expand(node)
            node = np.random.choice(children)
        reward = await self.simulate(node)
        self.backpropagate(node, reward)
        return node, reward

    def load_tree(self):
        """Rebuild the search tree from pickles in node_dir; True on success."""

        def load_children_node(node: Node):
            mcts_logger.log("MCTS", f"Load node {node.id}'s child: {node.children}")
            if node.is_terminal() or not node.children:
                return
            for child in node.children:
                child.load_node()
                self.children[child] = child.children
                load_children_node(child)

        # Load all pkl files in the node_dir
        all_pkl_files = os.listdir(self.root_node.state["node_dir"])
        all_pkl_files = [f for f in all_pkl_files if f.endswith(".pkl")]
        if os.path.exists(os.path.join(self.root_node.state["node_dir"], "Node-0.pkl")):
            with open(os.path.join(self.root_node.state["node_dir"], "Node-0.pkl"), "rb") as f:
                self.root_node = pickle.load(f)
            self.children[self.root_node] = self.root_node.children
            load_children_node(self.root_node)
            if self.children:
                return True
        return False
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/sela/data/dataset.py | metagpt/ext/sela/data/dataset.py | import argparse
import asyncio
import json
import os
from pathlib import Path
import openml
import pandas as pd
import yaml
from sklearn.model_selection import train_test_split
from metagpt.ext.sela.insights.solution_designer import SolutionDesigner
from metagpt.ext.sela.utils import DATA_CONFIG
BASE_USER_REQUIREMENT = """
This is a {datasetname} dataset. Your goal is to predict the target column `{target_col}`.
Perform data analysis, data preprocessing, feature engineering, and modeling to predict the target.
Report {metric} on the eval data. Do not plot or make any visualizations.
"""
USE_AG = """
- Please use autogluon for model training with presets='medium_quality', time_limit=None, give dev dataset to tuning_data, and use right eval_metric.
"""
TEXT_MODALITY = """
- You could use models from transformers library for this text dataset.
- Use gpu if available for faster training.
"""
IMAGE_MODALITY = """
- You could use models from transformers/torchvision library for this image dataset.
- Use gpu if available for faster training.
"""
STACKING = """
- To avoid overfitting, train a weighted ensemble model such as StackingClassifier or StackingRegressor.
- You could do some quick model prototyping to see which models work best and then use them in the ensemble.
"""
SPECIAL_INSTRUCTIONS = {"ag": USE_AG, "stacking": STACKING, "text": TEXT_MODALITY, "image": IMAGE_MODALITY}
DI_INSTRUCTION = """
## Attention
1. Please do not leak the target label in any form during training.
2. Test set does not have the target column.
3. When conducting data exploration or analysis, print out the results of your findings.
4. You should perform transformations on train, dev, and test sets at the same time (it's a good idea to define functions for this and avoid code repetition).
5. When scaling or transforming features, make sure the target column is not included.
6. You could utilize dev set to validate and improve model training. {special_instruction}
## Saving Dev and Test Predictions
1. Save the prediction results of BOTH the dev set and test set in `dev_predictions.csv` and `test_predictions.csv` respectively in the output directory.
- Both files should contain a single column named `target` with the predicted values.
2. Make sure the prediction results are in the same format as the target column in the original training set.
- For instance, if the original target column is a list of string, the prediction results should also be strings.
## Output Performance
Print the train and dev set performance in the last step.
# Output dir
{output_dir}
"""
TASK_PROMPT = """
# User requirement
{user_requirement}
{additional_instruction}
# Data dir
train set (with labels): {train_path}
dev set (with labels): {dev_path}
test set (without labels): {test_path}
dataset description: {data_info_path} (During EDA, you can use this file to get additional information about the dataset)
"""
SEED = 100
TRAIN_TEST_SPLIT = 0.8
TRAIN_DEV_SPLIT = 0.75
OPENML_DATASET_IDS = [
# reg
41021,
42727,
41980,
42225,
531,
# cls
41143,
31,
42733,
41162,
1067,
# multi cls
40498,
40982,
12,
40984,
4538,
]
CUSTOM_DATASETS = [
("04_titanic", "Survived"),
("05_house-prices-advanced-regression-techniques", "SalePrice"),
("06_santander-customer-transaction-prediction", "target"),
("07_icr-identify-age-related-conditions", "Class"),
]
DSAGENT_DATASETS = [("concrete-strength", "Strength"), ("smoker-status", "smoking"), ("software-defects", "defects")]
def get_split_dataset_path(dataset_name, config):
    """Return the per-split CSV paths for *dataset_name* as an ordered dict.

    Raises:
        ValueError: when the dataset is not declared in *config*.
    """
    if dataset_name not in config["datasets"]:
        raise ValueError(
            f"Dataset {dataset_name} not found in config file. Available datasets: {config['datasets'].keys()}"
        )
    dataset = config["datasets"][dataset_name]
    data_path = os.path.join(config["datasets_dir"], dataset["dataset"])
    # Every split variant follows the same "split_<name>.csv" naming scheme.
    split_names = [
        "train",
        "dev",
        "dev_wo_target",
        "dev_target",
        "test",
        "test_wo_target",
        "test_target",
    ]
    return {name: os.path.join(data_path, f"split_{name}.csv") for name in split_names}
def get_user_requirement(task_name, config):
    """Look up the free-text user requirement for *task_name* in *config*.

    Raises:
        ValueError: when the task is not declared in *config*.
    """
    if task_name not in config["datasets"]:
        raise ValueError(
            f"Dataset {task_name} not found in config file. Available datasets: {config['datasets'].keys()}"
        )
    return config["datasets"][task_name]["user_requirement"]
def save_datasets_dict_to_yaml(datasets_dict, name="datasets.yaml"):
    """Serialize the datasets registry to a YAML file at *name*."""
    with open(name, "w") as file:
        yaml.dump(datasets_dict, file)
def create_dataset_dict(dataset):
    """Build the registry entry for one dataset (requirement, metric, target)."""
    dataset_dict = {
        "dataset": dataset.name,
        "user_requirement": dataset.create_base_requirement(),
        "metric": dataset.get_metric(),
        "target_col": dataset.target_col,
    }
    return dataset_dict
def generate_di_instruction(output_dir, special_instruction):
    """Fill the DI instruction template, optionally appending a special note.

    *special_instruction* must be a key of SPECIAL_INSTRUCTIONS ("ag",
    "stacking", "text", "image") or falsy for no extra note.
    """
    if special_instruction:
        special_instruction_prompt = SPECIAL_INSTRUCTIONS[special_instruction]
    else:
        special_instruction_prompt = ""
    additional_instruction = DI_INSTRUCTION.format(
        output_dir=output_dir, special_instruction=special_instruction_prompt
    )
    return additional_instruction
def generate_task_requirement(task_name, data_config, is_di=True, special_instruction=None):
    """Assemble the full task prompt (requirement + data paths + instructions).

    With is_di=True the DI-specific instruction block is appended; the
    assembled prompt is printed before being returned.
    """
    user_requirement = get_user_requirement(task_name, data_config)
    split_dataset_path = get_split_dataset_path(task_name, data_config)
    train_path = split_dataset_path["train"]
    dev_path = split_dataset_path["dev"]
    # The agent only ever sees the unlabeled test split.
    test_path = split_dataset_path["test_wo_target"]
    work_dir = data_config["work_dir"]
    output_dir = f"{work_dir}/{task_name}"
    datasets_dir = data_config["datasets_dir"]
    data_info_path = f"{datasets_dir}/{task_name}/dataset_info.json"
    if is_di:
        additional_instruction = generate_di_instruction(output_dir, special_instruction)
    else:
        additional_instruction = ""
    user_requirement = TASK_PROMPT.format(
        user_requirement=user_requirement,
        train_path=train_path,
        dev_path=dev_path,
        test_path=test_path,
        additional_instruction=additional_instruction,
        data_info_path=data_info_path,
    )
    print(user_requirement)
    return user_requirement
class ExpDataset:
    """A benchmark dataset with train/dev/test splits materialized on disk.

    Construction eagerly creates the split CSVs and a dataset_info.json
    under <dataset_dir>/<name>/ (unless they already exist).
    """

    # Class-level annotations; the actual values are set per instance.
    description: str = None
    metadata: dict = None
    dataset_dir: str = None
    target_col: str = None
    name: str = None

    def __init__(self, name, dataset_dir, **kwargs):
        self.name = name
        self.dataset_dir = dataset_dir
        self.target_col = kwargs.get("target_col", None)
        self.force_update = kwargs.get("force_update", False)
        # Side effect on construction: split files are written to disk here.
        self.save_dataset(target_col=self.target_col)

    def check_dataset_exists(self):
        """True when all seven split CSVs already exist on disk."""
        fnames = [
            "split_train.csv",
            "split_dev.csv",
            "split_test.csv",
            "split_dev_wo_target.csv",
            "split_dev_target.csv",
            "split_test_wo_target.csv",
            "split_test_target.csv",
        ]
        for fname in fnames:
            if not os.path.exists(Path(self.dataset_dir, self.name, fname)):
                return False
        return True

    def check_datasetinfo_exists(self):
        """True when dataset_info.json already exists."""
        return os.path.exists(Path(self.dataset_dir, self.name, "dataset_info.json"))

    def get_raw_dataset(self):
        """Load raw/train.csv (required) and raw/test.csv (optional).

        Returns:
            (train_df, test_df) where test_df is None when no raw test file
            exists.

        Raises:
            FileNotFoundError: when raw/train.csv is missing.
        """
        raw_dir = Path(self.dataset_dir, self.name, "raw")
        train_df = None
        test_df = None
        if not os.path.exists(Path(raw_dir, "train.csv")):
            raise FileNotFoundError(f"Raw dataset `train.csv` not found in {raw_dir}")
        else:
            train_df = pd.read_csv(Path(raw_dir, "train.csv"))
        if os.path.exists(Path(raw_dir, "test.csv")):
            test_df = pd.read_csv(Path(raw_dir, "test.csv"))
        return train_df, test_df

    def get_dataset_info(self):
        """Compute summary metadata (class counts, missing values, etc.)."""
        raw_df = pd.read_csv(Path(self.dataset_dir, self.name, "raw", "train.csv"))
        metadata = {
            "NumberOfClasses": raw_df[self.target_col].nunique(),
            "NumberOfFeatures": raw_df.shape[1],
            "NumberOfInstances": raw_df.shape[0],
            "NumberOfInstancesWithMissingValues": int(raw_df.isnull().any(axis=1).sum()),
            "NumberOfMissingValues": int(raw_df.isnull().sum().sum()),
            "NumberOfNumericFeatures": raw_df.select_dtypes(include=["number"]).shape[1],
            "NumberOfSymbolicFeatures": raw_df.select_dtypes(include=["object"]).shape[1],
        }
        df_head_text = self.get_df_head(raw_df)
        dataset_info = {
            "name": self.name,
            "description": "",
            "target_col": self.target_col,
            "metadata": metadata,
            "df_head": df_head_text,
        }
        return dataset_info

    def get_df_head(self, raw_df):
        """Render the first rows as text (subclasses may override the format)."""
        return raw_df.head().to_string(index=False)

    def get_metric(self):
        """Pick an evaluation metric from the target's class cardinality.

        2 distinct values -> binary F1; 3..200 -> weighted F1; 0 or more
        than 200 distinct values is treated as regression -> RMSE.
        """
        dataset_info = self.get_dataset_info()
        num_classes = dataset_info["metadata"]["NumberOfClasses"]
        if num_classes == 2:
            metric = "f1 binary"
        elif 2 < num_classes <= 200:
            metric = "f1 weighted"
        elif num_classes > 200 or num_classes == 0:
            metric = "rmse"
        else:
            raise ValueError(f"Number of classes {num_classes} not supported")
        return metric

    def create_base_requirement(self):
        """Fill BASE_USER_REQUIREMENT with this dataset's name/target/metric."""
        metric = self.get_metric()
        req = BASE_USER_REQUIREMENT.format(datasetname=self.name, target_col=self.target_col, metric=metric)
        return req

    def save_dataset(self, target_col):
        """Create split CSVs and dataset_info.json if missing (or forced)."""
        df, test_df = self.get_raw_dataset()
        if not self.check_dataset_exists() or self.force_update:
            print(f"Saving Dataset {self.name} in {self.dataset_dir}")
            self.split_and_save(df, target_col, test_df=test_df)
        else:
            print(f"Dataset {self.name} already exists")
        if not self.check_datasetinfo_exists() or self.force_update:
            print(f"Saving Dataset info for {self.name}")
            dataset_info = self.get_dataset_info()
            self.save_datasetinfo(dataset_info)
        else:
            print(f"Dataset info for {self.name} already exists")

    def save_datasetinfo(self, dataset_info):
        """Write dataset_info.json (UTF-8, human-readable)."""
        with open(Path(self.dataset_dir, self.name, "dataset_info.json"), "w", encoding="utf-8") as file:
            # utf-8 encoding is required
            json.dump(dataset_info, file, indent=4, ensure_ascii=False)

    def save_split_datasets(self, df, split, target_col=None):
        """Write split_<split>.csv; when target_col is given, also write the
        without-target and target-only variants (label column renamed to
        `target` in the latter)."""
        path = Path(self.dataset_dir, self.name)
        df.to_csv(Path(path, f"split_{split}.csv"), index=False)
        if target_col:
            df_wo_target = df.drop(columns=[target_col])
            df_wo_target.to_csv(Path(path, f"split_{split}_wo_target.csv"), index=False)
            df_target = df[[target_col]].copy()
            if target_col != "target":
                # Standardize the label column name to `target`.
                df_target["target"] = df_target[target_col]
                df_target = df_target.drop(columns=[target_col])
            df_target.to_csv(Path(path, f"split_{split}_target.csv"), index=False)

    def split_and_save(self, df, target_col, test_df=None):
        """Split into train/dev/test with a fixed SEED and persist all files.

        When a raw test set is provided it is used as-is; otherwise the test
        split is carved from *df*. Only dev/test get the target-variant files.
        """
        if not target_col:
            raise ValueError("Target column not provided")
        if test_df is None:
            train, test = train_test_split(df, test_size=1 - TRAIN_TEST_SPLIT, random_state=SEED)
        else:
            train = df
            test = test_df
        train, dev = train_test_split(train, test_size=1 - TRAIN_DEV_SPLIT, random_state=SEED)
        self.save_split_datasets(train, "train")
        self.save_split_datasets(dev, "dev", target_col)
        self.save_split_datasets(test, "test", target_col)
class OpenMLExpDataset(ExpDataset):
    """ExpDataset whose raw data is fetched from OpenML by numeric id.

    The dataset's own name and default target attribute from OpenML replace
    whatever *name*/*target_col* the caller passed.
    """

    def __init__(self, name, dataset_dir, dataset_id, **kwargs):
        self.dataset_id = dataset_id
        # Fetch metadata only; actual rows are downloaded in get_raw_dataset().
        self.dataset = openml.datasets.get_dataset(
            self.dataset_id, download_data=False, download_qualities=False, download_features_meta_data=True
        )
        self.name = self.dataset.name
        self.target_col = self.dataset.default_target_attribute
        super().__init__(self.name, dataset_dir, target_col=self.target_col, **kwargs)

    def get_raw_dataset(self):
        """Download the OpenML data, cache it as raw/train.csv, return it.

        OpenML datasets have no predefined test split, hence (df, None).
        """
        dataset = self.dataset
        dataset_df, *_ = dataset.get_data()
        raw_dir = Path(self.dataset_dir, self.name, "raw")
        os.makedirs(raw_dir, exist_ok=True)
        dataset_df.to_csv(Path(raw_dir, "train.csv"), index=False)
        return dataset_df, None

    def get_dataset_info(self):
        """Extend the base info with OpenML's description and quality stats."""
        dataset_info = super().get_dataset_info()
        dataset = self.dataset
        dataset_info["name"] = dataset.name
        dataset_info["description"] = dataset.description
        dataset_info["metadata"].update(dataset.qualities)
        return dataset_info
async def process_dataset(dataset, solution_designer: SolutionDesigner, save_analysis_pool, datasets_dict):
    """Optionally generate solution insights for *dataset*, then register its
    entry in *datasets_dict* (mutated in place)."""
    if save_analysis_pool:
        await solution_designer.generate_solutions(dataset.get_dataset_info(), dataset.name)
    datasets_dict["datasets"][dataset.name] = create_dataset_dict(dataset)
def parse_args():
    """Parse CLI flags for dataset preparation.

    --force_update rebuilds existing datasets; --save_analysis_pool (default
    on) can be disabled with --no_save_analysis_pool.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--force_update", action="store_true", help="Force update datasets")
    arg_parser.add_argument("--save_analysis_pool", action="store_true", help="Save analysis pool")
    arg_parser.add_argument(
        "--no_save_analysis_pool",
        dest="save_analysis_pool",
        action="store_false",
        help="Do not save analysis pool",
    )
    arg_parser.set_defaults(save_analysis_pool=True)
    return arg_parser.parse_args()
if __name__ == "__main__":
    # Prepare every benchmark dataset and collect their registry entries.
    datasets_dir = DATA_CONFIG["datasets_dir"]
    args = parse_args()
    force_update = args.force_update
    save_analysis_pool = args.save_analysis_pool
    datasets_dict = {"datasets": {}}
    solution_designer = SolutionDesigner()
    # OpenML datasets are identified by numeric id; the empty name is
    # replaced with the OpenML dataset's own name during construction.
    for dataset_id in OPENML_DATASET_IDS:
        openml_dataset = OpenMLExpDataset("", datasets_dir, dataset_id, force_update=force_update)
        asyncio.run(process_dataset(openml_dataset, solution_designer, save_analysis_pool, datasets_dict))
    for dataset_name, target_col in CUSTOM_DATASETS:
        custom_dataset = ExpDataset(dataset_name, datasets_dir, target_col=target_col, force_update=force_update)
        asyncio.run(process_dataset(custom_dataset, solution_designer, save_analysis_pool, datasets_dict))
    for dataset_name, target_col in DSAGENT_DATASETS:
        custom_dataset = ExpDataset(dataset_name, datasets_dir, target_col=target_col, force_update=force_update)
        asyncio.run(process_dataset(custom_dataset, solution_designer, save_analysis_pool, datasets_dict))
    save_datasets_dict_to_yaml(datasets_dict)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/sela/data/hf_data.py | metagpt/ext/sela/data/hf_data.py | import asyncio
import io
import os
from pathlib import Path
import pandas as pd
from datasets import load_dataset
from PIL import Image
from metagpt.ext.sela.data.dataset import (
ExpDataset,
parse_args,
process_dataset,
save_datasets_dict_to_yaml,
)
from metagpt.ext.sela.insights.solution_designer import SolutionDesigner
from metagpt.ext.sela.utils import DATA_CONFIG
HFDATSETS = [
{"name": "sms_spam", "dataset_name": "ucirvine/sms_spam", "target_col": "label", "modality": "text"},
{"name": "banking77", "dataset_name": "PolyAI/banking77", "target_col": "label", "modality": "text"},
{"name": "gnad10", "dataset_name": "community-datasets/gnad10", "target_col": "label", "modality": "text"},
{
"name": "oxford-iiit-pet",
"dataset_name": "timm/oxford-iiit-pet",
"image_col": "image",
"target_col": "label",
"modality": "image",
},
{
"name": "stanford_cars",
"dataset_name": "tanganke/stanford_cars",
"image_col": "image",
"target_col": "label",
"modality": "image",
},
{
"name": "fashion_mnist",
"dataset_name": "zalando-datasets/fashion_mnist",
"image_col": "image",
"target_col": "label",
"modality": "image",
},
]
class HFExpDataset(ExpDataset):
    """ExpDataset backed by a HuggingFace `datasets` dataset (text or image)."""

    # NOTE(review): these ratios appear unused in the visible code; splitting
    # is inherited from ExpDataset — confirm before relying on them.
    train_ratio = 0.6
    dev_ratio = 0.2
    test_ratio = 0.2

    def __init__(self, name, dataset_dir, dataset_name, **kwargs):
        self.name = name
        self.dataset_dir = dataset_dir
        self.dataset_name = dataset_name
        self.modality = kwargs.get("modality", "")
        self.target_col = kwargs.get("target_col", "label")
        self.image_col = kwargs.get("image_col", "image")
        # Downloads the dataset from the HF hub on construction.
        self.dataset = load_dataset(self.dataset_name, trust_remote_code=True)
        super().__init__(self.name, dataset_dir, **kwargs)

    def get_raw_dataset(self):
        """Materialize the HF train/test splits as raw CSVs and return them.

        Cached CSVs are reused when present. For image datasets, embedded
        bytes are dumped to JPEG files first so the CSV stores file paths.
        """
        raw_dir = Path(self.dataset_dir, self.name, "raw")
        raw_dir.mkdir(parents=True, exist_ok=True)
        if os.path.exists(Path(raw_dir, "train.csv")):
            df = pd.read_csv(Path(raw_dir, "train.csv"), encoding="utf-8")
        else:
            df = self.dataset["train"].to_pandas()
            if self.modality == "image":
                df = self.save_images_and_update_df(df, raw_dir, "train")
            df.to_csv(Path(raw_dir, "train.csv"), index=False, encoding="utf-8")
        if os.path.exists(Path(raw_dir, "test.csv")):
            test_df = pd.read_csv(Path(raw_dir, "test.csv"), encoding="utf-8")
        else:
            if self.dataset and "test" in self.dataset:
                test_df = self.dataset["test"].to_pandas()
                if self.modality == "image":
                    test_df = self.save_images_and_update_df(test_df, raw_dir, "test")
                test_df.to_csv(Path(raw_dir, "test.csv"), index=False, encoding="utf-8")
            else:
                test_df = None
        return df, test_df

    def save_images_and_update_df(self, df, raw_dir, split):
        """Write each row's image bytes to <split>_images/<idx>.jpg and
        replace the image column with the relative file path."""
        abs_image_dir = Path(raw_dir, f"{split}_images")
        rel_image_dir = f"raw/{split}_images"
        abs_image_dir.mkdir(parents=True, exist_ok=True)

        def process_image(idx, row):
            # Decode the raw bytes; force RGB since JPEG cannot store alpha.
            image_bytes = row[self.image_col]["bytes"]
            image = Image.open(io.BytesIO(image_bytes))
            if image.mode == "RGBA":
                image = image.convert("RGB")
            img_path = Path(abs_image_dir, f"{idx}.jpg")
            rel_img_path = f"{rel_image_dir}/{idx}.jpg"
            image.save(img_path)
            return rel_img_path

        df["image"] = df.apply(lambda row: process_image(row.name, row), axis=1)
        return df

    def get_df_head(self, raw_df):
        """Return the first five rows as dicts (overrides string rendering)."""
        examples = []
        for i in range(5):
            examples.append(raw_df.iloc[i].to_dict())
        return examples

    def get_dataset_info(self):
        """Extend the base info with the HF dataset's own description."""
        dataset_info = super().get_dataset_info()
        dataset = self.dataset
        dataset_info["description"] = dataset["train"].info.description
        return dataset_info
if __name__ == "__main__":
    # Prepare every HuggingFace-hosted dataset and write its registry file.
    dataset_dir = DATA_CONFIG["datasets_dir"]
    args = parse_args()
    force_update = args.force_update
    save_analysis_pool = args.save_analysis_pool
    datasets_dict = {"datasets": {}}
    solution_designer = SolutionDesigner()
    for dataset_meta in HFDATSETS:
        hf_dataset = HFExpDataset(
            dataset_meta["name"],
            dataset_dir,
            dataset_meta["dataset_name"],
            target_col=dataset_meta["target_col"],
            image_col=dataset_meta.get("image_col", ""),
            force_update=force_update,
            modality=dataset_meta["modality"],
        )
        asyncio.run(process_dataset(hf_dataset, solution_designer, save_analysis_pool, datasets_dict))
    # HF datasets get their own registry file, separate from datasets.yaml.
    save_datasets_dict_to_yaml(datasets_dict, "hf_datasets.yaml")
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/sela/data/custom_task.py | metagpt/ext/sela/data/custom_task.py | import os
from metagpt.ext.sela.data.dataset import SPECIAL_INSTRUCTIONS
from metagpt.ext.sela.runner.mle_bench.instructions import (
ADDITIONAL_NOTES,
INSTRUCTIONS,
INSTRUCTIONS_OBFUSCATED,
)
MLE_BENCH_FILES = ["description.md", "description_obfuscated.md"]
MLE_REQUIREMENTS = """
{instructions}
{additonal_notes}
COMPETITION INSTRUCTIONS
------
{task_description}
## More Instructions
- You should split the training data into train and dev set with a seed of 42.
- You should use the dev set to improve your model. Print the final dev set score after training.
- output_dir: {output_dir}
- Besides `submission.csv`, you should also save your `test_predictions.csv` and `dev_predictions.csv` in the output directory.
- Note that `test_predictions.csv` should be identical to `submission.csv`.
- Perform data analysis, data preprocessing, feature engineering, and modeling to predict the target. {special_instruction}
**Do not make any plots or visualizations.**
"""
def get_mle_task_id(dataset_dir):
    """Extract the MLE-bench competition id from a dataset path.

    The id is the third-from-last "/"-separated segment of *dataset_dir*.
    """
    segments = dataset_dir.split("/")
    return segments[-3]
def get_mle_is_lower_better(task):
    """True when the competition's metric is minimized (lower is better)."""
    # Imported lazily so mlebench is only required when grading MLE-bench tasks.
    from mlebench.data import get_leaderboard
    from mlebench.registry import registry

    competition = registry.get_competition(task)
    competition_leaderboard = get_leaderboard(competition)
    return competition.grader.is_lower_better(competition_leaderboard)
def get_mle_bench_requirements(dataset_dir, data_config, special_instruction, obfuscated=False):
    """Build the full MLE-bench requirement prompt for one competition.

    Creates the per-task output directory, picks the plain or obfuscated
    competition description, and fills MLE_REQUIREMENTS. The assembled
    prompt is printed before being returned.
    """
    work_dir = data_config["work_dir"]
    task = get_mle_task_id(dataset_dir)
    output_dir = f"{work_dir}/{task}"
    final_output_dir = f"{work_dir}/submission"
    os.makedirs(output_dir, exist_ok=True)
    if special_instruction:
        special_instruction = SPECIAL_INSTRUCTIONS[special_instruction]
    else:
        special_instruction = ""
    if obfuscated:
        # Obfuscated runs write straight to the shared submission dir.
        instructions = INSTRUCTIONS_OBFUSCATED.format(dataset_dir=dataset_dir, output_dir=final_output_dir)
        task_file = "description_obfuscated.md"
    else:
        instructions = INSTRUCTIONS.format(dataset_dir=dataset_dir, output_dir=output_dir)
        task_file = "description.md"
    with open(os.path.join(dataset_dir, task_file), encoding="utf-8") as f:
        task_description = f.read()
    # "additonal_notes" (sic) matches the placeholder spelled that way in
    # the MLE_REQUIREMENTS template — keep them in sync if renaming.
    mle_requirement = MLE_REQUIREMENTS.format(
        instructions=instructions,
        additonal_notes=ADDITIONAL_NOTES,
        task_description=task_description,
        output_dir=output_dir,
        special_instruction=special_instruction,
    )
    print(mle_requirement)
    return mle_requirement
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/sela/evaluation/visualize_mcts.py | metagpt/ext/sela/evaluation/visualize_mcts.py | import textwrap
import matplotlib.pyplot as plt
import networkx as nx
from metagpt.ext.sela.search.tree_search import Node
NODE_TEMPLATE = """\
[Node {id}]
Plans:
{plans}
Simulated: {simulated}
Score: {score}, Visits: {num_visits}
"""
NODE_SIZE = 12000
NODE_FONT_SIZE = 18
def get_role_plans(role):
    """Render the role's planned tasks as 1-based numbered instruction lines."""
    numbered = []
    for idx, task in enumerate(role.planner.plan.tasks, start=1):
        numbered.append(f"{idx}. {task.instruction}")
    return numbered
def get_tree_text(node: Node):
    """Render the MCTS tree rooted at *node* as tab-indented text.

    Returns:
        (text, n) where *n* is the number of distinct task instructions
        seen anywhere in the tree.
    """
    role_dict = {}  # node.id -> loaded role, so each role is loaded only once
    code_set = set()  # all distinct task instructions across the tree

    def load_role(node):
        if node.id not in role_dict:
            role_dict[node.id] = node.load_role()
        return role_dict[node.id]

    def visualize_node(node: Node, previous_plans=None):
        # Render one node; when previous_plans is given, show only the plan
        # lines that differ from the parent (i.e. this node's new actions).
        role = load_role(node)
        node_id = node.id
        plans = role.planner.plan.tasks
        instruct_plans = [f"{i+1}. {task.instruction}" for i, task in enumerate(plans)]
        if previous_plans is not None:
            instruct_plans = [plan for plan, prev_plan in zip(instruct_plans, previous_plans) if plan != prev_plan]
        instruct_plans_text = "\n".join(instruct_plans)
        simulated = role.state_saved
        score = f"avg score: {node.avg_value()}, simulated score: {node.raw_reward}"
        num_visits = node.visited
        return NODE_TEMPLATE.format(
            id=node_id, plans=instruct_plans_text, simulated=simulated, score=score, num_visits=num_visits
        )

    def visualize_tree_text(node, depth=0, previous_plans=None):
        text = ""
        if node is not None:
            text += visualize_node(node, previous_plans)
            role = load_role(node)
            code_set.update({task.instruction for task in role.planner.plan.tasks})
            previous_plans = get_role_plans(role)
            for child in node.children:
                # Each generation is indented one tab deeper than its parent.
                text += textwrap.indent(visualize_tree_text(child, depth + 1, previous_plans), "\t")
        return text

    num_simulations = node.visited
    text = f"Number of simulations: {num_simulations}\n"
    text += visualize_tree_text(node)
    return text, len(code_set)
def get_node_color(node):
    """Map a visualization node to a hex color.

    Unvisited nodes are light gray; visited nodes fade from red (low
    average value) to green (high average value).

    Args:
        node: graph-node attribute dict with "visits" and "avg_value".

    Returns:
        A "#RRGGBB" hex color string.
    """
    if node["visits"] == 0:
        return "#D3D3D3"
    # avg_value is expected in [0, 1]; clamp so out-of-range scores cannot
    # produce malformed hex strings (negative or 3-digit components).
    avg_value = min(1.0, max(0.0, node["avg_value"]))
    # Convert avg_value to a color ranging from red (low) to green (high)
    red = int(255 * (1 - avg_value))
    green = int(255 * avg_value)
    return f"#{red:02X}{green:02X}00"
def visualize_tree(graph, show_instructions=False, save_path=""):
    """Draw the MCTS tree graph with matplotlib, optionally saving to file.

    Nodes are laid out by BFS depth from the root node "0" and colored by
    get_node_color(); with show_instructions=True each node's instruction
    text is printed beside it.
    """
    # Use a hierarchical layout for tree-like visualization
    pos = nx.spring_layout(graph, k=0.9, iterations=50)
    plt.figure(figsize=(30, 20))  # Further increase figure size for better visibility
    # Calculate node levels
    root = "0"
    levels = nx.single_source_shortest_path_length(graph, root)
    max_level = max(levels.values())
    # Adjust y-coordinates based on levels and x-coordinates to prevent overlap
    nodes_by_level = {}
    for node, level in levels.items():
        if level not in nodes_by_level:
            nodes_by_level[level] = []
        nodes_by_level[level].append(node)
    for level, nodes in nodes_by_level.items():
        y = 1 - level / max_level
        x_step = 1.0 / (len(nodes) + 1)
        for i, node in enumerate(sorted(nodes)):
            pos[node] = ((i + 1) * x_step, y)
    # Draw edges
    nx.draw_networkx_edges(graph, pos, edge_color="gray", arrows=True, arrowsize=40, width=3)
    # Draw nodes
    node_colors = [get_node_color(graph.nodes[node]) for node in graph.nodes]
    nx.draw_networkx_nodes(graph, pos, node_size=NODE_SIZE, node_color=node_colors)
    # Add labels to nodes
    labels = nx.get_node_attributes(graph, "label")
    nx.draw_networkx_labels(graph, pos, labels, font_size=NODE_FONT_SIZE)
    if show_instructions:
        # Add instructions to the right side of nodes
        instructions = nx.get_node_attributes(graph, "instruction")
        for node, (x, y) in pos.items():
            wrapped_text = textwrap.fill(instructions[node], width=30)  # Adjust width as needed
            plt.text(x + 0.05, y, wrapped_text, fontsize=15, ha="left", va="center")
    plt.title("MCTS Tree Visualization", fontsize=40)
    plt.axis("off")  # Turn off axis
    plt.tight_layout()
    if save_path:
        plt.savefig(save_path)
    plt.show()
def build_tree_recursive(graph, parent_id, node, node_order, start_task_id=2):
    """
    Recursively builds the entire tree starting from the root node.
    Adds nodes and edges to the NetworkX graph.
    """
    role = node.load_role()
    depth = node.get_depth()
    # The root aggregates the instructions of the first `start_task_id` tasks;
    # a deeper node maps to the single task at its depth offset.
    if depth == 0:
        initial = [role.planner.plan.tasks[idx].instruction for idx in range(start_task_id)]
        instruction = "\n\n".join(initial)
    else:
        instruction = role.planner.plan.tasks[depth + start_task_id - 1].instruction
    print(instruction)

    # Add the current node with attributes to the graph (parent_id doubles as
    # this node's graph key: the caller passes the id to use for this node).
    dev_score = node.raw_reward.get("dev_score", 0) * 100
    avg_score = node.avg_value() * 100
    try:
        order = node_order.index(node.id)
    except ValueError:
        order = ""
    node_label = f"{node.id}\nAvg: {avg_score:.1f}\nScore: {dev_score:.1f}\nVisits: {node.visited}\nOrder: {order}"
    graph.add_node(
        parent_id,
        label=node_label,
        avg_value=node.avg_value(),
        dev_score=dev_score,
        visits=node.visited,
        instruction=instruction,
    )

    # Leaf node: nothing further to attach.
    if not node.children:
        return

    # Recursively create all child nodes, keying each by "<parent>-<index>".
    for child_index, child in enumerate(node.children):
        child_key = f"{parent_id}-{child_index}"
        graph.add_edge(parent_id, child_key)
        build_tree_recursive(graph, child_key, child, node_order)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/sela/evaluation/evaluation.py | metagpt/ext/sela/evaluation/evaluation.py | from pathlib import Path
import numpy as np
from sklearn.metrics import accuracy_score, f1_score, mean_squared_error, roc_auc_score
def evaluate_score(pred, gt, metric):
    """Compute an evaluation score for predictions against ground truth.

    Args:
        pred: Predicted labels/values.
        gt: Ground-truth labels/values.
        metric: One of "accuracy", "f1", "f1 weighted", "roc_auc",
            "rmse", "log rmse".

    Returns:
        The metric value as a float.

    Raises:
        ValueError: If ``metric`` is not supported.
    """
    if metric == "accuracy":
        return accuracy_score(gt, pred)
    elif metric == "f1":
        unique_classes = sorted(np.unique(gt).tolist())
        if 1 in unique_classes and 0 in unique_classes:
            pos_label = 1
        else:
            # Binary task with non-{0, 1} labels: score the first class.
            # Multi-class input falls through with pos_label=None (f1_score
            # will raise for average="binary" in that case).
            pos_label = unique_classes[0] if len(unique_classes) == 2 else None
        return f1_score(gt, pred, pos_label=pos_label)
    elif metric == "f1 weighted":
        return f1_score(gt, pred, average="weighted")
    elif metric == "roc_auc":
        return roc_auc_score(gt, pred)
    elif metric == "rmse":
        # sqrt(MSE) instead of mean_squared_error(..., squared=False): the
        # `squared` kwarg was deprecated in scikit-learn 1.4 and removed in 1.6.
        return float(np.sqrt(mean_squared_error(gt, pred)))
    elif metric == "log rmse":
        return float(np.sqrt(mean_squared_error(np.log1p(gt), np.log1p(pred))))
    else:
        raise ValueError(f"Metric {metric} not supported")
def node_evaluate_score_sela(node):
    """Score a SELA search node on its test split using the dataset's configured metric."""
    predictions = node.get_and_move_predictions("test")["target"]
    ground_truth = node.get_gt("test")["target"]
    chosen_metric = node.state["dataset_config"]["metric"]
    return evaluate_score(predictions, ground_truth, chosen_metric)
def node_evaluate_score_mlebench(node):
    # TODO
    """Grade a node's test predictions with mlebench and return the report dict."""
    from mlebench.grade import grade_csv
    from mlebench.registry import registry

    task_name = node.state["task"]
    # prepared/public/../../.. — walk up to the dataset root mlebench expects
    dataset_root = Path(node.state["custom_dataset_dir"]).parent.parent.parent
    submission = Path(node.get_predictions_path("test"))

    competition = registry.set_data_dir(dataset_root).get_competition(task_name)
    graded_report = grade_csv(submission, competition).to_dict()
    graded_report["submission_path"] = str(submission)
    return graded_report
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/werewolf/schema.py | metagpt/ext/werewolf/schema.py | from typing import Any
from pydantic import BaseModel, Field, field_validator
from metagpt.schema import Message
from metagpt.utils.common import any_to_str_set
class RoleExperience(BaseModel):
    """One recorded experience of a werewolf-game role, indexed for RAG retrieval."""

    id: str = ""  # unique id, assigned as "<profile>-<name>-step<i>-round_<round_id>" when stored
    name: str = ""  # player name
    profile: str  # role profile, e.g. "Werewolf" / "Seer"
    reflection: str  # the role's reflection on the game state; doubles as the search key
    instruction: str = ""  # moderator instruction at that step
    response: str  # what the role said or did
    outcome: str = ""  # "won" or "lost", filled in at game end
    round_id: str = ""  # game-round identifier (a timestamp)
    game_setup: str = ""  # full game-setup text (reveals true roles)
    version: str = ""  # experiment/version tag

    def rag_key(self) -> str:
        """For search"""
        return self.reflection
class WwMessage(Message):
    # Werewolf Message
    # restricted_to: names of the roles/players allowed to receive this message;
    # presumably an empty set means unrestricted — verify against the env routing.
    restricted_to: set[str] = Field(default=set(), validate_default=True)

    @field_validator("restricted_to", mode="before")
    @classmethod
    def check_restricted_to(cls, restricted_to: Any):
        # Normalize whatever was passed (str, class, iterable, falsy) into a set of strings.
        return any_to_str_set(restricted_to if restricted_to else set())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/werewolf/__init__.py | metagpt/ext/werewolf/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/werewolf/werewolf_game.py | metagpt/ext/werewolf/werewolf_game.py | from typing import Any, Optional
from metagpt.actions.add_requirement import UserRequirement
from metagpt.context import Context
from metagpt.environment.werewolf.werewolf_env import WerewolfEnv
from metagpt.ext.werewolf.schema import WwMessage
from metagpt.team import Team
class WerewolfGame(Team):
    """Use the "software company paradigm" to hold a werewolf game"""

    env: Optional[WerewolfEnv] = None  # werewolf-specific environment

    def __init__(self, context: Context = None, **data: Any):
        # Calls the class *after* Team in the MRO, deliberately skipping
        # Team.__init__ — presumably to avoid Team's default environment
        # setup, since the werewolf env is created below. TODO confirm
        # against Team.__init__.
        super(Team, self).__init__(**data)
        ctx = context or Context()
        if not self.env:
            self.env = WerewolfEnv(context=ctx)
        else:
            self.env.context = ctx  # The `env` object is allocated by deserialization

    def run_project(self, idea):
        """Run a project from user instruction."""
        self.idea = idea
        # Kick off the game; only the Moderator may receive this bootstrap message.
        self.env.publish_message(
            WwMessage(role="User", content=idea, cause_by=UserRequirement, restricted_to={"Moderator"})
        )
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/werewolf/actions/common_actions.py | metagpt/ext/werewolf/actions/common_actions.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
import json
from tenacity import retry, stop_after_attempt, wait_fixed
from metagpt.actions import Action
from metagpt.logs import logger
from metagpt.utils.common import parse_json_code_block
def log_and_parse_json(name: str, rsp: str) -> dict:
    """Flatten an LLM response to one line, log it, and parse its first JSON code block."""
    flattened = rsp.replace("\n", " ")
    logger.debug(f"{name} result: {flattened}")
    first_block = parse_json_code_block(flattened)[0]
    return json.loads(first_block)
class Speak(Action):
    """Action: Any speak action in a game.

    Builds a JSON-shaped prompt from the game background, conversation
    history, the role's reflection/strategy and retrieved past experiences,
    then asks the LLM to either give a speech or cast a vote, depending on
    the moderator's latest instruction.
    """

    # Placeholders of the form __token__ are substituted in run(); the braces
    # are literal JSON punctuation shown to the LLM, not format fields.
    PROMPT_TEMPLATE: str = """
    {
    "BACKGROUND": "It's a Werewolf game, in this game, we have 2 werewolves, 2 villagers, 1 guard, 1 witch, 1 seer. You are __profile__. Note that villager, seer, guard and witch are all in villager side, they have the same objective. Werewolves can collectively hunt ONE player at night."
    ,"HISTORY": "You have knowledge to the following conversation: __context__"
    ,"ATTENTION": "You can NOT VOTE a player who is NOT ALIVE now!"
    ,"REFLECTION": "__reflection__"
    ,"STRATEGY": __strategy__
    ,"PAST_EXPERIENCES": "__experiences__"
    ,"MODERATOR_INSTRUCTION": __latest_instruction__,
    ,"RULE": "Please follow the moderator's latest instruction, figure out if you need to speak your opinion or directly to vote:
    1. If the instruction is to SPEAK, speak in 200 words. Remember the goal of your role and try to achieve it using your speech;
    2. If the instruction is to VOTE, you MUST vote and ONLY say 'I vote to eliminate PlayerX', replace PlayerX with the actual player name, DO NOT include any other words."
    ,"OUTPUT_FORMAT":
    {
    "ROLE": "Your role, in this case, __profile__"
    ,"PLAYER_NAME": "Your name, in this case, __name__"
    ,"LIVING_PLAYERS": "List living players based on MODERATOR_INSTRUCTION. Return a json LIST datatype."
    ,"THOUGHTS": "Based on `MODERATOR_INSTRUCTION` and `RULE`, carefully think about what to say or vote so that your chance of win as __profile__ maximizes.
    If you find similar situation in `PAST_EXPERIENCES`, you may draw lessons from them to refine your strategy, take better vote action, or improve your speech.
    Give your step-by-step thought process, you should think no more than 3 steps. For example: My step-by-step thought process:..."
    ,"RESPONSE": "Based on `MODERATOR_INSTRUCTION`, `RULE`, and the 'THOUGHTS' you had, express your opinion or cast a vote."
    }
    }
    """

    STRATEGY: str = """
    Decide whether to reveal your identity based on benefits vs. risks, provide useful information, and vote to eliminate the most suspicious.
    If you have special abilities, pay attention to those who falsely claims your role, for they are probably werewolves.
    """

    name: str = "Speak"

    @retry(stop=stop_after_attempt(2), wait=wait_fixed(1))
    async def run(
        self,
        profile: str,
        name: str,
        context: str,
        latest_instruction: str,
        reflection: str = "",
        experiences: str = "",
    ):
        """Fill the prompt template and return the LLM's RESPONSE field.

        Args:
            profile: Role profile (e.g. "Werewolf", "Seer").
            name: Player name (e.g. "Player1").
            context: Conversation history visible to this role.
            latest_instruction: The moderator's most recent instruction.
            reflection: The role's current reflection on the game state.
            experiences: Formatted past experiences retrieved for this role.

        Returns:
            The speech or vote text produced by the LLM.
        """
        prompt = (
            self.PROMPT_TEMPLATE.replace("__context__", context)
            .replace("__profile__", profile)
            .replace("__name__", name)
            .replace("__latest_instruction__", latest_instruction)
            .replace("__strategy__", self.STRATEGY)
            .replace("__reflection__", reflection)
            .replace("__experiences__", experiences)
        )
        rsp = await self._aask(prompt)
        rsp_json = log_and_parse_json(self.name, rsp)

        return rsp_json["RESPONSE"]
class NighttimeWhispers(Action):
    """
    Action: nighttime whispers with thinking processes

    Usage Example:
        class Hunt(NighttimeWhispers):
            def __init__(self, name="Hunt", context=None, llm=None):
                super().__init__(name, context, llm)

        class Protect(NighttimeWhispers):
            def __init__(self, name="Protect", context=None, llm=None):
                super().__init__(name, context, llm)

        class Verify(NighttimeWhispers):
            def __init__(self, name="Verify", context=None, llm=None):
                super().__init__(name, context, llm)

        class Save(NighttimeWhispers):
            def __init__(self, name="Save", context=None, llm=None):
                super().__init__(name, context, llm)

            def _update_prompt_json(self, prompt_json: dict, profile: str, name: str, context: str, **kwargs):
                del prompt_json['ACTION']
                del prompt_json['ATTENTION']
                prompt_json["OUTPUT_FORMAT"]["THOUGHTS"] = "It is night time. Return the thinking steps of your decision of whether to save the player JUST be killed at this night."
                prompt_json["OUTPUT_FORMAT"]["RESPONSE"] = "Follow the Moderator's instruction, decide whether you want to save that person or not. Return SAVE or PASS."
                return prompt_json

        class Poison(NighttimeWhispers):
            def __init__(self, name="Poison", context=None, llm=None):
                super().__init__(name, context, llm)

            def _update_prompt_json(self, prompt_json: dict, profile: str, name: str, context: str, **kwargs):
                prompt_json["OUTPUT_FORMAT"]["RESPONSE"] += "Or if you want to PASS, return PASS."
                return prompt_json
    """

    # This template is itself valid JSON: it is parsed with json.loads, placeholder
    # tokens (__token__) are substituted, subclasses may tweak it, then it is
    # re-serialized as the final prompt.
    PROMPT_TEMPLATE: str = """
    {
    "BACKGROUND": "It's a Werewolf game, in this game, we have 2 werewolves, 2 villagers, 1 guard, 1 witch, 1 seer. You are __profile__. Note that villager, seer, guard and witch are all in villager side, they have the same objective. Werewolves can collectively hunt ONE player at night."
    ,"HISTORY": "You have knowledge to the following conversation: __context__"
    ,"ACTION": "Choose one living player to __action__."
    ,"ATTENTION": "1. You can only __action__ a player who is alive this night! And you can not __action__ a player who is dead this night! 2. `HISTORY` is all the information you observed, DONT hallucinate other player actions!"
    ,"REFLECTION": "__reflection__"
    ,"STRATEGY": "__strategy__"
    ,"PAST_EXPERIENCES": "__experiences__"
    ,"OUTPUT_FORMAT":
    {
    "ROLE": "Your role, in this case, __profile__"
    ,"PLAYER_NAME": "Your name, in this case, __name__"
    ,"LIVING_PLAYERS": "List the players who is alive based on moderator's latest instruction. Return a json LIST datatype."
    ,"THOUGHTS": "Choose one living player from `LIVING_PLAYERS` to __action__ this night. Return the reason why you choose to __action__ this player. If you observe nothing at first night, DONT imagine unexisting player actions! If you find similar situation in `PAST_EXPERIENCES`, you may draw lessons from them to refine your strategy and take better actions. Give your step-by-step thought process, you should think no more than 3 steps. For example: My step-by-step thought process:..."
    ,"RESPONSE": "As a __profile__, you should choose one living player from `LIVING_PLAYERS` to __action__ this night according to the THOUGHTS you have just now. Return the player name ONLY."
    }
    }
    """

    STRATEGY: str = """
    Decide which player is most threatening to you or most needs your support, take your action correspondingly.
    """

    name: str = "NightTimeWhispers"

    def _construct_prompt_json(
        self, role_profile: str, role_name: str, context: str, reflection: str, experiences: str, **kwargs
    ):
        """Build the night-action prompt: substitute placeholders, let the
        subclass customize the dict, and serialize back to JSON text."""
        prompt_template = self.PROMPT_TEMPLATE

        def replace_string(prompt_json: dict):
            # Recursively substitute __token__ placeholders in every string
            # value, descending into nested dicts (e.g. OUTPUT_FORMAT).
            k: str
            for k in prompt_json.keys():
                if isinstance(prompt_json[k], dict):
                    prompt_json[k] = replace_string(prompt_json[k])
                    continue
                prompt_json[k] = prompt_json[k].replace("__profile__", role_profile)
                prompt_json[k] = prompt_json[k].replace("__name__", role_name)
                prompt_json[k] = prompt_json[k].replace("__context__", context)
                prompt_json[k] = prompt_json[k].replace("__action__", self.name)
                prompt_json[k] = prompt_json[k].replace("__strategy__", self.STRATEGY)
                prompt_json[k] = prompt_json[k].replace("__reflection__", reflection)
                prompt_json[k] = prompt_json[k].replace("__experiences__", experiences)
            return prompt_json

        prompt_json: dict = json.loads(prompt_template)
        prompt_json = replace_string(prompt_json)

        # Subclass hook — may delete sections or rewrite the output format.
        prompt_json: dict = self._update_prompt_json(
            prompt_json, role_profile, role_name, context, reflection, experiences, **kwargs
        )
        assert isinstance(prompt_json, dict)

        prompt: str = json.dumps(prompt_json, indent=4, ensure_ascii=False)
        return prompt

    def _update_prompt_json(
        self, prompt_json: dict, role_profile: str, role_name: str, context: str, reflection: str, experiences: str
    ) -> dict:
        # one can modify the prompt_json dictionary here
        return prompt_json

    @retry(stop=stop_after_attempt(2), wait=wait_fixed(1))
    async def run(self, context: str, profile: str, name: str, reflection: str = "", experiences: str = ""):
        """Ask the LLM for a night action; returns "<ActionName> <RESPONSE>"."""
        prompt = self._construct_prompt_json(
            role_profile=profile, role_name=name, context=context, reflection=reflection, experiences=experiences
        )

        rsp = await self._aask(prompt)
        rsp_json = log_and_parse_json(self.name, rsp)

        return f"{self.name} " + rsp_json["RESPONSE"]
class Reflect(Action):
    """Action: before acting on the moderator's instruction, summarize the game
    state per player and infer everyone's true role; returns the REFLECTION
    section of the LLM's answer as a JSON string."""

    PROMPT_TEMPLATE: str = """
    {
    "BACKGROUND": "It's a Werewolf game, in this game, we have 2 werewolves, 2 villagers, 1 guard, 1 witch, 1 seer. You are __profile__. Note that villager, seer, guard and witch are all in villager side, they have the same objective. Werewolves can collectively hunt ONE player at night."
    ,"HISTORY": "You have knowledge to the following conversation: __context__"
    ,"MODERATOR_INSTRUCTION": __latest_instruction__,
    ,"OUTPUT_FORMAT" (a json):
    {
    "ROLE": "Your role, in this case, __profile__"
    ,"PLAYER_NAME": "Your name, in this case, __name__"
    "GAME_STATES": "You are about to follow `MODERATOR_INSTRUCTION`, but before taking any action, analyze each player, including the living and the dead, and summarize the game states.
    For each player, your reflection should be a ONE-LINE json covering the following dimension, return a LIST of jsons (return an empty LIST for the first night):
    [
    {"TARGET": "the player you will analyze, if the player is yourself or your werewolf partner, indicate it" ,"STATUS": "living or dead, if dead, how was he/she possibly killed?", "CLAIMED_ROLE": "claims a role or not, if so, what role, any contradiction to others? If there is no claim, return 'None'", "SIDE_WITH": "sides with which players? If none, return 'None'", "ACCUSE": "accuses which players? If none, return 'None'"}
    ,{...}
    ,...
    ]"
    ,"REFLECTION": "Based on the whole `GAME_STATES`, return a json (return an empty string for the first night):
    {
    "Player1": "the true role (werewolf / special role / villager, living or dead) you infer about him/her, and why is this role? If the player is yourself or your werewolf partner, indicate it."
    ,...
    ,"Player7": "the true role (werewolf / special role / villager, living or dead) you infer about him/her, and why is this role? If the player is yourself or your werewolf partner, indicate it."
    ,"GAME_STATE_SUMMARIZATION": "summarize the current situation from your standpoint in one sentence, your summarization should catch the most important information from your reflection, such as conflicts, number of living werewolves, special roles, and villagers."
    }"
    }
    }
    """

    name: str = "Reflect"

    @retry(stop=stop_after_attempt(2), wait=wait_fixed(1))
    async def run(self, profile: str, name: str, context: str, latest_instruction: str):
        """Fill the template, query the LLM, and return its REFLECTION as a JSON string."""
        prompt = (
            self.PROMPT_TEMPLATE.replace("__context__", context)
            .replace("__profile__", profile)
            .replace("__name__", name)
            .replace("__latest_instruction__", latest_instruction)
        )
        rsp = await self._aask(prompt)
        rsp_json = log_and_parse_json(self.name, rsp)

        return json.dumps(rsp_json["REFLECTION"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/werewolf/actions/seer_actions.py | metagpt/ext/werewolf/actions/seer_actions.py | from metagpt.ext.werewolf.actions.common_actions import NighttimeWhispers
class Verify(NighttimeWhispers):
    """Action: the Seer checks one player's identity at night; the moderator
    answers whether the target is a werewolf or a good guy."""

    name: str = "Verify"
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/werewolf/actions/werewolf_actions.py | metagpt/ext/werewolf/actions/werewolf_actions.py | from metagpt.ext.werewolf.actions.common_actions import NighttimeWhispers, Speak
class Hunt(NighttimeWhispers):
    """Action: the werewolves pick one player to kill at night."""

    name: str = "Hunt"
class Impersonate(Speak):
    """Action: werewolf impersonating a good guy in daytime speak"""

    # Fixed typo in the prompt text: "Remmber" -> "Remember".
    STRATEGY: str = """
    Try continuously impersonating a role, such as Seer, Guard, Villager, etc., in order to mislead
    other players, make them trust you, and thus hiding your werewolf identity. However, pay attention to what your werewolf partner said,
    DONT claim the same role as your werewolf partner. Remember NOT to reveal your real identity as a werewolf!
    """

    name: str = "Impersonate"
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/werewolf/actions/experience_operation.py | metagpt/ext/werewolf/actions/experience_operation.py | import json
from typing import Optional
import chromadb
from pydantic import model_validator
from metagpt.actions import Action
from metagpt.const import DEFAULT_WORKSPACE_ROOT
from metagpt.environment.werewolf.const import RoleType
from metagpt.ext.werewolf.schema import RoleExperience
from metagpt.logs import logger
from metagpt.rag.engines.simple import SimpleEngine
from metagpt.rag.schema import ChromaIndexConfig, ChromaRetrieverConfig
from metagpt.utils.common import read_json_file, write_json_file
DEFAULT_COLLECTION_NAME = "role_reflection"  # FIXME: some hard code for now
# On-disk location of the Chroma vector store holding role experiences.
PERSIST_PATH = DEFAULT_WORKSPACE_ROOT.joinpath("werewolf_game/chroma")
PERSIST_PATH.mkdir(parents=True, exist_ok=True)  # NOTE: side effect at import time
class AddNewExperiences(Action):
    """Action: persist role experiences into the Chroma vector store and a
    local JSON archive."""

    name: str = "AddNewExperience"
    collection_name: str = DEFAULT_COLLECTION_NAME
    delete_existing: bool = False  # drop any existing collection before adding
    engine: Optional[SimpleEngine] = None

    @model_validator(mode="after")
    def validate_collection(self):
        # A pydantic `after` model validator must return the model instance;
        # returning None makes `model_validate` yield None. Hence `return self`
        # on every path.
        if self.engine:
            return self
        if self.delete_existing:
            try:
                # implement engine `DELETE` method later
                chromadb.PersistentClient(PERSIST_PATH.as_posix()).delete_collection(self.collection_name)
            except Exception as exp:
                logger.error(f"delete chroma collection: {self.collection_name} failed, exp: {exp}")
        self.engine = SimpleEngine.from_objs(
            retriever_configs=[
                ChromaRetrieverConfig(
                    persist_path=PERSIST_PATH, collection_name=self.collection_name, metadata={"hnsw:space": "cosine"}
                )
            ]
        )
        return self

    def run(self, experiences: list[RoleExperience]):
        """Assign unique ids to the experiences, archive them locally, and index them."""
        if not experiences:
            return

        for i, exp in enumerate(experiences):
            exp.id = f"{exp.profile}-{exp.name}-step{i}-round_{exp.round_id}"

        AddNewExperiences._record_experiences_local(experiences)
        self.engine.add_objs(experiences)

    def add_from_file(self, file_path):
        """Load experiences from a JSON file and index those with a non-trivial reflection."""
        experiences = read_json_file(file_path)
        experiences = [RoleExperience.model_validate(item) for item in experiences]
        experiences = [exp for exp in experiences if len(exp.reflection) > 2]  # not "" or not '""'

        self.engine.add_objs(experiences)

    @staticmethod
    def _record_experiences_local(experiences: list[RoleExperience]):
        """Write the experiences as JSON under the workspace, grouped by version and round."""
        round_id = experiences[0].round_id
        version = experiences[0].version
        version = "test" if not version else version

        experiences = [exp.model_dump() for exp in experiences]
        experience_path = DEFAULT_WORKSPACE_ROOT.joinpath(f"werewolf_game/experiences/{version}")
        experience_path.mkdir(parents=True, exist_ok=True)

        save_path = f"{experience_path}/{round_id}.json"
        write_json_file(save_path, experiences)
        logger.info(f"experiences saved to {save_path}")
class RetrieveExperiences(Action):
    """Action: retrieve past experiences similar to a given reflection from the
    Chroma-backed experience pool."""

    name: str = "RetrieveExperiences"
    collection_name: str = DEFAULT_COLLECTION_NAME
    has_experiences: bool = True
    engine: Optional[SimpleEngine] = None
    topk: int = 10  # number of nearest experiences to retrieve

    @model_validator(mode="after")
    def validate_collection(self):
        # A pydantic `after` model validator must return the model instance;
        # returning None makes `model_validate` yield None. Hence `return self`
        # on every path.
        if self.engine:
            return self
        try:
            self.engine = SimpleEngine.from_index(
                index_config=ChromaIndexConfig(
                    persist_path=PERSIST_PATH, collection_name=self.collection_name, metadata={"hnsw:space": "cosine"}
                ),
                retriever_configs=[
                    ChromaRetrieverConfig(
                        similarity_top_k=self.topk,
                        persist_path=PERSIST_PATH,
                        collection_name=self.collection_name,
                        metadata={"hnsw:space": "cosine"},
                    )
                ],
            )
        except Exception as exp:
            logger.warning(f"No experience pool: {self.collection_name}, exp: {exp}")
        return self

    def run(self, query: str, profile: str, excluded_version: str = "", verbose: bool = False) -> str:
        """Retrieve and format past experiences similar to the current reflection.

        Args:
            query: The role's current reflection, used as the retrieval query.
            profile: Role profile used to filter the retrieved experiences.
            excluded_version: Version tag to exclude (e.g. the current experiment).
            verbose: If True, also log the retrieved experiences and top distance.

        Returns:
            A JSON string of formatted past experiences, or "" when unavailable.
        """
        if not self.engine or len(query) <= 2:  # not "" or not '""'
            logger.warning("engine is None or query too short")
            return ""

        # ablation experiment logic
        if profile == RoleType.WEREWOLF.value:  # role werewolf as baseline, don't use experiences
            logger.warning("Disable werewolves' experiences")
            return ""

        results = self.engine.retrieve(query)
        logger.info(f"retrieve {profile}'s experiences")

        experiences = [res.metadata["obj"] for res in results]
        past_experiences = []  # currently use post-process to filter, and later add `filters` in rag
        for exp in experiences:
            if exp.profile == profile and exp.version != excluded_version:
                past_experiences.append(exp)
        if verbose and results:
            # BUGFIX: past_experiences holds RoleExperience objects, not strings;
            # str.join on them raised TypeError. Stringify before joining.
            logger.info("past_experiences: {}".format("\n\n".join(str(exp) for exp in past_experiences)))
            distances = results[0].score
            logger.info(f"distances: {distances}")

        template = """
    {
        "Situation __i__": "__situation__"
        ,"Moderator's instruction": "__instruction__"
        ,"Your action or speech during that time": "__response__"
        ,"Reality": "In fact, it turned out the true roles are __game_step__",
        ,"Outcome": "You __outcome__ in the end"
    }
    """
        past_experiences = [
            (
                template.replace("__i__", str(i))
                .replace("__situation__", exp.reflection)
                .replace("__instruction__", exp.instruction)
                .replace("__response__", exp.response)
                .replace("__game_step__", exp.game_setup.replace("0 | Game setup:\n", "").replace("\n", " "))
                .replace("__outcome__", exp.outcome)
            )
            for i, exp in enumerate(past_experiences)
        ]
        logger.info("past_experiences: {}".format("\n".join(past_experiences)))
        logger.info("retrieval done")

        return json.dumps(past_experiences)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/werewolf/actions/guard_actions.py | metagpt/ext/werewolf/actions/guard_actions.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
from metagpt.ext.werewolf.actions.common_actions import NighttimeWhispers
class Protect(NighttimeWhispers):
    """Action: the Guard chooses one player to protect at night."""

    name: str = "Protect"
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/werewolf/actions/witch_actions.py | metagpt/ext/werewolf/actions/witch_actions.py | from metagpt.environment.werewolf.const import RoleActionRes
from metagpt.ext.werewolf.actions.common_actions import NighttimeWhispers
class Save(NighttimeWhispers):
    """Action: the Witch decides whether to save the player killed this night."""

    name: str = "Save"

    def _update_prompt_json(
        self, prompt_json: dict, role_profile: str, role_name: str, context: str, reflection: str, experiences: str
    ) -> dict:
        # The witch does not pick a target here: drop the target-selection
        # sections and rephrase the output format into a SAVE/PASS decision.
        del prompt_json["ACTION"]
        del prompt_json["ATTENTION"]
        prompt_json["OUTPUT_FORMAT"][
            "THOUGHTS"
        ] = "It is night time. Return the thinking steps of your decision of whether to save the player JUST killed this night."
        prompt_json["OUTPUT_FORMAT"][
            "RESPONSE"
        ] = "Follow the Moderator's instruction, decide whether you want to save that person or not. Return SAVE or PASS."
        return prompt_json

    async def run(self, *args, **kwargs):
        # NOTE(review): assumes the reply is exactly two tokens, e.g. "Save SAVE".
        rsp = await super().run(*args, **kwargs)
        action_name, rsp = rsp.split()
        return rsp  # reply only SAVE or PASS; no need to prefix the action name
class Poison(NighttimeWhispers):
    """Action: the Witch may poison one player at night, or pass."""

    STRATEGY: str = """
    Only poison a player if you are confident he/she is a werewolf. Don't poison a player randomly or at first night.
    If someone claims to be the witch, poison him/her, because you are the only witch, he/she can only be a werewolf.
    """

    name: str = "Poison"

    def _update_prompt_json(
        self, prompt_json: dict, role_profile: str, role_name: str, context: str, reflection: str, experiences: str
    ) -> dict:
        # Poisoning is optional: allow a PASS answer in addition to a target name.
        prompt_json["OUTPUT_FORMAT"]["RESPONSE"] += "Or if you want to PASS, return PASS."
        return prompt_json

    async def run(self, *args, **kwargs):
        rsp = await super().run(*args, **kwargs)
        if RoleActionRes.PASS.value in rsp.lower():
            # On PASS, strip the action name and reply just "PASS"; otherwise
            # keep "Poison PlayerX" unchanged.
            action_name, rsp = rsp.split()
        return rsp
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/werewolf/actions/__init__.py | metagpt/ext/werewolf/actions/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
from metagpt.ext.werewolf.actions.werewolf_actions import Hunt, Impersonate
from metagpt.ext.werewolf.actions.guard_actions import Protect
from metagpt.ext.werewolf.actions.seer_actions import Verify
from metagpt.ext.werewolf.actions.witch_actions import Save, Poison
from metagpt.ext.werewolf.actions.common_actions import Speak, NighttimeWhispers, Reflect
from metagpt.ext.werewolf.actions.experience_operation import AddNewExperiences, RetrieveExperiences
from metagpt.ext.werewolf.actions.moderator_actions import InstructSpeak
# Mapping from action name to its class, used to look up a role's special
# actions dynamically.
ACTIONS = {
    "Speak": Speak,
    "Hunt": Hunt,
    "Protect": Protect,
    "Verify": Verify,
    "Save": Save,
    "Poison": Poison,
    "Impersonate": Impersonate,
}

# Export the action classes and the ACTIONS mapping as well: they were imported
# above but previously missing from __all__, so `from ... import *` dropped them.
__all__ = [
    "Speak",
    "Hunt",
    "Protect",
    "Verify",
    "Save",
    "Poison",
    "Impersonate",
    "NighttimeWhispers",
    "Reflect",
    "AddNewExperiences",
    "RetrieveExperiences",
    "InstructSpeak",
    "ACTIONS",
]
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/werewolf/actions/moderator_actions.py | metagpt/ext/werewolf/actions/moderator_actions.py | from metagpt.actions import Action
from metagpt.environment.werewolf.const import STEP_INSTRUCTIONS
class InstructSpeak(Action):
    """Action: the moderator emits the scripted instruction for the current game step."""

    name: str = "InstructSpeak"

    async def run(self, step_idx, living_players, werewolf_players, player_hunted, player_current_dead):
        """Fill the step's instruction template.

        Args:
            step_idx: Index into STEP_INSTRUCTIONS.
            living_players: Names of players still alive.
            werewolf_players: Names of the werewolf players.
            player_hunted: The player the werewolves chose to kill tonight.
            player_current_dead: Players who died at this step (may be empty).

        Returns:
            (content, send_to, restricted_to) for the instruction message.
        """
        instruction_info = STEP_INSTRUCTIONS.get(
            step_idx, {"content": "Unknown instruction.", "send_to": {}, "restricted_to": {}}
        )
        content = instruction_info["content"]
        # Substitute placeholders individually with str.replace. The previous
        # sequential str.format calls raised KeyError whenever a template mixed
        # placeholders from different groups (format requires ALL fields at once).
        replacements = {
            "{living_players}": str(living_players),
            "{werewolf_players}": str(werewolf_players),
            "{werewolf_num}": str(len(werewolf_players)),
            "{player_hunted}": str(player_hunted),
            "{player_current_dead}": str(player_current_dead if player_current_dead else "No one"),
        }
        for placeholder, value in replacements.items():
            if placeholder in content:
                content = content.replace(placeholder, value)
        return content, instruction_info["send_to"], instruction_info["restricted_to"]
class ParseSpeak(Action):
    """Action: placeholder for parsing a player's speech; `run` is currently a
    no-op (the moderator parses speech itself — presumably in `_parse_speak`)."""

    name: str = "ParseSpeak"

    async def run(self):
        pass
class AnnounceGameResult(Action):
    """Action: compose the final game-over announcement."""

    async def run(self, winner: str, win_reason: str):
        announcement = "Game over! {}. The winner is the {}".format(win_reason, winner)
        return announcement
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/werewolf/roles/moderator.py | metagpt/ext/werewolf/roles/moderator.py | import re
from datetime import datetime
from typing import Union
from metagpt.actions.add_requirement import UserRequirement
from metagpt.const import DEFAULT_WORKSPACE_ROOT, MESSAGE_ROUTE_TO_ALL
from metagpt.environment.werewolf.const import (
STEP_INSTRUCTIONS,
RoleActionRes,
RoleState,
RoleType,
)
from metagpt.environment.werewolf.env_space import EnvAction, EnvActionType
from metagpt.ext.werewolf.actions import Hunt, Poison, Protect, Save, Verify
from metagpt.ext.werewolf.actions.moderator_actions import (
AnnounceGameResult,
InstructSpeak,
ParseSpeak,
)
from metagpt.ext.werewolf.roles.base_player import BasePlayer
from metagpt.ext.werewolf.schema import WwMessage
from metagpt.logs import logger
from metagpt.utils.common import any_to_str
class Moderator(BasePlayer):
name: str = RoleType.MODERATOR.value
profile: str = RoleType.MODERATOR.value
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # React to the user's kickoff message and to the moderator's own
        # instruct/parse cycle.
        self._watch([UserRequirement, InstructSpeak, ParseSpeak])
        self.set_actions([InstructSpeak, ParseSpeak, AnnounceGameResult])
        # game states
        self.step_idx = 0  # index into STEP_INSTRUCTIONS
        self.game_setup = ""  # full game-setup text (true roles), recorded for experiences
        self.werewolf_players = []  # names of the werewolf players
        self.winner = None  # e.g. "werewolf" when the wolves win; set at game end
        self.win_reason = None
        self.witch_poison_left = 1  # initial poison charge; presumably decremented elsewhere
        self.witch_antidote_left = 1  # initial antidote charge; presumably decremented elsewhere
    def update_player_status(self, player_names: list[str]):
        """Mark the given players as dead in the environment."""
        if not player_names:
            return
        roles_in_env = self.rc.env.get_roles()
        for role_setting, role in roles_in_env.items():
            for player_name in player_names:
                # role_setting is a string that contains the player's name
                if player_name in role_setting:
                    role.set_status(new_status=RoleState.DEAD)  # mark as dead
    def _record_all_experiences(self):
        """Persist each non-moderator role's experiences, tagged with its win/loss outcome."""
        logger.info(f"The winner of the game: {self.winner}, start to record roles' experiences")
        roles_in_env = self.rc.env.get_roles()
        timestamp = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
        for _, role in roles_in_env.items():
            if role == self:
                continue
            # NOTE(review): `in` is a substring test here — role.profile is
            # matched *inside* the werewolf role string rather than compared
            # for equality; confirm this is intended.
            if self.winner == "werewolf":
                outcome = "won" if role.profile in RoleType.WEREWOLF.value else "lost"
            else:
                outcome = "won" if role.profile not in RoleType.WEREWOLF.value else "lost"
            role.record_experiences(round_id=timestamp, outcome=outcome, game_setup=self.game_setup)
async def _parse_speak(self, memories):
latest_msg = memories[-1]
latest_msg_content = latest_msg.content
match = re.search(r"Player[0-9]+", latest_msg_content[-10:]) # FIXME: hard code truncation
target = match.group(0) if match else ""
# default return
msg_content = "Understood"
restricted_to = set()
msg_cause_by = latest_msg.cause_by
if msg_cause_by == any_to_str(Hunt):
self.rc.env.step(
EnvAction(
action_type=EnvActionType.WOLF_KILL, player_name=latest_msg.sent_from, target_player_name=target
)
)
elif msg_cause_by == any_to_str(Protect):
self.rc.env.step(
EnvAction(
action_type=EnvActionType.GUARD_PROTECT, player_name=latest_msg.sent_from, target_player_name=target
)
)
elif msg_cause_by == any_to_str(Verify):
if target in self.werewolf_players:
msg_content = f"{target} is a werewolf"
else:
msg_content = f"{target} is a good guy"
restricted_to = {RoleType.MODERATOR.value, RoleType.SEER.value}
elif msg_cause_by == any_to_str(Save):
if RoleActionRes.PASS.value in latest_msg_content.lower():
# the role ignore to response, answer `pass`
pass
elif not self.witch_antidote_left:
msg_content = "You have no antidote left and thus can not save the player"
restricted_to = {RoleType.MODERATOR.value, RoleType.WITCH.value}
else:
self.rc.env.step(
EnvAction(
action_type=EnvActionType.WITCH_SAVE,
player_name=latest_msg.sent_from,
target_player_name=target,
)
)
elif msg_cause_by == any_to_str(Poison):
if RoleActionRes.PASS.value in latest_msg_content.lower():
pass
elif not self.witch_poison_left:
msg_content = "You have no poison left and thus can not poison the player"
restricted_to = {RoleType.MODERATOR.value, RoleType.WITCH.value}
else:
self.rc.env.step(
EnvAction(
action_type=EnvActionType.WITCH_POISON,
player_name=latest_msg.sent_from,
target_player_name=target,
)
)
return msg_content, restricted_to
def _update_player_status(self, step_idx: int, player_current_dead: list[str]):
"""update dead player's status"""
if step_idx in [15, 18]:
self.update_player_status(player_current_dead)
def _record_game_history(self, step_idx: int):
if step_idx and step_idx % len(STEP_INSTRUCTIONS) == 0 or self.winner:
logger.info("a night and day cycle completed, examine all history")
logger.debug(f"all_memories: {self.get_all_memories()}")
with open(DEFAULT_WORKSPACE_ROOT / "werewolf_transcript.txt", "w") as f:
f.write(self.get_all_memories())
async def _observe(self, ignore_memory=False) -> int:
news = []
if not news:
news = self.rc.msg_buffer.pop_all()
old_messages = [] if ignore_memory else self.rc.memory.get()
for m in news:
if len(m.restricted_to) and self.profile not in m.restricted_to and self.name not in m.restricted_to:
# if the msg is not send to the whole audience ("") nor this role (self.profile or self.name),
# then this role should not be able to receive it and record it into its memory
continue
self.rc.memory.add(m)
# add `MESSAGE_ROUTE_TO_ALL in n.send_to` make it to run `ParseSpeak`
self.rc.news = [
n
for n in news
if (n.cause_by in self.rc.watch or self.profile in n.send_to or MESSAGE_ROUTE_TO_ALL in n.send_to)
and n not in old_messages
]
return len(self.rc.news)
async def _think(self):
if self.winner:
self.rc.todo = AnnounceGameResult()
return
latest_msg = self.rc.memory.get()[-1]
if latest_msg.role in ["User", "Human", self.profile]:
# 1. 上一轮消息是用户指令,解析用户指令,开始游戏
# 2.1. 上一轮消息是Moderator自己的指令,继续发出指令,一个事情可以分几条消息来说
# 2.2. 上一轮消息是Moderator自己的解析消息,一个阶段结束,发出新一个阶段的指令
self.rc.todo = InstructSpeak()
else:
# 上一轮消息是游戏角色的发言,解析角色的发言
self.rc.todo = ParseSpeak()
return True
def _init_fields_from_obj(self, obs: dict[str, Union[int, str, list[str]]]):
self.game_setup = obs.get("game_setup", "")
self.step_idx = obs.get("step_idx", 0)
self.winner = obs.get("winner")
self.win_reason = obs.get("win_reason")
self.werewolf_players = obs.get("werewolf_players", [])
self.witch_poison_left = obs.get("witch_poison_left", 0)
self.witch_antidote_left = obs.get("witch_antidote_left", 0)
async def _act(self):
todo = self.rc.todo
logger.info(f"{self._setting} ready to {todo}")
memories = self.get_all_memories(mode="msg")
obs, _, _, _, _ = self.rc.env.step(action=EnvAction(action_type=EnvActionType.NONE))
living_players = obs["living_players"]
werewolf_players = obs["werewolf_players"]
player_hunted = obs["player_hunted"]
player_current_dead = obs["player_current_dead"]
self._init_fields_from_obj(obs)
# 若进行完一夜一日的循环,打印和记录一次完整发言历史
self._record_game_history(self.step_idx)
# 若一晚或一日周期结束,对当晚或当日的死者进行总结,并更新玩家状态
self._update_player_status(self.step_idx, player_current_dead)
if self.winner:
self._record_all_experiences()
# 根据_think的结果,执行InstructSpeak还是ParseSpeak, 并将结果返回
if isinstance(todo, InstructSpeak):
msg_content, msg_to_send_to, msg_restricted_to = await InstructSpeak().run(
self.step_idx,
living_players=living_players,
werewolf_players=werewolf_players,
player_hunted=player_hunted,
player_current_dead=player_current_dead,
)
# msg_content = f"Step {self.step_idx}: {msg_content}" # HACK: 加一个unique的step_idx避免记忆的自动去重
msg = WwMessage(
content=msg_content,
role=self.profile,
sent_from=self.name,
cause_by=InstructSpeak,
send_to=msg_to_send_to,
restricted_to=msg_restricted_to,
)
logger.info(f"current step_idx: {self.step_idx}")
self.rc.env.step(EnvAction(action_type=EnvActionType.PROGRESS_STEP)) # to update step_idx
elif isinstance(todo, ParseSpeak):
msg_content, msg_restricted_to = await self._parse_speak(memories)
# msg_content = f"Step {self.step_idx}: {msg_content}" # HACK: 加一个unique的step_idx避免记忆的自动去重
msg = WwMessage(
content=msg_content,
role=self.profile,
sent_from=self.name,
cause_by=ParseSpeak,
send_to={},
restricted_to=msg_restricted_to,
)
elif isinstance(todo, AnnounceGameResult):
msg_content = await AnnounceGameResult().run(winner=self.winner, win_reason=self.win_reason)
msg = WwMessage(content=msg_content, role=self.profile, sent_from=self.name, cause_by=AnnounceGameResult)
logger.info(f"{self._setting}: {msg_content}")
return msg
def get_all_memories(self, mode="str") -> str:
memories = self.rc.memory.get()
if mode == "str":
memories = [f"{m.sent_from}({m.role}): {m.content}" for m in memories]
memories = "\n".join(memories)
return memories
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/werewolf/roles/villager.py | metagpt/ext/werewolf/roles/villager.py | from metagpt.environment.werewolf.const import RoleType
from metagpt.ext.werewolf.roles.base_player import BasePlayer
class Villager(BasePlayer):
    """Plain villager: no special night-time skill, only public speech and votes."""

    name: str = RoleType.VILLAGER.value
    profile: str = RoleType.VILLAGER.value
    special_action_names: list[str] = []  # villagers have no special actions
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/werewolf/roles/witch.py | metagpt/ext/werewolf/roles/witch.py | from metagpt.environment.werewolf.const import RoleType
from metagpt.ext.werewolf.actions import InstructSpeak, Poison, Save, Speak
from metagpt.ext.werewolf.roles.base_player import BasePlayer
from metagpt.utils.common import any_to_str
class Witch(BasePlayer):
    """Witch player.

    The witch owns two single-use skills (save and poison), so `_think` is
    overridden here to route the Moderator's instruction to the right action.
    """

    name: str = RoleType.WITCH.value
    profile: str = RoleType.WITCH.value
    special_action_names: list[str] = ["Save", "Poison"]

    async def _think(self):
        """Route the incoming Moderator instruction to Speak, Save or Poison."""
        instruction = self.rc.news[0]
        # only an InstructSpeak message from the Moderator may trigger an action
        assert instruction.cause_by == any_to_str(InstructSpeak)

        if not instruction.restricted_to:
            # addressed to everyone: make a public statement (voting counts too)
            self.rc.todo = Speak()
        elif self.profile in instruction.restricted_to:
            # FIXME: hard code to split, restricted_to is "Moderator" or "Moderator,<profile>"
            # an encrypted message from the Moderator means: use a special skill;
            # the skill is selected by a keyword the Moderator puts in the text
            lowered = instruction.content.lower()
            for keyword, action_cls in (("save", Save), ("poison", Poison)):
                if keyword in lowered:
                    self.rc.todo = action_cls()
                    break
            else:
                raise ValueError("Moderator's instructions must include save or poison keyword")
        return True
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/werewolf/roles/werewolf.py | metagpt/ext/werewolf/roles/werewolf.py | from metagpt.environment.werewolf.const import RoleType
from metagpt.ext.werewolf.actions import Impersonate, Speak
from metagpt.ext.werewolf.roles.base_player import BasePlayer
class Werewolf(BasePlayer):
    """Werewolf player: hunts at night and must disguise its daytime speech."""

    name: str = RoleType.WEREWOLF.value
    profile: str = RoleType.WEREWOLF.value
    special_action_names: list[str] = ["Hunt"]

    async def _think(self):
        """Decide the next action, masking open speech as impersonation.

        Unlike other roles, a werewolf never speaks as itself during the day:
        whenever the base logic picks Speak, it is swapped for Impersonate.
        """
        await super()._think()
        chose_public_speech = isinstance(self.rc.todo, Speak)
        if chose_public_speech:
            self.rc.todo = Impersonate()
        return True
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/werewolf/roles/__init__.py | metagpt/ext/werewolf/roles/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
from metagpt.ext.werewolf.roles.base_player import BasePlayer
from metagpt.ext.werewolf.roles.guard import Guard
from metagpt.ext.werewolf.roles.seer import Seer
from metagpt.ext.werewolf.roles.villager import Villager
from metagpt.ext.werewolf.roles.werewolf import Werewolf
from metagpt.ext.werewolf.roles.witch import Witch
from metagpt.ext.werewolf.roles.moderator import Moderator
__all__ = ["BasePlayer", "Guard", "Moderator", "Seer", "Villager", "Witch", "Werewolf"]
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/werewolf/roles/human_player.py | metagpt/ext/werewolf/roles/human_player.py | from metagpt.environment.werewolf.const import RoleType
from metagpt.ext.werewolf.actions import Speak
from metagpt.ext.werewolf.roles import BasePlayer
from metagpt.ext.werewolf.schema import WwMessage
from metagpt.logs import logger
async def _act(self):
    """Replacement `_act` that asks a human for this turn's speech via stdin.

    Injected into a role class by `prepare_human_player`; mirrors the agent
    `_act` contract: reads the game history, prompts the human, and returns
    the reply wrapped in a WwMessage.
    """
    todo = self.rc.todo

    memories = self.get_all_memories()

    input_instruction = f"""
## As a reminder, you have access to the following game history:
{memories}

## You are {self.name}({self.profile})

## Guidance:
1. If you are performing a special action or exercising a vote,
end your response with "PlayerX", replace PlayerX with the actual player name, e.g., "..., kill/protect/poison/.../vote Player1".
2. If it is a daytime free speech, you can speak in whatever format.
Now, please speak:
"""
    rsp = input(input_instruction)  # wait for human input

    msg_cause_by = type(todo)
    # public speech is unrestricted; a special action is encrypted to the
    # Moderator and this role's own camp
    msg_restricted_to = {} if isinstance(todo, Speak) else {RoleType.MODERATOR.value, self.profile}
    msg = WwMessage(
        content=rsp,
        role=self.profile,
        sent_from=self.name,
        cause_by=msg_cause_by,
        send_to={},
        restricted_to=msg_restricted_to,  # encrypted message for the Moderator and own camp
    )

    logger.info(f"{self._setting}: {rsp}")

    return msg
def prepare_human_player(player_class: BasePlayer):
    """Derive a human-controlled variant of *player_class*.

    Dynamically subclasses the given role class, overriding `_act` with the
    stdin-driven module-level implementation so a person can play that role.
    """
    overrides = {"_act": _act}
    HumanPlayer = type("HumanPlayer", (player_class,), overrides)
    return HumanPlayer
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/werewolf/roles/guard.py | metagpt/ext/werewolf/roles/guard.py | from metagpt.environment.werewolf.const import RoleType
from metagpt.ext.werewolf.roles.base_player import BasePlayer
class Guard(BasePlayer):
    """Guard player: may protect one player from the werewolves each night."""

    name: str = RoleType.GUARD.value
    profile: str = RoleType.GUARD.value
    special_action_names: list[str] = ["Protect"]  # resolved to the Protect action
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/werewolf/roles/base_player.py | metagpt/ext/werewolf/roles/base_player.py | import re
from pydantic import Field, SerializeAsAny, model_validator
from metagpt.actions.action import Action
from metagpt.environment.werewolf.const import RoleState, RoleType
from metagpt.ext.werewolf.actions import (
ACTIONS,
AddNewExperiences,
InstructSpeak,
NighttimeWhispers,
Reflect,
RetrieveExperiences,
Speak,
)
from metagpt.ext.werewolf.schema import RoleExperience, WwMessage
from metagpt.logs import logger
from metagpt.roles import Role
from metagpt.utils.common import any_to_str
class BasePlayer(Role):
    """Common base for every werewolf player role.

    Handles audience-restricted message filtering, the reflect → retrieve
    experiences → act pipeline, and experience recording after the game.
    """

    name: str = "PlayerXYZ"
    profile: str = "BasePlayer"
    special_action_names: list[str] = []  # names of this role's special skills, resolved via ACTIONS
    use_reflection: bool = True  # reflect on memories before acting
    use_experience: bool = False  # retrieve past-game experiences (requires reflection)
    use_memory_selection: bool = False
    new_experience_version: str = ""  # version tag for experiences recorded this game
    status: RoleState = RoleState.ALIVE
    special_actions: list[SerializeAsAny[Action]] = Field(default=[], validate_default=True)
    experiences: list[RoleExperience] = []

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        # skill and watch configuration
        self._watch([InstructSpeak])  # listen for the Moderator's instructions to act on
        special_actions = [ACTIONS[action_name] for action_name in self.special_action_names]
        capable_actions = [Speak] + special_actions
        self.set_actions(capable_actions)  # grant the role its action skills
        self.special_actions = special_actions

        if not self.use_reflection and self.use_experience:
            logger.warning("You must enable use_reflection before using experience")
            self.use_experience = False

    @model_validator(mode="after")
    def check_addresses(self):
        # make the role addressable by its class, its name and its profile
        if not self.addresses:
            self.addresses = {any_to_str(self), self.name, self.profile} if self.name else {any_to_str(self)}
        return self

    async def _observe(self, ignore_memory=False) -> int:
        """Collect incoming messages, dropping those encrypted for other roles."""
        if self.status != RoleState.ALIVE:
            # the dead no longer take part in the game
            return 0

        news = []
        if not news:
            news = self.rc.msg_buffer.pop_all()
        old_messages = [] if ignore_memory else self.rc.memory.get()
        for m in news:
            if len(m.restricted_to) and self.profile not in m.restricted_to and self.name not in m.restricted_to:
                # if the msg is not send to the whole audience ("") nor this role (self.profile or self.name),
                # then this role should not be able to receive it and record it into its memory
                continue
            self.rc.memory.add(m)
        self.rc.news = [
            n for n in news if (n.cause_by in self.rc.watch or self.profile in n.send_to) and n not in old_messages
        ]

        # TODO to delete
        # await super()._observe()
        # # only messages sent to everyone ("") or to this role (self.profile) need the
        # # _react flow below; the rest are merely overheard, no action required
        # self.rc.news = [msg for msg in self.rc.news if msg.send_to in ["", self.profile]]

        return len(self.rc.news)

    async def _think(self):
        """Pick Speak or the role's special action based on the instruction's audience."""
        news = self.rc.news[0]
        assert news.cause_by == any_to_str(InstructSpeak)  # only act on the Moderator's instructions
        if not news.restricted_to:
            # the message targets everyone: make a public statement (voting opinions count too)
            self.rc.todo = Speak()
        elif self.profile in news.restricted_to:
            # FIXME: hard code to split, restricted_to is "Moderator" or "Moderator, <role profile>"
            # a message encrypted for this role means: perform the role's special action
            self.rc.todo = self.special_actions[0]()
        return True

    async def _act(self):
        """Run the chosen action and emit the reply as a WwMessage.

        Optionally prefixes the prompt pipeline with a reflection over all
        memories and with experiences retrieved from previous games.
        """
        # todo was chosen by _think: either Speak or a special action such as Protect
        todo = self.rc.todo
        logger.info(f"{self._setting}: ready to {str(todo)}")

        # the role's full memory and the latest instruction feed the prompts below
        memories = self.get_all_memories()
        latest_instruction = self.get_latest_instruction()

        reflection = (
            await Reflect().run(
                profile=self.profile, name=self.name, context=memories, latest_instruction=latest_instruction
            )
            if self.use_reflection
            else ""
        )
        experiences = (
            RetrieveExperiences().run(
                query=reflection, profile=self.profile, excluded_version=self.new_experience_version
            )
            if self.use_experience
            else ""
        )

        # run the role-specific action; each action type takes different arguments
        if isinstance(todo, Speak):
            rsp = await todo.run(
                profile=self.profile,
                name=self.name,
                context=memories,
                latest_instruction=latest_instruction,
                reflection=reflection,
                experiences=experiences,
            )
            restricted_to = set()
        elif isinstance(todo, NighttimeWhispers):
            rsp = await todo.run(
                profile=self.profile, name=self.name, context=memories, reflection=reflection, experiences=experiences
            )
            restricted_to = {RoleType.MODERATOR.value, self.profile}  # encrypted special-skill message for the Moderator

        msg = WwMessage(
            content=rsp,
            role=self.profile,
            sent_from=self.name,
            cause_by=type(todo),
            send_to={},
            restricted_to=restricted_to,
        )

        self.experiences.append(
            RoleExperience(
                name=self.name,
                profile=self.profile,
                reflection=reflection,
                instruction=latest_instruction,
                response=rsp,
                version=self.new_experience_version,
            )
        )

        logger.info(f"{self._setting}: {rsp}")

        return msg

    def get_all_memories(self) -> str:
        """Return the role's memory as newline-joined "<speaker>: <content>" lines."""
        memories = self.rc.memory.get()
        time_stamp_pattern = r"[0-9]+ \| "
        # NOTE: except for the Moderator, roles reading memory must use m.sent_from
        # (player name), not m.role (player role), since speakers' identities are hidden
        memories = [f"{m.sent_from}: {re.sub(time_stamp_pattern, '', m.content)}" for m in memories]  # regex strips the timestamp
        memories = "\n".join(memories)
        return memories

    def get_latest_instruction(self) -> str:
        # the role watches the Moderator's InstructSpeak, so it lands in
        # important_memory and can be read from there directly
        return self.rc.important_memory[-1].content

    def set_status(self, new_status: RoleState):
        """Set the player's life state (ALIVE/DEAD)."""
        self.status = new_status

    def record_experiences(self, round_id: str, outcome: str, game_setup: str):
        """Tag this game's meaningful experiences with the outcome and persist them."""
        experiences = [exp for exp in self.experiences if len(exp.reflection) > 2]  # not "" or not '""'
        for exp in experiences:
            exp.round_id = round_id
            exp.outcome = outcome
            exp.game_setup = game_setup
        AddNewExperiences().run(experiences)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/werewolf/roles/seer.py | metagpt/ext/werewolf/roles/seer.py | from metagpt.environment.werewolf.const import RoleType
from metagpt.ext.werewolf.roles.base_player import BasePlayer
class Seer(BasePlayer):
    """Seer player: may verify one player's identity each night."""

    name: str = RoleType.SEER.value
    profile: str = RoleType.SEER.value
    special_action_names: list[str] = ["Verify"]  # resolved to the Verify action
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/stanford_town.py | metagpt/ext/stanford_town/stanford_town.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : StanfordTown to works like SoftwareCompany
from typing import Any, Optional
from metagpt.context import Context
from metagpt.environment import StanfordTownEnv
from metagpt.ext.stanford_town.roles.st_role import STRole
from metagpt.ext.stanford_town.utils.const import MAZE_ASSET_PATH
from metagpt.logs import logger
from metagpt.team import Team
class StanfordTown(Team):
    """StanfordTown simulation team, operated like a SoftwareCompany.

    Wraps a StanfordTownEnv, hires STRole agents into it, and runs the
    simulation round by round.
    """

    # the town environment; created lazily unless restored from serialization
    env: Optional[StanfordTownEnv] = None

    def __init__(self, context: Context = None, **data: Any):
        # NOTE(review): super(Team, self) deliberately skips Team.__init__,
        # presumably so Team's default Environment is not created — confirm.
        super(Team, self).__init__(**data)
        ctx = context or Context()
        if not self.env:
            self.env = StanfordTownEnv(context=ctx, maze_asset_path=MAZE_ASSET_PATH)
        else:
            self.env.context = ctx  # The `env` object is allocated by deserialization

    async def hire(self, roles: list[STRole]):
        """Add roles to the town and place each on its starting tile."""
        logger.warning(f"The Town add {len(roles)} roles, and start to operate.")
        super().hire(roles)
        for role in roles:
            await role.init_curr_tile()

    async def run(self, n_round: int = 3):
        """Run company until target round or no money"""
        while n_round > 0:
            n_round -= 1
            logger.debug(f"{n_round=}")
            self._check_balance()
            await self.env.run()

        # save simulation result including environment and roles after all rounds
        roles = self.env.get_roles()
        for profile, role in roles.items():
            role.save_into()

        return self.env.history
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/__init__.py | metagpt/ext/stanford_town/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : stanford town implement
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/plan/converse.py | metagpt/ext/stanford_town/plan/converse.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : conversation between two agents
from typing import Tuple
from metagpt.ext.stanford_town.actions.agent_chat_sum_rel import AgentChatSumRel
from metagpt.ext.stanford_town.actions.gen_iter_chat_utt import GenIterChatUTT
from metagpt.ext.stanford_town.memory.retrieve import new_agent_retrieve
from metagpt.logs import logger
async def agent_conversation(init_role: "STRole", target_role: "STRole", conv_rounds: int = 8) -> list[list[str]]:
    """Run a back-and-forth conversation between two agents.

    Each round lets init_role speak, then target_role; a turn may end the
    conversation early when the utterance generator signals the end.

    Args:
        init_role: The role initiating the conversation.
        target_role: The role being addressed.
        conv_rounds: Maximum number of round-trips before the chat is cut off.

    Returns:
        The transcript as a list of ``[speaker_name, utterance]`` pairs.
    """
    curr_chat: list[list[str]] = []
    logger.info(f"Role: {init_role.name} starts a conversation with Role: {target_role.name}")

    for idx in range(conv_rounds):
        logger.info(f"Conv round: {idx} between {init_role.name} and {target_role.name}")
        # the two turns were previously duplicated inline; factored into one helper
        if await _conversation_turn(init_role, target_role, curr_chat):
            break
        if await _conversation_turn(target_role, init_role, curr_chat):
            break

    logger.warning(f"Conversations between {target_role.name} and {init_role.name}:")
    for row in curr_chat:
        logger.info(row)
    return curr_chat


async def _conversation_turn(speaker: "STRole", listener: "STRole", curr_chat: list):
    """Generate one utterance from *speaker* towards *listener*.

    Appends ``[speaker_name, utterance]`` to *curr_chat* in place and returns
    a truthy value when the speaker chose to end the conversation.
    """
    speaker_scratch = speaker.rc.scratch
    listener_scratch = listener.rc.scratch

    # summarize how the speaker relates to the listener from memory
    retrieved = new_agent_retrieve(speaker, [f"{listener_scratch.name}"], 50)
    relationship = await generate_summarize_agent_relationship(speaker, listener, retrieved)
    logger.info(f"The relationship between {speaker.name} and {listener.name}: {relationship}")

    # fold the last few exchanges (if any) into the retrieval focal points
    last_chat = ""
    for entry in curr_chat[-4:]:
        last_chat += ": ".join(entry) + "\n"
    focal_points = [f"{relationship}", f"{listener_scratch.name} is {listener_scratch.act_description}"]
    if last_chat:
        focal_points.append(last_chat)

    retrieved = new_agent_retrieve(speaker, focal_points, 15)
    utt, end = await generate_one_utterance(speaker, listener, retrieved, curr_chat)
    curr_chat.append([speaker_scratch.name, utt])
    return end
async def generate_summarize_agent_relationship(init_role: "STRole", target_role: "STRole", retrieved: dict) -> str:
    """Summarize the relationship between two agents from retrieved memory nodes.

    Args:
        init_role: The agent whose perspective the summary is taken from.
        target_role: The agent the relationship is about.
        retrieved: Mapping of focal point -> list of retrieved memory nodes
            (each node exposes an ``embedding_key`` statement).

    Returns:
        A short natural-language relationship summary produced by the LLM.
    """
    # Flatten every node's embedding key into one newline-terminated statement
    # list; join avoids the quadratic cost of repeated string concatenation.
    all_embedding_keys = [node.embedding_key for nodes in retrieved.values() for node in nodes]
    all_embedding_key_str = "".join(f"{key}\n" for key in all_embedding_keys)

    summarized_relationship = await AgentChatSumRel().run(init_role, target_role, all_embedding_key_str)
    return summarized_relationship
async def generate_one_utterance(init_role, target_role, retrieved: dict, curr_chat: list) -> Tuple[str, str]:
    """Produce init_role's next utterance and whether the chat should end."""
    # Chat version optimized for speed via batch generation
    scratch = init_role.rc.scratch
    target_scratch = target_role.rc.scratch
    # describe the encounter that triggers the conversation
    curr_context = (
        f"{scratch.name} was {scratch.act_description} "
        f"when {scratch.name} saw {target_scratch.name} "
        f"in the middle of {target_scratch.act_description}.\n"
        f"{scratch.name} is initiating a conversation with {target_scratch.name}."
    )

    x = await GenIterChatUTT().run(init_role, target_role, retrieved, curr_context, curr_chat)

    return x["utterance"], x["end"]
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/plan/__init__.py | metagpt/ext/stanford_town/plan/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/plan/st_plan.py | metagpt/ext/stanford_town/plan/st_plan.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : st' planning execution
import datetime
import math
import random
from typing import Tuple, Union
from metagpt.ext.stanford_town.actions.decide_to_talk import DecideToTalk
from metagpt.ext.stanford_town.actions.gen_action_details import GenActionDetails
from metagpt.ext.stanford_town.actions.gen_daily_schedule import GenDailySchedule
from metagpt.ext.stanford_town.actions.gen_hourly_schedule import GenHourlySchedule
from metagpt.ext.stanford_town.actions.new_decomp_schedule import NewDecompSchedule
from metagpt.ext.stanford_town.actions.summarize_conv import SummarizeConv
from metagpt.ext.stanford_town.actions.task_decomp import TaskDecomp
from metagpt.ext.stanford_town.actions.wake_up import WakeUp
from metagpt.ext.stanford_town.memory.retrieve import new_agent_retrieve
from metagpt.ext.stanford_town.plan.converse import agent_conversation
from metagpt.ext.stanford_town.utils.utils import get_embedding
from metagpt.llm import LLM
from metagpt.logs import logger
async def plan(role: "STRole", roles: dict["STRole"], new_day: bool, retrieved: dict) -> str:
    """Top-level planning step for one role at the current tick.

    Refreshes schedules as needed, reacts to a perceived event (chat or wait),
    and cleans up chat-related state.

    Args:
        role: The role being planned for.
        roles: All roles in the simulation, keyed by name.
        new_day: Whether a new day just started (triggers long-term planning).
        retrieved: Perceived events plus associated memories, keyed by event
            description.

    Returns:
        The role's current action address (``scratch.act_address``).
    """
    # PART 1: Generate the hourly schedule.
    if new_day:
        await _long_term_planning(role, new_day)

    # PART 2: If the current action has expired, we want to create a new plan.
    act_check_finished = role.scratch.act_check_finished()
    logger.info(f"Role: {role.name} act_check_finished is {act_check_finished}")
    if act_check_finished:
        await _determine_action(role)

    # PART 3: If you perceived an event that needs to be responded to (saw
    # another role), and retrieved relevant information.
    # Step 1: Retrieved may have multiple events represented in it. The first
    #         job here is to determine which of the events we want to focus
    #         on for the role.
    #         <focused_event> takes the form of a dictionary like this:
    #         dictionary {["curr_event"] = <ConceptNode>,
    #                     ["events"] = [<ConceptNode>, ...],
    #                     ["thoughts"] = [<ConceptNode>, ...]}
    focused_event = False
    if retrieved.keys():
        focused_event = _choose_retrieved(role.name, retrieved)

    # Step 2: Once we choose an event, we need to determine whether the
    #         role will take any actions for the perceived event. There are
    #         three possible modes of reaction returned by _should_react.
    #         a) "chat with {target_role.name}"
    #         b) "react"
    #         c) False
    logger.info(f"Role: {role.name} focused_event: {focused_event}")
    if focused_event:
        reaction_mode = await _should_react(role, focused_event, roles)
        logger.info(f"Role: {role.name} reaction_mode: {reaction_mode}")
        if reaction_mode:
            # If we do want to chat, then we generate conversation
            if reaction_mode[:9] == "chat with":
                await _chat_react(role, reaction_mode, roles)
            elif reaction_mode[:4] == "wait":
                await _wait_react(role, reaction_mode)

    # Step 3: Chat-related state clean up.
    # If the persona is not chatting with anyone, we clean up any of the
    # chat-related states here.
    if role.rc.scratch.act_event[1] != "chat with":
        role.rc.scratch.chatting_with = None
        role.rc.scratch.chat = None
        role.rc.scratch.chatting_end_time = None
    # We want to make sure that the persona does not keep conversing with each
    # other in an infinite loop. So, chatting_with_buffer maintains a form of
    # buffer that makes the persona wait from talking to the same target
    # immediately after chatting once. We keep track of the buffer value here.
    curr_persona_chat_buffer = role.rc.scratch.chatting_with_buffer
    for persona_name, buffer_count in curr_persona_chat_buffer.items():
        if persona_name != role.rc.scratch.chatting_with:
            role.rc.scratch.chatting_with_buffer[persona_name] -= 1

    return role.rc.scratch.act_address
def _choose_retrieved(role_name: str, retrieved: dict) -> Union[None, dict]:
"""
Retrieved elements have multiple core "curr_events". We need to choose one
event to which we are going to react to. We pick that event here.
Args:
role_name: Current role instance's name whose action we are determining.
retrieved: A dictionary of <ConceptNode> that were retrieved from the
the role's associative memory. This dictionary takes the
following form:
dictionary[event.description] =
{["curr_event"] = <ConceptNode>,
["events"] = [<ConceptNode>, ...],
["thoughts"] = [<ConceptNode>, ...] }
"""
# Once we are done with the reflection, we might want to build a more
# complex structure here.
# We do not want to take self events... for now
copy_retrieved = retrieved.copy()
for event_desc, rel_ctx in copy_retrieved.items():
curr_event = rel_ctx["curr_event"]
if curr_event.subject == role_name:
del retrieved[event_desc]
# Always choose role first.
priority = []
for event_desc, rel_ctx in retrieved.items():
curr_event = rel_ctx["curr_event"]
if ":" not in curr_event.subject and curr_event.subject != role_name:
priority += [rel_ctx]
if priority:
return random.choice(priority)
# Skip idle.
for event_desc, rel_ctx in retrieved.items():
if "is idle" not in event_desc:
priority += [rel_ctx]
if priority:
return random.choice(priority)
return None
async def _should_react(role: "STRole", retrieved: dict, roles: dict):
    """
    Determines what form of reaction the role should exihibit given the
    retrieved values.
    INPUT
        role: Current <"STRole"> instance whose action we are determining.
        retrieved: A dictionary of <ConceptNode> that were retrieved from the
                   the role's associative memory. This dictionary takes the
                   following form:
                   dictionary[event.description] =
                     {["curr_event"] = <ConceptNode>,
                      ["events"] = [<ConceptNode>, ...],
                      ["thoughts"] = [<ConceptNode>, ...] }
        roles: A dictionary that contains all role names as keys, and the
               <"STRole"> instance as values.
    """

    async def lets_talk(init_role: "STRole", target_role: "STRole", retrieved: dict):
        # Decide whether init_role should start a conversation with target_role.
        if init_role.name == target_role.name:
            logger.info(f"Role: {role.name} _should_react lets_talk meet same role, return False")
            return False

        scratch = init_role.rc.scratch
        target_scratch = target_role.rc.scratch
        # both sides must have a concrete current action
        if (
            not target_scratch.act_address
            or not target_scratch.act_description
            or not scratch.act_address
            or not scratch.act_description
        ):
            return False

        if "sleeping" in target_scratch.act_description or "sleeping" in scratch.act_description:
            return False

        # no new chats right before midnight
        if scratch.curr_time.hour == 23:
            return False

        if "<waiting>" in target_scratch.act_address:
            return False

        if target_scratch.chatting_with or scratch.chatting_with:
            return False

        # cool-down buffer: avoid immediately re-chatting with the same target
        if target_role.name in scratch.chatting_with_buffer:
            if scratch.chatting_with_buffer[target_role.name] > 0:
                return False

        if await DecideToTalk().run(init_role, target_role, retrieved):
            return True

        return False

    async def lets_react(init_role: "STRole", target_role: "STRole", retrieved: dict):
        # Decide whether init_role should wait on (or otherwise react to) target_role.
        if init_role.name == target_role.name:
            logger.info(f"Role: {role.name} _should_react lets_react meet same role, return False")
            return False

        scratch = init_role.rc.scratch
        target_scratch = target_role.rc.scratch
        # both sides must have a concrete current action
        if (
            not target_scratch.act_address
            or not target_scratch.act_description
            or not scratch.act_address
            or not scratch.act_description
        ):
            return False

        if "sleeping" in target_scratch.act_description or "sleeping" in scratch.act_description:
            return False

        # return False
        if scratch.curr_time.hour == 23:
            return False

        if "waiting" in target_scratch.act_description:
            return False
        if scratch.planned_path == []:
            return False

        # only react when both are headed to the same address
        if scratch.act_address != target_scratch.act_address:
            return False

        react_mode = await DecideToTalk().run(init_role, target_role, retrieved)

        if react_mode == "1":
            # wait until one minute before the target finishes its current action
            wait_until = (
                target_scratch.act_start_time + datetime.timedelta(minutes=target_scratch.act_duration - 1)
            ).strftime("%B %d, %Y, %H:%M:%S")
            return f"wait: {wait_until}"
        elif react_mode == "2":
            return False
            return "do other things"  # NOTE(review): unreachable — dead code after the return above
        else:
            return False  # "keep"

    # If the role is chatting right now, default to no reaction
    scratch = role.rc.scratch
    if scratch.chatting_with:
        return False
    if "<waiting>" in scratch.act_address:
        return False

    # Recall that retrieved takes the following form:
    # dictionary {["curr_event"] = <ConceptNode>}
    curr_event = retrieved["curr_event"]
    logger.info(f"Role: {role.name} _should_react curr_event.subject: {curr_event.subject}")

    if ":" not in curr_event.subject:
        # this is a role event.
        if await lets_talk(role, roles[curr_event.subject], retrieved):
            return f"chat with {curr_event.subject}"
        react_mode = await lets_react(role, roles[curr_event.subject], retrieved)
        return react_mode
    return False
async def _chat_react(role: "STRole", reaction_mode: str, roles: dict[str, "STRole"]):
    """Set up a chat between `role` and the target named in `reaction_mode`.

    `reaction_mode` is expected to look like "chat with <name>"; the target
    name is sliced out at offset 9 below.
    """
    # There are two roles -- the role who is initiating the conversation
    # and the role who is the target. We get the role instances here.
    init_role = role
    target_role = roles[reaction_mode[9:].strip()]

    # Actually creating the conversation here.
    convo, duration_min = await generate_convo(init_role, target_role)  # 2222
    convo_summary = await generate_convo_summary(convo)
    inserted_act = convo_summary
    inserted_act_dur = duration_min
    act_start_time = target_role.rc.scratch.act_start_time

    # Round up to the next whole minute before adding the chat duration.
    curr_time = target_role.rc.scratch.curr_time
    if curr_time.second != 0:
        temp_curr_time = curr_time + datetime.timedelta(seconds=60 - curr_time.second)
        chatting_end_time = temp_curr_time + datetime.timedelta(minutes=inserted_act_dur)
    else:
        chatting_end_time = curr_time + datetime.timedelta(minutes=inserted_act_dur)

    # NOTE(review): the loop variable below shadows the `role` parameter;
    # behavior is unaffected because `init_role` was captured above.
    for role, p in [("init", init_role), ("target", target_role)]:
        if role == "init":
            act_address = f"<persona> {target_role.name}"
            act_event = (p.name, "chat with", target_role.name)
            chatting_with = target_role.name
            chatting_with_buffer = {}
            # Cool-down counter to stop the pair immediately re-chatting
            # (checked against 0 in lets_talk).
            chatting_with_buffer[target_role.name] = 800
        elif role == "target":
            act_address = f"<persona> {init_role.name}"
            act_event = (p.name, "chat with", init_role.name)
            chatting_with = init_role.name
            chatting_with_buffer = {}
            chatting_with_buffer[init_role.name] = 800

        act_pronunciatio = "💬"
        act_obj_description = None
        act_obj_pronunciatio = None
        act_obj_event = (None, None, None)

        # Splice the chat into each participant's schedule.
        await _create_react(
            p,
            inserted_act,
            inserted_act_dur,
            act_address,
            act_event,
            chatting_with,
            convo,
            chatting_with_buffer,
            chatting_end_time,
            act_pronunciatio,
            act_obj_description,
            act_obj_pronunciatio,
            act_obj_event,
            act_start_time,
        )
async def _create_react(
    role: "STRole",
    inserted_act: str,
    inserted_act_dur: int,
    act_address: str,
    act_event: Tuple,
    chatting_with: str,
    chat: list,
    chatting_with_buffer: dict,
    chatting_end_time: datetime.datetime,
    act_pronunciatio: str,
    act_obj_description: str,
    act_obj_pronunciatio: str,
    act_obj_event: Tuple,
    act_start_time=None,
):
    """Insert a reaction (e.g. a chat or a wait) into `role`'s schedule.

    Computes the hour window of the day that the inserted act falls into,
    asks `generate_new_decomp_schedule` to re-plan that window, splices the
    result into `f_daily_schedule`, and finally queues the new action on the
    role's scratch space.
    """
    p = role
    scratch = role.rc.scratch

    # Minutes elapsed before the current hourly-schedule entry.
    min_sum = 0
    for i in range(scratch.get_f_daily_schedule_hourly_org_index()):
        min_sum += scratch.f_daily_schedule_hourly_org[i][1]
    start_hour = int(min_sum / 60)

    if scratch.f_daily_schedule_hourly_org[scratch.get_f_daily_schedule_hourly_org_index()][1] >= 120:
        # The current entry alone spans two or more hours.
        end_hour = (
            start_hour + scratch.f_daily_schedule_hourly_org[scratch.get_f_daily_schedule_hourly_org_index()][1] / 60
        )
    elif (
        # NOTE(review): this condition is only a truthiness check on the sum
        # of the current and next entries' durations (true whenever the sum
        # is non-zero); a comparison such as `>= 120` looks intended here —
        # confirm before changing.
        scratch.f_daily_schedule_hourly_org[scratch.get_f_daily_schedule_hourly_org_index()][1]
        + scratch.f_daily_schedule_hourly_org[scratch.get_f_daily_schedule_hourly_org_index() + 1][1]
    ):
        end_hour = start_hour + (
            (
                scratch.f_daily_schedule_hourly_org[scratch.get_f_daily_schedule_hourly_org_index()][1]
                + scratch.f_daily_schedule_hourly_org[scratch.get_f_daily_schedule_hourly_org_index() + 1][1]
            )
            / 60
        )
    else:
        end_hour = start_hour + 2
    end_hour = int(end_hour)

    # Map the [start_hour, end_hour) window onto index bounds of the
    # decomposed schedule. NOTE(review): if the schedule ends before the
    # window boundary, start_index/end_index stay None and the slice below
    # silently covers a different range — confirm that is acceptable.
    dur_sum = 0
    count = 0
    start_index = None
    end_index = None
    for act, dur in scratch.f_daily_schedule:
        if dur_sum >= start_hour * 60 and start_index is None:
            start_index = count
        if dur_sum >= end_hour * 60 and end_index is None:
            end_index = count
        dur_sum += dur
        count += 1

    # Re-plan the window around the inserted act, then splice it in.
    ret = await generate_new_decomp_schedule(p, inserted_act, inserted_act_dur, start_hour, end_hour)
    scratch.f_daily_schedule[start_index:end_index] = ret
    scratch.add_new_action(
        act_address,
        inserted_act_dur,
        inserted_act,
        act_pronunciatio,
        act_event,
        chatting_with,
        chat,
        chatting_with_buffer,
        chatting_end_time,
        act_obj_description,
        act_obj_pronunciatio,
        act_obj_event,
        act_start_time,
    )
async def _wait_react(role: "STRole", reaction_mode: str):
    """Queue a "wait" action for `role`.

    `reaction_mode` has the form "wait: <timestamp>"; the role waits at its
    current tile until that time before starting its pending activity.
    """
    scratch = role.rc.scratch

    # The activity being waited on, e.g. "having lunch" out of
    # "eating (having lunch)".
    pending_desc = scratch.act_description.split("(")[-1][:-1]
    inserted_act = f"waiting to start {pending_desc}"

    # Parse the target end time out of "wait: <timestamp>".
    end_time = datetime.datetime.strptime(reaction_mode[6:].strip(), "%B %d, %Y, %H:%M:%S")
    minutes_until_end = (end_time.minute + end_time.hour * 60) - (
        scratch.curr_time.minute + scratch.curr_time.hour * 60
    )
    inserted_act_dur = minutes_until_end + 1

    await _create_react(
        role,
        inserted_act,
        inserted_act_dur,
        f"<waiting> {scratch.curr_tile[0]} {scratch.curr_tile[1]}",  # act_address
        (role.name, "waiting to start", pending_desc),  # act_event
        None,  # chatting_with
        None,  # chat
        None,  # chatting_with_buffer
        None,  # chatting_end_time
        "⌛",  # act_pronunciatio
        None,  # act_obj_description
        None,  # act_obj_pronunciatio
        (None, None, None),  # act_obj_event
    )
async def generate_convo(init_role: "STRole", target_role: "STRole") -> tuple[list, int]:
    """Run a conversation between the two roles.

    Returns:
        (convo, convo_length): the utterance rows produced by
        `agent_conversation`, and an estimated chat duration in minutes
        derived from the total utterance length.
        (Annotation fixed: this returns a 2-tuple, not `Union[list, int]`.)
    """
    convo = await agent_conversation(init_role, target_role)
    all_utt = ""

    # Flatten the conversation into "speaker: utterance" lines so its
    # length can be measured.
    for row in convo:
        speaker = row[0]
        utt = row[1]
        all_utt += f"{speaker}: {utt}\n"

    # Length heuristic inherited from upstream; exact units unclear — TODO
    # confirm the intended chars-per-minute assumption.
    convo_length = math.ceil(int(len(all_utt) / 8) / 30)

    return convo, convo_length
async def generate_convo_summary(conv: list[list[str]]) -> str:
    """Summarize a conversation (list of [speaker, utterance] rows) into a
    one-line "conversing about ..." description via the SummarizeConv action.
    """
    conv_summary = await SummarizeConv().run(conv)
    return conv_summary
async def generate_new_decomp_schedule(
    role: "STRole", inserted_act: str, inserted_act_dur: int, start_hour: int, end_hour: int
):
    """Re-plan the [start_hour, end_hour) window of `role`'s schedule around
    `inserted_act` (lasting `inserted_act_dur` minutes).

    Builds two views of the window -- `main_act_dur`, the originally planned
    [action, duration] pairs, and `truncated_act_dur`, the portion already
    performed today (cut off at the current minute) with the inserted act
    appended -- and delegates the re-planning to NewDecompSchedule.
    """
    # Step 1: Setting up the core variables for the function.
    # <p> is the role whose schedule we are editing right now.
    scratch = role.rc.scratch
    # <today_min_pass> indicates the number of minutes that have passed today.
    today_min_pass = int(scratch.curr_time.hour) * 60 + int(scratch.curr_time.minute) + 1

    # Step 2: We need to create <main_act_dur> and <truncated_act_dur>.
    main_act_dur = []
    truncated_act_dur = []
    dur_sum = 0  # duration sum
    count = 0  # enumerate count
    truncated_fin = False

    logger.debug(f"DEBUG::: {scratch.name}")
    for act, dur in scratch.f_daily_schedule:
        if (dur_sum >= start_hour * 60) and (dur_sum < end_hour * 60):
            main_act_dur += [[act, dur]]
            if dur_sum <= today_min_pass:
                truncated_act_dur += [[act, dur]]
            elif dur_sum > today_min_pass and not truncated_fin:
                # We need to insert that last act, duration list like this one:
                # e.g., ['wakes up and completes her morning routine (wakes up...)', 2]
                truncated_act_dur += [[scratch.f_daily_schedule[count][0], dur_sum - today_min_pass]]
                # NOTE(review): this subtraction cancels the duration assigned
                # one line above, leaving the entry at 0 minutes; the
                # commented-out variants below suggest this was still being
                # experimented with upstream -- confirm the intended value.
                truncated_act_dur[-1][-1] -= (
                    dur_sum - today_min_pass
                )  # DEC 7 DEBUG;.. is the +1 the right thing to do???
                # DEC 7 DEBUG;.. is the +1 the right thing to do???
                # truncated_act_dur[-1][-1] -= (dur_sum - today_min_pass + 1)
                logger.debug(f"DEBUG::: {truncated_act_dur}")

                # DEC 7 DEBUG;.. is the +1 the right thing to do???
                # truncated_act_dur[-1][-1] -= (dur_sum - today_min_pass)
                truncated_fin = True
        dur_sum += dur
        count += 1

    main_act_dur = main_act_dur  # no-op self-assignment kept from the original port

    # Rewrite the interrupted last activity as "<act> (on the way to <detail>)".
    x = (
        truncated_act_dur[-1][0].split("(")[0].strip()
        + " (on the way to "
        + truncated_act_dur[-1][0].split("(")[-1][:-1]
        + ")"
    )
    truncated_act_dur[-1][0] = x

    if "(" in truncated_act_dur[-1][0]:
        inserted_act = truncated_act_dur[-1][0].split("(")[0].strip() + " (" + inserted_act + ")"

    # To do inserted_act_dur+1 below is an important decision but I'm not sure
    # if I understand the full extent of its implications. Might want to
    # revisit.
    truncated_act_dur += [[inserted_act, inserted_act_dur]]
    # Arbitrary anchor date; only the time-of-day component matters to the prompt.
    start_time_hour = datetime.datetime(2022, 10, 31, 0, 0) + datetime.timedelta(hours=start_hour)
    end_time_hour = datetime.datetime(2022, 10, 31, 0, 0) + datetime.timedelta(hours=end_hour)

    return await NewDecompSchedule().run(
        role, main_act_dur, truncated_act_dur, start_time_hour, end_time_hour, inserted_act, inserted_act_dur
    )
async def _long_term_planning(role: "STRole", new_day: bool):
    """
    Formulates the role's daily long-term plan if it is the start of a new
    day. This basically has two components: first, we create the wake-up hour,
    and second, we create the hourly schedule based on it.
    INPUT
        new_day: Indicates whether the current time signals a "First day",
            "New day", or False (for neither). This is important because we
            create the roles' long term planning on the new day.
    """
    # We start by creating the wake up hour for the role.
    wake_up_hour = await WakeUp().run(role)
    wake_up_hour = int(wake_up_hour)
    logger.info(f"Role: {role.name} long_term_planning, wake_up_hour: {wake_up_hour}")

    # When it is a new day, we start by creating the daily_req of the role.
    # Note that the daily_req is a list of strings that describe the role's
    # day in broad strokes.
    if new_day == "First day":
        # Bootstrapping the daily plan for the start of then generation:
        # if this is the start of generation (so there is no previous day's
        # daily requirement, or if we are on a new day, we want to create a new
        # set of daily requirements.
        role.scratch.daily_req = await GenDailySchedule().run(role, wake_up_hour)
        logger.info(f"Role: {role.name} daily requirements: {role.scratch.daily_req}")
    elif new_day == "New day":
        revise_identity(role)
        # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - TODO
        # We need to create a new daily_req here...
        # NOTE(review): the self-assignment below is a no-op placeholder; the
        # previous day's requirements are carried over unchanged.
        role.scratch.daily_req = role.scratch.daily_req

    # Based on the daily_req, we create an hourly schedule for the role,
    # which is a list of todo items with a time duration (in minutes) that
    # add up to 24 hours.
    role.scratch.f_daily_schedule = await GenHourlySchedule().run(role, wake_up_hour)
    logger.info(f"Role: {role.name} f_daily_schedule: {role.scratch.f_daily_schedule}")
    # Keep an untouched hourly copy; f_daily_schedule itself is decomposed
    # in place later.
    role.scratch.f_daily_schedule_hourly_org = role.scratch.f_daily_schedule[:]

    # Added March 4 -- adding plan to the memory.
    thought = f"This is {role.scratch.name}'s plan for {role.scratch.curr_time.strftime('%A %B %d')}:"
    for i in role.scratch.daily_req:
        thought += f" {i},"
    thought = thought[:-1] + "."
    created = role.scratch.curr_time
    expiration = role.scratch.curr_time + datetime.timedelta(days=30)
    s, p, o = (role.scratch.name, "plan", role.scratch.curr_time.strftime("%A %B %d"))
    keywords = set(["plan"])
    thought_poignancy = 5
    thought_embedding_pair = (thought, get_embedding(thought))
    role.a_mem.add_thought(
        created, expiration, s, p, o, thought, keywords, thought_poignancy, thought_embedding_pair, None
    )
async def _determine_action(role: "STRole"):
    """
    Creates the next action sequence for the role.
    The main goal of this function is to run "add_new_action" on the role's
    scratch space, which sets up all the action related variables for the next
    action.
    As a part of this, the role may need to decompose its hourly schedule as
    needed.
    INPUT
        role: Current <Persona> instance whose action we are determining.
    """

    def determine_decomp(act_desp, act_dura):
        """
        Given an action description and its duration, we determine whether we need
        to decompose it. If the action is about the agent sleeping, we generally
        do not want to decompose it, so that's what we catch here.

        INPUT:
            act_desp: the description of the action (e.g., "sleeping")
            act_dura: the duration of the action in minutes.
        OUTPUT:
            a boolean. True if we need to decompose, False otherwise.
        """
        if "sleep" not in act_desp and "bed" not in act_desp:
            return True
        elif "sleeping" in act_desp or "asleep" in act_desp or "in bed" in act_desp:
            return False
        elif "sleep" in act_desp or "bed" in act_desp:
            # Short naps are still decomposed; long sleeps are not.
            if act_dura > 60:
                return False
            return True

    # The goal of this function is to get us the action associated with
    # <curr_index>. As a part of this, we may need to decompose some large
    # chunk actions.
    # Importantly, we try to decompose at least two hours worth of schedule at
    # any given point.
    curr_index = role.scratch.get_f_daily_schedule_index()
    curr_index_60 = role.scratch.get_f_daily_schedule_index(advance=60)
    logger.info(f"f_daily_schedule: {role.scratch.f_daily_schedule}")

    # * Decompose *
    # During the first hour of the day, we need to decompose two hours
    # sequence. We do that here.
    if curr_index == 0:
        # This portion is invoked if it is the first hour of the day.
        act_desp, act_dura = role.scratch.f_daily_schedule[curr_index]
        if act_dura >= 60:
            # We decompose if the next action is longer than an hour, and fits the
            # criteria described in determine_decomp.
            if determine_decomp(act_desp, act_dura):
                role.scratch.f_daily_schedule[curr_index : curr_index + 1] = await TaskDecomp().run(
                    role, act_desp, act_dura
                )
        if curr_index_60 + 1 < len(role.scratch.f_daily_schedule):
            # Also decompose the entry after the one-hour mark, so two hours
            # are covered in one go.
            act_desp, act_dura = role.scratch.f_daily_schedule[curr_index_60 + 1]
            if act_dura >= 60:
                if determine_decomp(act_desp, act_dura):
                    role.scratch.f_daily_schedule[curr_index_60 + 1 : curr_index_60 + 2] = await TaskDecomp().run(
                        role, act_desp, act_dura
                    )

    if curr_index_60 < len(role.scratch.f_daily_schedule):
        # If it is not the first hour of the day, this is always invoked (it is
        # also invoked during the first hour of the day -- to double up so we can
        # decompose two hours in one go). Of course, we need to have something to
        # decompose as well, so we check for that too.
        if role.scratch.curr_time.hour < 23:
            # And we don't want to decompose after 11 pm.
            act_desp, act_dura = role.scratch.f_daily_schedule[curr_index_60]
            if act_dura >= 60:
                if determine_decomp(act_desp, act_dura):
                    role.scratch.f_daily_schedule[curr_index_60 : curr_index_60 + 1] = await TaskDecomp().run(
                        role, act_desp, act_dura
                    )
    # * End of Decompose *

    # Generate an <Action> instance from the action description and duration. By
    # this point, we assume that all the relevant actions are decomposed and
    # ready in f_daily_schedule.
    logger.debug("DEBUG LJSDLFSKJF")
    for i in role.scratch.f_daily_schedule:
        logger.debug(i)
    logger.debug(curr_index)
    logger.debug(len(role.scratch.f_daily_schedule))
    logger.debug(role.scratch.name)

    # Pad the schedule with sleep so the day always sums to 1440 minutes.
    x_emergency = 0
    for i in role.scratch.f_daily_schedule:
        x_emergency += i[1]
    if 1440 - x_emergency > 0:
        logger.info(f"x_emergency__AAA: {x_emergency}")
        role.scratch.f_daily_schedule += [["sleeping", 1440 - x_emergency]]

    act_desp, act_dura = role.scratch.f_daily_schedule[curr_index]
    new_action_details = await GenActionDetails().run(role, act_desp, act_dura)
    # Adding the action to role's queue.
    role.scratch.add_new_action(**new_action_details)
def revise_identity(role: "STRole"):
    """Refresh the role's `currently` status and daily-plan requirements at
    the start of a new day, based on memories retrieved about the previous
    day.

    NOTE(review): `LLM().ask` is called without `await` in this sync
    function; if `ask` is a coroutine function these calls return coroutine
    objects rather than text -- confirm against the LLM provider interface.
    """
    p_name = role.scratch.name

    # Retrieve memories relevant to planning today.
    focal_points = [
        f"{p_name}'s plan for {role.scratch.get_str_curr_date_str()}.",
        f"Important recent events for {p_name}'s life.",
    ]
    retrieved = new_agent_retrieve(role, focal_points)

    # Flatten the retrieved nodes into a "[Statements]" context block shared
    # by all prompts below.
    statements = "[Statements]\n"
    for key, val in retrieved.items():
        for i in val:
            statements += f"{i.created.strftime('%A %B %d -- %H:%M %p')}: {i.embedding_key}\n"

    # Prompt 1: what should the role remember while planning today?
    plan_prompt = statements + "\n"
    plan_prompt += f"Given the statements above, is there anything that {p_name} should remember as they plan for"
    plan_prompt += f" *{role.scratch.curr_time.strftime('%A %B %d')}*? "
    plan_prompt += "If there is any scheduling information, be as specific as possible (include date, time, and location if stated in the statement)\n\n"
    plan_prompt += f"Write the response from {p_name}'s perspective."
    plan_note = LLM().ask(plan_prompt)

    # Prompt 2: how does the role feel about their days so far?
    thought_prompt = statements + "\n"
    thought_prompt += (
        f"Given the statements above, how might we summarize {p_name}'s feelings about their days up to now?\n\n"
    )
    thought_prompt += f"Write the response from {p_name}'s perspective."
    thought_note = LLM().ask(thought_prompt)

    # Prompt 3: roll yesterday's status plus the two notes into a new
    # "currently" status.
    currently_prompt = (
        f"{p_name}'s status from {(role.scratch.curr_time - datetime.timedelta(days=1)).strftime('%A %B %d')}:\n"
    )
    currently_prompt += f"{role.scratch.currently}\n\n"
    currently_prompt += f"{p_name}'s thoughts at the end of {(role.scratch.curr_time - datetime.timedelta(days=1)).strftime('%A %B %d')}:\n"
    currently_prompt += (plan_note + thought_note).replace("\n", "") + "\n\n"
    currently_prompt += f"It is now {role.scratch.curr_time.strftime('%A %B %d')}. Given the above, write {p_name}'s status for {role.scratch.curr_time.strftime('%A %B %d')} that reflects {p_name}'s thoughts at the end of {(role.scratch.curr_time - datetime.timedelta(days=1)).strftime('%A %B %d')}. Write this in third-person talking about {p_name}."
    currently_prompt += "If there is any scheduling information, be as specific as possible (include date, time, and location if stated in the statement).\n\n"
    currently_prompt += "Follow this format below:\nStatus: <new status>"
    new_currently = LLM().ask(currently_prompt)
    role.scratch.currently = new_currently

    # Prompt 4: regenerate the broad-strokes daily plan requirements.
    daily_req_prompt = role.scratch.get_str_iss() + "\n"
    daily_req_prompt += f"Today is {role.scratch.curr_time.strftime('%A %B %d')}. Here is {role.scratch.name}'s plan today in broad-strokes (with the time of the day. e.g., have a lunch at 12:00 pm, watch TV from 7 to 8 pm).\n\n"
    daily_req_prompt += "Follow this format (the list should have 4~6 items but no more):\n"
    daily_req_prompt += "1. wake up and complete the morning routine at <time>, 2. ..."
    new_daily_req = LLM().ask(daily_req_prompt)
    new_daily_req = new_daily_req.replace("\n", " ")
    role.scratch.daily_plan_req = new_daily_req
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/actions/decide_to_talk.py | metagpt/ext/stanford_town/actions/decide_to_talk.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : device to talk to another role, return yes or no
from metagpt.ext.stanford_town.actions.st_action import STAction
from metagpt.logs import logger
class DecideToTalk(STAction):
    """Decide whether `init_role` should start a conversation with
    `target_role`, given retrieved memories. `run` returns a bool."""

    name: str = "DecideToTalk"

    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        """A response is valid iff its final answer is literally yes/no."""
        resp = False
        try:
            if llm_resp.split("Answer in yes or no:")[-1].strip().lower() in ["yes", "no"]:
                resp = True
        except ValueError:
            pass
        return resp

    def _func_cleanup(self, llm_resp: str, prompt: str) -> str:
        """Extract the lower-cased yes/no answer from the response."""
        return llm_resp.split("Answer in yes or no:")[-1].strip().lower()

    def _func_fail_default_resp(self) -> str:
        # Default to "yes" so a failed LLM call errs toward letting roles
        # interact.
        return "yes"

    async def run(self, init_role: "STRole", target_role: "STRole", retrieved: dict, *args, **kwargs) -> bool:
        """Run action"""

        def create_prompt_input(init_role: "STRole", target_role: "STRole", retrieved: dict) -> list:
            # Fixed: the return annotation said `str`, but this returns the
            # list of template slot values.
            scratch = init_role.rc.scratch
            target_scratch = target_role.rc.scratch
            last_chat = init_role.rc.memory.get_last_chat(target_role.name)
            last_chatted_time = ""
            last_chat_about = ""
            if last_chat:
                last_chatted_time = last_chat.created.strftime("%B %d, %Y, %H:%M:%S")
                last_chat_about = last_chat.description

            # Retrieved events (rewritten into past tense) plus thoughts form
            # the context section of the prompt.
            context = ""
            for c_node in retrieved["events"]:
                curr_desc = c_node.description.split(" ")
                curr_desc[2:3] = ["was"]
                curr_desc = " ".join(curr_desc)
                context += f"{curr_desc}. "
            context += "\n"
            for c_node in retrieved["thoughts"]:
                context += f"{c_node.description}. "

            curr_time = scratch.curr_time.strftime("%B %d, %Y, %H:%M:%S %p")
            init_act_desc = scratch.act_description
            if "(" in init_act_desc:
                init_act_desc = init_act_desc.split("(")[-1][:-1]

            if len(scratch.planned_path) == 0 and "waiting" not in init_act_desc:
                init_p_desc = f"{init_role.name} is already {init_act_desc}"
            elif "waiting" in init_act_desc:
                init_p_desc = f"{init_role.name} is {init_act_desc}"
            else:
                init_p_desc = f"{init_role.name} is on the way to {init_act_desc}"

            # Fixed: the target's description was copy-pasted from the
            # initiator's scratch; it must come from target_scratch (mirrors
            # the init block above).
            target_act_desc = target_scratch.act_description
            if "(" in target_act_desc:
                target_act_desc = target_act_desc.split("(")[-1][:-1]

            # NOTE(review): the branches below still key off `init_act_desc`
            # and the middle one formats the INIT role; this mirrors the
            # upstream generative-agents code but looks like another
            # copy-paste artifact -- confirm before changing.
            if len(target_scratch.planned_path) == 0 and "waiting" not in init_act_desc:
                target_p_desc = f"{target_role.name} is already {target_act_desc}"
            elif "waiting" in init_act_desc:
                target_p_desc = f"{init_role.name} is {init_act_desc}"
            else:
                target_p_desc = f"{target_role.name} is on the way to {target_act_desc}"

            # Order matters: must match the placeholder order in
            # decide_to_talk_v2.txt.
            prompt_input = []
            prompt_input += [context]
            prompt_input += [curr_time]
            prompt_input += [init_role.name]
            prompt_input += [target_role.name]
            prompt_input += [last_chatted_time]
            prompt_input += [last_chat_about]
            prompt_input += [init_p_desc]
            prompt_input += [target_p_desc]
            prompt_input += [init_role.name]
            prompt_input += [target_role.name]
            return prompt_input

        prompt_input = create_prompt_input(init_role, target_role, retrieved)
        prompt = self.generate_prompt_with_tmpl_filename(
            prompt_input=prompt_input, tmpl_filename="decide_to_talk_v2.txt"
        )
        self.fail_default_resp = self._func_fail_default_resp()
        output = await self._run_gpt35_max_tokens(prompt, max_tokens=20)  # yes or no
        result = True if output == "yes" else False
        logger.info(f"Role: {init_role.name} Action: {self.cls_name} output: {result}")
        return result
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/actions/new_decomp_schedule.py | metagpt/ext/stanford_town/actions/new_decomp_schedule.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : new_decomp_schedule
import datetime
from metagpt.ext.stanford_town.actions.st_action import STAction
from metagpt.logs import logger
class NewDecompSchedule(STAction):
    """Re-plan a window of a role's daily schedule around an inserted action.

    The LLM is shown the originally planned schedule for the window plus the
    truncated "what already happened" prefix, and asked for a revised
    minute-level schedule covering the same window.
    """

    name: str = "NewDecompSchedule"

    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        """Accept the response only if it parses into [action, minutes] pairs
        whose durations sum to the length of the window stated in the prompt.
        """
        try:
            llm_resp = self._func_cleanup(llm_resp, prompt)
            dur_sum = 0
            for act, dur in llm_resp:
                dur_sum += dur
                # Fixed: these two type checks were inverted (they rejected
                # responses whose types were CORRECT), and the function could
                # never return True because its success flag was never set --
                # so every LLM response fell through to the fallback schedule.
                if not isinstance(act, str):
                    return False
                if not isinstance(dur, int):
                    return False
            # "... originally planned schedule from X to Y." -> [X, Y]
            x = prompt.split("\n")[0].split("originally planned schedule from")[-1].strip()[:-1]
            x = [datetime.datetime.strptime(i.strip(), "%H:%M %p") for i in x.split(" to ")]
            delta_min = int((x[1] - x[0]).total_seconds() / 60)

            if int(dur_sum) != int(delta_min):
                return False
        except Exception:
            return False
        return True

    def _func_cleanup(self, llm_resp: str, prompt: str) -> list:
        """Parse "<HH:MM ~ HH:MM> -- <action>" lines into [action, minutes].

        The prompt tail is prepended because the completion continues the
        partially written "new plan" section of the prompt.
        """
        new_schedule = prompt + " " + llm_resp.strip()
        new_schedule = new_schedule.split("The revised schedule:")[-1].strip()
        new_schedule = new_schedule.split("\n")

        ret_temp = []
        for i in new_schedule:
            ret_temp += [i.split(" -- ")]

        ret = []
        for time_str, action in ret_temp:
            start_time = time_str.split(" ~ ")[0].strip()
            end_time = time_str.split(" ~ ")[1].strip()
            delta = datetime.datetime.strptime(end_time, "%H:%M") - datetime.datetime.strptime(start_time, "%H:%M")
            delta_min = int(delta.total_seconds() / 60)
            # Entries whose end precedes their start would come out negative;
            # clamp them to zero.
            if delta_min < 0:
                delta_min = 0
            ret += [[action, delta_min]]
        return ret

    def _func_fail_default_resp(self, main_act_dur: list, truncated_act_dur: list) -> list:
        """Deterministic fallback: keep the truncated prefix and fill the rest
        of the window from the original plan, trimming any overshoot so the
        total matches the original window. (Annotations fixed: these are
        [action, minutes] lists, not ints.)
        """
        dur_sum = 0
        for act, dur in main_act_dur:
            dur_sum += dur

        ret = truncated_act_dur[:]
        ret += main_act_dur[len(ret) - 1 :]

        # If there are access, we need to trim...
        ret_dur_sum = 0
        count = 0
        over = None
        for act, dur in ret:
            ret_dur_sum += dur
            if ret_dur_sum == dur_sum:
                break
            if ret_dur_sum > dur_sum:
                over = ret_dur_sum - dur_sum
                break
            count += 1

        if over:
            ret = ret[: count + 1]
            ret[-1][1] -= over
        return ret

    async def run(
        self,
        role: "STRole",
        main_act_dur: list,
        truncated_act_dur: list,
        start_time_hour: datetime.datetime,
        end_time_hour: datetime.datetime,
        inserted_act: str,
        inserted_act_dur: int,
        *args,
        **kwargs,
    ):
        """Build the re-planning prompt and return the revised
        [action, minutes] schedule for the window."""

        def create_prompt_input(
            role: "STRole",
            main_act_dur: list,
            truncated_act_dur: list,
            start_time_hour: datetime.datetime,
            end_time_hour: datetime.datetime,
            inserted_act: str,
            inserted_act_dur: int,
        ):
            persona_name = role.name
            start_hour_str = start_time_hour.strftime("%H:%M %p")
            end_hour_str = end_time_hour.strftime("%H:%M %p")

            # Render the original plan as "HH:MM ~ HH:MM -- action" lines.
            original_plan = ""
            for_time = start_time_hour
            for i in main_act_dur:
                original_plan += (
                    f'{for_time.strftime("%H:%M")} ~ '
                    f'{(for_time + datetime.timedelta(minutes=int(i[1]))).strftime("%H:%M")} -- ' + i[0]
                )
                original_plan += "\n"
                for_time += datetime.timedelta(minutes=int(i[1]))

            # Render the already-performed prefix, ending with a dangling
            # "HH:MM ~" line for the LLM to continue.
            new_plan_init = ""
            for_time = start_time_hour
            for count, i in enumerate(truncated_act_dur):
                new_plan_init += (
                    f'{for_time.strftime("%H:%M")} ~ '
                    f'{(for_time + datetime.timedelta(minutes=int(i[1]))).strftime("%H:%M")} -- ' + i[0]
                )
                new_plan_init += "\n"
                if count < len(truncated_act_dur) - 1:
                    for_time += datetime.timedelta(minutes=int(i[1]))

            new_plan_init += (for_time + datetime.timedelta(minutes=int(i[1]))).strftime("%H:%M") + " ~"

            # Order matters: must match the placeholders in
            # new_decomp_schedule_v1.txt.
            prompt_input = [
                persona_name,
                start_hour_str,
                end_hour_str,
                original_plan,
                persona_name,
                inserted_act,
                inserted_act_dur,
                persona_name,
                start_hour_str,
                end_hour_str,
                end_hour_str,
                new_plan_init,
            ]
            return prompt_input

        prompt_input = create_prompt_input(
            role, main_act_dur, truncated_act_dur, start_time_hour, end_time_hour, inserted_act, inserted_act_dur
        )
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, "new_decomp_schedule_v1.txt")
        self.fail_default_resp = self._func_fail_default_resp(main_act_dur, truncated_act_dur)
        output = await self._run_gpt35_max_tokens(prompt, max_tokens=1000)
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/actions/task_decomp.py | metagpt/ext/stanford_town/actions/task_decomp.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : task_decomp
import datetime
from metagpt.ext.stanford_town.actions.st_action import STAction
from metagpt.logs import logger
class TaskDecomp(STAction):
    """Decompose one hourly-schedule task into minute-level subtasks whose
    durations sum exactly to the parent task's duration."""

    name: str = "TaskDecomp"

    def _func_cleanup(self, llm_resp: str, prompt: str) -> list:
        """Parse the LLM response into [subtask, duration] pairs and force
        the durations to sum to the total stated in the prompt."""
        # TODO SOMETHING HERE sometimes fails... See screenshot
        temp = [i.strip() for i in llm_resp.split("\n")]
        _cr = []
        cr = []
        for count, i in enumerate(temp):
            if count != 0:
                # Drop the leading "N) <name> is" tokens on continuation lines.
                _cr += [" ".join([j.strip() for j in i.split(" ")][3:])]
            else:
                _cr += [i]
        for count, i in enumerate(_cr):
            k = [j.strip() for j in i.split("(duration in minutes:")]
            task = k[0]
            if task[-1] == ".":
                task = task[:-1]
            duration = int(k[1].split(",")[0].strip())
            cr += [[task, duration]]

        total_expected_min = int(prompt.split("(total duration in minutes")[-1].split("):")[0].strip())

        # TODO -- now, you need to make sure that this is the same as the sum of
        # the current action sequence.
        # Expand each subtask into one slot per minute (durations rounded
        # down to a multiple of 5) so the total can be clipped or padded to
        # exactly total_expected_min.
        curr_min_slot = [
            ["dummy", -1],
        ]  # (task_name, task_index)
        for count, i in enumerate(cr):
            i_task = i[0]
            i_duration = i[1]

            i_duration -= i_duration % 5
            if i_duration > 0:
                for j in range(i_duration):
                    curr_min_slot += [(i_task, count)]
        curr_min_slot = curr_min_slot[1:]

        if len(curr_min_slot) > total_expected_min:
            # NOTE(review): overwrites only the last 5 slots with the task at
            # slot 60 -- a hard-coded trim inherited from upstream; confirm it
            # behaves sensibly when the overshoot exceeds 5 minutes or the
            # slot list is shorter than 61 entries.
            last_task = curr_min_slot[60]
            for i in range(1, 6):
                curr_min_slot[-1 * i] = last_task
        elif len(curr_min_slot) < total_expected_min:
            # Pad the tail by repeating the final subtask.
            last_task = curr_min_slot[-1]
            for i in range(total_expected_min - len(curr_min_slot)):
                curr_min_slot += [last_task]

        # Collapse consecutive identical tasks back into [task, duration].
        cr_ret = [
            ["dummy", -1],
        ]
        for task, task_index in curr_min_slot:
            if task != cr_ret[-1][0]:
                cr_ret += [[task, 1]]
            else:
                cr_ret[-1][1] += 1
        cr = cr_ret[1:]
        return cr

    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        # A response is valid iff cleanup succeeds.
        # TODO -- this sometimes generates error
        try:
            self._func_cleanup(llm_resp, prompt)
        except Exception:
            return False
        return True

    def _func_fail_default_resp(self) -> list:
        # Fallback decomposition when the LLM output is unusable.
        # (Annotation fixed: this returns a list, not an int.)
        fs = [["asleep", 0]]
        return fs

    async def run(self, role: "STRole", task_desc: str, truncated_act_dur: int, *args, **kwargs):
        """Decompose `task_desc` (lasting `truncated_act_dur` minutes) into
        subtasks labelled "<task_desc> (<subtask>)".

        (Annotation fixed: `task_desc` is the action description string;
        `truncated_act_dur` is the total duration in minutes despite its
        name.)
        """

        def create_prompt_input(role, task, duration):
            """
            Today is Saturday June 25. From 00:00 ~ 06:00am, Maeve is
            planning on sleeping, 06:00 ~ 07:00am, Maeve is
            planning on waking up and doing her morning routine,
            and from 07:00am ~08:00am, Maeve is planning on having breakfast.
            """
            curr_f_org_index = role.scratch.get_f_daily_schedule_hourly_org_index()
            all_indices = []
            # if curr_f_org_index > 0:
            #     all_indices += [curr_f_org_index-1]
            all_indices += [curr_f_org_index]
            if curr_f_org_index + 1 <= len(role.scratch.f_daily_schedule_hourly_org):
                all_indices += [curr_f_org_index + 1]
            if curr_f_org_index + 2 <= len(role.scratch.f_daily_schedule_hourly_org):
                all_indices += [curr_f_org_index + 2]

            curr_time_range = ""

            logger.debug("DEBUG")
            logger.debug(role.scratch.f_daily_schedule_hourly_org)
            logger.debug(all_indices)

            # Summarize the current and next two hourly entries into the
            # "Today is ... From HH:MMAM ~ HH:MMAM, X is planning on ..." blurb.
            summ_str = f'Today is {role.scratch.curr_time.strftime("%B %d, %Y")}. '
            summ_str += "From "
            for index in all_indices:
                logger.debug(f"index {index}")
                if index < len(role.scratch.f_daily_schedule_hourly_org):
                    start_min = 0
                    for i in range(index):
                        start_min += role.scratch.f_daily_schedule_hourly_org[i][1]
                    end_min = start_min + role.scratch.f_daily_schedule_hourly_org[index][1]
                    start_time = datetime.datetime.strptime("00:00:00", "%H:%M:%S") + datetime.timedelta(
                        minutes=start_min
                    )
                    end_time = datetime.datetime.strptime("00:00:00", "%H:%M:%S") + datetime.timedelta(
                        minutes=end_min
                    )
                    start_time_str = start_time.strftime("%H:%M%p")
                    end_time_str = end_time.strftime("%H:%M%p")
                    summ_str += (
                        f"{start_time_str} ~ {end_time_str}, {role.name} is planning "
                        f"on {role.scratch.f_daily_schedule_hourly_org[index][0]}, "
                    )
                    if curr_f_org_index + 1 == index:
                        curr_time_range = f"{start_time_str} ~ {end_time_str}"
            summ_str = summ_str[:-2] + "."

            # Order matters: must match the placeholders in task_decomp_v3.txt.
            prompt_input = []
            prompt_input += [role.scratch.get_str_iss()]
            prompt_input += [summ_str]
            # prompt_input += [role.scratch.get_str_curr_date_str()]
            prompt_input += [role.scratch.get_str_firstname()]
            prompt_input += [role.scratch.get_str_firstname()]
            prompt_input += [task]
            prompt_input += [curr_time_range]
            prompt_input += [duration]
            prompt_input += [role.scratch.get_str_firstname()]
            return prompt_input

        prompt_input = create_prompt_input(role, task_desc, truncated_act_dur)
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, "task_decomp_v3.txt")
        self.fail_default_resp = self._func_fail_default_resp()
        output = await self._run_gpt35_max_tokens(prompt, max_tokens=1000)
        logger.info(f"Role: {role.name} {self.cls_name} output: {output}")

        # Clip the decomposition so its durations never exceed the requested
        # total, then stretch the last kept subtask to absorb any remainder.
        fin_output = []
        time_sum = 0
        for i_task, i_duration in output:
            time_sum += i_duration
            # HM?????????
            # if time_sum < duration:
            if time_sum <= truncated_act_dur:
                fin_output += [[i_task, i_duration]]
            else:
                break
        ftime_sum = 0
        for fi_task, fi_duration in fin_output:
            ftime_sum += fi_duration

        fin_output[-1][1] += truncated_act_dur - ftime_sum
        output = fin_output

        # Prefix each subtask with its parent description:
        # "<task_desc> (<subtask>)".
        task_decomp = output
        ret = []
        for decomp_task, duration in task_decomp:
            ret += [[f"{task_desc} ({decomp_task})", duration]]
        output = ret
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/actions/inner_voice_action.py | metagpt/ext/stanford_town/actions/inner_voice_action.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
from metagpt.ext.stanford_town.actions.st_action import STAction
from metagpt.logs import logger
class AgentWhisperThoughtAction(STAction):
    """Turn a whispered statement into an inner thought for the role."""

    name: str = "AgentWhisperThoughtAction"

    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        # A response is valid iff cleanup succeeds.
        try:
            self._func_cleanup(llm_resp, prompt)
            return True
        except Exception:
            return False

    def _func_cleanup(self, llm_resp: str, prompt: str = "") -> str:
        # Keep only the text before the first double quote.
        # (Annotation fixed: this returns a str, not a list.)
        return llm_resp.split('"')[0].strip()

    def _func_fail_default_resp(self) -> str:
        # NOTE(review): intentionally returns None (no sensible default
        # thought); the `-> str` annotation mirrors the sibling actions --
        # confirm callers tolerate None here.
        pass

    async def run(self, role: "STRole", statements: str, test_input=None, verbose=False) -> str:
        """Generate the role's inner-voice thought for `statements`."""

        def create_prompt_input(role: "STRole", statements, test_input=None):
            prompt_input = [role.scratch.name, statements]
            return prompt_input

        prompt_input = create_prompt_input(role, statements)
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, "whisper_inner_thought_v1.txt")
        output = await self._run_gpt35_max_tokens(prompt, max_tokens=50)
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/actions/summarize_conv.py | metagpt/ext/stanford_town/actions/summarize_conv.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : summarize the content of agents' conversation
from metagpt.ext.stanford_town.actions.st_action import STAction
from metagpt.logs import logger
class SummarizeConv(STAction):
    """Condense a two-party conversation into a short "conversing about ..."
    phrase via the LLM."""

    name: str = "SummarizeConv"

    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        """A response is considered valid whenever cleanup succeeds."""
        try:
            self._func_cleanup(llm_resp, prompt)
        except Exception:
            return False
        return True

    def _func_cleanup(self, llm_resp: str, prompt: str) -> str:
        """Prefix the stripped LLM output so it reads as an activity."""
        return "conversing about " + llm_resp.strip()

    def _func_fail_default_resp(self) -> str:
        """Fallback summary used when the LLM response is unusable."""
        return "conversing with a housemate about morning greetings"

    async def run(self, conv: list):
        """Summarize `conv`, a list of [speaker, utterance] rows."""

        def create_prompt_input(conversation: list):
            # Render the conversation as one speaker-labelled line per turn.
            lines = [f'{row[0]}: "{row[1]}"\n' for row in conversation]
            return ["".join(lines)]

        prompt = self.generate_prompt_with_tmpl_filename(
            create_prompt_input(conv), "summarize_conversation_v1.txt"
        )
        example_output = "conversing about what to eat for lunch"
        special_instruction = (
            "The output must continue the sentence above by filling in the <fill in> tag. "
            "Don't start with 'this is a conversation about...' Just finish the sentence "
            "but do not miss any important details (including who are chatting)."
        )
        output = await self._run_gpt35(prompt, example_output, special_instruction)
        logger.info(f"Action: {self.cls_name} output: {output}")
        return output
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/actions/gen_action_details.py | metagpt/ext/stanford_town/actions/gen_action_details.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : gen_action_details
import random
from metagpt.environment.stanford_town.env_space import EnvObsParams, EnvObsType
from metagpt.ext.stanford_town.actions.st_action import STAction
from metagpt.logs import logger
class GenActionSector(STAction):
    """Choose the sector (within the world) where the role's next action takes place."""

    name: str = "GenActionSector"

    def _func_cleanup(self, llm_resp: str, prompt: str):
        # Keep only the text before the closing brace of the templated answer.
        cleaned_response = llm_resp.split("}")[0]
        return cleaned_response

    def _func_validate(self, llm_resp: str, prompt: str):
        # Reject blank answers, answers missing the closing brace, or multi-item answers.
        if len(llm_resp.strip()) < 1:
            return False
        if "}" not in llm_resp:
            return False
        if "," in llm_resp:
            return False
        return True

    def _func_fail_default_resp(self):
        # Fallback sector used when the LLM response cannot be validated.
        fs = "kitchen"
        return fs

    async def run(self, role: "STRole", access_tile: dict[str, str], act_desp: str):
        """Pick an accessible sector for `act_desp`; falls back to the living area."""

        def create_prompt_input(role, access_tile: dict[str, str], act_desp):
            act_world = f"{access_tile['world']}"
            prompt_input = []
            prompt_input += [role.scratch.get_str_name()]
            prompt_input += [role.scratch.living_area.split(":")[1]]
            x = f"{act_world}:{role.scratch.living_area.split(':')[1]}"
            prompt_input += [role.s_mem.get_str_accessible_sector_arenas(x)]
            prompt_input += [role.scratch.get_str_name()]
            prompt_input += [f"{access_tile['sector']}"]
            x = f"{act_world}:{access_tile['sector']}"
            prompt_input += [role.s_mem.get_str_accessible_sector_arenas(x)]
            if role.scratch.get_str_daily_plan_req() != "":
                prompt_input += [f"\n{role.scratch.get_str_daily_plan_req()}"]
            else:
                prompt_input += [""]
            # MAR 11 TEMP
            # NOTE(review): everything built above is discarded here — the temporary
            # patch below rebuilds prompt_input from scratch using a filtered sector list.
            prompt_input = []
            act_world = access_tile["world"]
            accessible_sector_str = role.s_mem.get_str_accessible_sectors(act_world)
            curr = accessible_sector_str.split(", ")
            fin_accessible_sectors = []
            for i in curr:
                # Only keep a persona's house if it belongs to this role (by last name).
                if "'s house" in i:
                    if role.scratch.last_name in i:
                        fin_accessible_sectors += [i]
                else:
                    fin_accessible_sectors += [i]
            accessible_sector_str = ", ".join(fin_accessible_sectors)
            # END MAR 11 TEMP
            prompt_input += [accessible_sector_str]
            act_desp_1 = act_desp
            act_desp_2 = act_desp
            # Split "task (detail)" into the task part and the parenthesized detail.
            if "(" in act_desp:
                act_desp_1 = act_desp.split("(")[0].strip()
                act_desp_2 = act_desp.split("(")[-1][:-1]
            prompt_input += [role.scratch.get_str_name()]
            prompt_input += [act_desp_1]
            prompt_input += [act_desp_2]
            prompt_input += [role.scratch.get_str_name()]
            return prompt_input

        prompt_template = "action_location_sector_v1.txt"
        prompt_input = create_prompt_input(role, access_tile, act_desp)
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, prompt_template)
        self.fail_default_resp = self._func_fail_default_resp()
        output = await self._run_gpt35_max_tokens(prompt, max_tokens=15)
        # If the chosen sector is not actually accessible, fall back to the living area.
        y = f"{access_tile['world']}"
        x = [i.strip() for i in role.s_mem.get_str_accessible_sectors(y).split(",")]
        if output not in x:
            # output = random.choice(x)
            output = role.scratch.living_area.split(":")[1]
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output
class GenActionArena(STAction):
    """Choose the arena (within a sector) where the role's next action takes place."""

    name: str = "GenActionArena"

    def _func_cleanup(self, llm_resp: str, prompt: str):
        # Keep only the text before the closing brace of the templated answer.
        cleaned_response = llm_resp.split("}")[0]
        return cleaned_response

    def _func_validate(self, llm_resp: str, prompt: str):
        # Reject blank answers, answers missing the closing brace, or multi-item answers.
        if len(llm_resp.strip()) < 1:
            return False
        if "}" not in llm_resp:
            return False
        if "," in llm_resp:
            return False
        return True

    def _func_fail_default_resp(self):
        # Fallback arena used when the LLM response cannot be validated.
        fs = "kitchen"
        return fs

    async def run(self, role: "STRole", act_desp: str, act_world: str, act_sector: str):
        """Pick an arena inside `act_world`:`act_sector` that suits `act_desp`."""

        def create_prompt_input(role, act_desp, act_world, act_sector):
            prompt_input = []
            prompt_input += [role.scratch.get_str_name()]
            x = f"{act_world}:{act_sector}"
            prompt_input += [act_sector]
            # MAR 11 TEMP
            accessible_arena_str = role.s_mem.get_str_accessible_sector_arenas(x)
            curr = accessible_arena_str.split(", ")
            fin_accessible_arenas = []
            for i in curr:
                # Only keep a persona's room if it belongs to this role (by last name).
                if "'s room" in i:
                    if role.scratch.last_name in i:
                        fin_accessible_arenas += [i]
                else:
                    fin_accessible_arenas += [i]
            accessible_arena_str = ", ".join(fin_accessible_arenas)
            # END MAR 11 TEMP
            prompt_input += [accessible_arena_str]
            act_desp_1 = act_desp
            act_desp_2 = act_desp
            # Split "task (detail)" into the task part and the parenthesized detail.
            if "(" in act_desp:
                act_desp_1 = act_desp.split("(")[0].strip()
                act_desp_2 = act_desp.split("(")[-1][:-1]
            prompt_input += [role.scratch.get_str_name()]
            prompt_input += [act_desp_1]
            prompt_input += [act_desp_2]
            prompt_input += [role.scratch.get_str_name()]
            prompt_input += [act_sector]
            prompt_input += [accessible_arena_str]
            return prompt_input

        prompt_template = "action_location_object_vMar11.txt"
        prompt_input = create_prompt_input(role, act_desp, act_world, act_sector)
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, prompt_template)
        self.fail_default_resp = self._func_fail_default_resp()
        output = await self._run_gpt35_max_tokens(prompt, max_tokens=15)
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output
class GenActionObject(STAction):
    """Pick the game object within an arena that matches the action description."""

    name: str = "GenActionObject"

    def _func_validate(self, llm_resp: str, prompt: str):
        # Any non-blank response is acceptable.
        return len(llm_resp.strip()) >= 1

    def _func_cleanup(self, llm_resp: str, prompt: str):
        return llm_resp.strip()

    def _func_fail_default_resp(self):
        # Fallback object used when the LLM response is blank.
        return "bed"

    async def run(self, role: "STRole", act_desp: str, temp_address: str):
        """Choose an accessible game object at `temp_address` for `act_desp`."""

        def create_prompt_input(role, act_desp, temp_address):
            # When the description carries a parenthesized detail, use only that part.
            desp = act_desp.split("(")[-1][:-1] if "(" in act_desp else act_desp
            return [desp, role.s_mem.get_str_accessible_arena_game_objects(temp_address)]

        prompt = self.generate_prompt_with_tmpl_filename(
            create_prompt_input(role, act_desp, temp_address), "action_object_v2.txt"
        )
        self.fail_default_resp = self._func_fail_default_resp()
        output = await self._run_gpt35_max_tokens(prompt, max_tokens=15)
        # If the LLM picked an unknown object, substitute a random accessible one.
        candidates = [i.strip() for i in role.s_mem.get_str_accessible_arena_game_objects(temp_address).split(",")]
        if output not in candidates:
            output = random.choice(candidates)
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output
class GenPronunciatio(STAction):
    """Generate an emoji pronunciatio (at most three characters) for an action."""

    name: str = "GenPronunciatio"

    def _func_cleanup(self, llm_resp: str, prompt: str):
        # Keep at most the first three characters of the stripped emoji string.
        emoji = llm_resp.strip()
        return emoji[:3] if len(emoji) > 3 else emoji

    def _func_validate(self, llm_resp: str, prompt: str):
        try:
            self._func_cleanup(llm_resp, prompt="")
            return len(llm_resp) != 0
        except Exception:
            return False

    def _func_fail_default_resp(self):
        # Fallback emoji used when the LLM response is empty or invalid.
        return "😋"

    async def run(self, role: "STRole", act_desp: str):
        """Ask the LLM for an emoji representation of `act_desp`."""

        def create_prompt_input(act_desp):
            # When the description carries a parenthesized detail, use only that part.
            if "(" in act_desp:
                act_desp = act_desp.split("(")[-1].split(")")[0]
            return [act_desp]

        prompt = self.generate_prompt_with_tmpl_filename(
            create_prompt_input(act_desp), "generate_pronunciatio_v1.txt"
        )
        example_output = "🛁🧖♀️"
        special_instruction = "The value for the output must ONLY contain the emojis."
        self.fail_default_resp = self._func_fail_default_resp()
        output = await self._run_gpt35(prompt, example_output, special_instruction)
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output
class GenEventTriple(STAction):
    """Produce a (subject, predicate, object) event triple for a role's action."""

    name: str = "GenEventTriple"

    def _func_cleanup(self, llm_resp: str, prompt: str):
        # Take the text before the first ')' and split it on commas.
        head = llm_resp.strip().split(")")[0]
        return [part.strip() for part in head.split(",")]

    def _func_validate(self, llm_resp: str, prompt: str):
        # The cleaned response must be exactly (predicate, object).
        try:
            return len(self._func_cleanup(llm_resp, prompt="")) == 2
        except Exception:
            return False

    def _func_fail_default_resp(self, role):
        # Fallback triple used when the LLM response cannot be parsed.
        return (role.name, "is", "idle")

    async def run(self, role: "STRole", act_desp: str):
        """Build the (role, predicate, object) triple describing `act_desp`."""

        def create_prompt_input(role, act_desp):
            # When the description carries a parenthesized detail, use only that part.
            if "(" in act_desp:
                act_desp = act_desp.split("(")[-1].split(")")[0]
            return [role.name, act_desp, role.name]

        prompt = self.generate_prompt_with_tmpl_filename(
            create_prompt_input(role, act_desp), "generate_event_triple_v1.txt"
        )
        self.fail_default_resp = self._func_fail_default_resp(role)
        output = await self._run_gpt35_max_tokens(prompt, max_tokens=30)
        output = (role.name, output[0], output[1])
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output
class GenActObjDescription(STAction):
    """Describe the state of the game object involved in the role's action."""

    name: str = "GenActObjDescription"

    def _func_cleanup(self, llm_resp: str, prompt: str):
        # Drop a trailing period. NOTE: raises IndexError on an empty response,
        # which _func_validate deliberately turns into a validation failure.
        cr = llm_resp.strip()
        if cr[-1] == ".":
            cr = cr[:-1]
        return cr

    def _func_validate(self, llm_resp: str, prompt: str):
        # Valid iff cleanup succeeds without raising.
        try:
            llm_resp = self._func_cleanup(llm_resp, prompt="")
        except Exception:
            return False
        return True

    def _func_fail_default_resp(self, act_game_object):
        # Fallback object description used when validation fails.
        fs = f"{act_game_object} is idle"
        return fs

    async def run(self, role: "STRole", act_game_object: str, act_desp: str):
        """Ask the LLM what state `act_game_object` is in while `role` does `act_desp`."""

        def create_prompt_input(act_game_object, act_desp, role):
            prompt_input = [act_game_object, role.name, act_desp, act_game_object, act_game_object]
            return prompt_input

        prompt_template = "generate_obj_event_v1.txt"
        prompt_input = create_prompt_input(act_game_object, act_desp, role)
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, prompt_template)
        example_output = "being fixed"
        special_instruction = "The output should ONLY contain the phrase that should go in <fill in>."
        self.fail_default_resp = self._func_fail_default_resp(act_game_object)
        output = await self._run_gpt35(prompt, example_output, special_instruction)
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output
class GenObjEventTriple(STAction):
    """Produce a (subject, predicate, object) event triple for a game object."""

    name: str = "GenObjEventTriple"

    def _func_cleanup(self, llm_resp: str, prompt: str):
        # Take the text before the first ')' and split it on commas.
        head = llm_resp.strip().split(")")[0]
        return [part.strip() for part in head.split(",")]

    def _func_validate(self, llm_resp: str, prompt: str):
        # The cleaned response must be exactly (predicate, object).
        try:
            return len(self._func_cleanup(llm_resp, prompt="")) == 2
        except Exception:
            return False

    def _func_fail_default_resp(self, act_game_object: str):
        # Fallback triple used when the LLM response cannot be parsed.
        return (act_game_object, "is", "idle")

    async def run(self, role: "STRole", act_game_object, act_obj_desp):
        """Build the (object, predicate, object-state) triple from `act_obj_desp`."""

        def create_prompt_input(act_game_object, act_obj_desp):
            return [act_game_object, act_obj_desp, act_game_object]

        prompt = self.generate_prompt_with_tmpl_filename(
            create_prompt_input(act_game_object, act_obj_desp), "generate_event_triple_v1.txt"
        )
        self.fail_default_resp = self._func_fail_default_resp(act_game_object)
        output = await self._run_gpt35_max_tokens(prompt, max_tokens=30)
        output = (act_game_object, output[0], output[1])
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output
class GenActionDetails(STAction):
    """Orchestrate the sub-actions that resolve every detail of a role's next action.

    Produces the action address (world:sector:arena:object), emoji pronunciatio,
    event triples, and the paired object-state fields consumed by scratch memory.
    """

    name: str = "GenActionDetails"

    def _func_cleanup(self, llm_resp: str, prompt: str) -> list:
        # This action makes no direct LLM call of its own, so there is nothing to clean.
        pass

    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        # TODO -- this sometimes generates error
        try:
            # Bug fix: _func_cleanup requires both arguments. The previous
            # one-argument call raised TypeError, which was swallowed by the
            # except below and made validation always return False.
            self._func_cleanup(llm_resp, prompt)
        except Exception:
            return False
        return True

    def _func_fail_default_resp(self):
        # Empty dict: no action details could be generated.
        fs = {}
        return fs

    async def run(self, role: "STRole", act_desp: str, act_dura):
        """Resolve location, emoji, and event details for `act_desp` lasting `act_dura` minutes."""
        access_tile = role.rc.env.observe(
            obs_params=EnvObsParams(obs_type=EnvObsType.GET_TITLE, coord=role.scratch.curr_tile)
        )
        act_world = access_tile["world"]
        act_sector = await GenActionSector().run(role, access_tile, act_desp)
        act_arena = await GenActionArena().run(role, act_desp, act_world, act_sector)
        act_address = f"{act_world}:{act_sector}:{act_arena}"
        # Fall back to a placeholder when the arena exposes no game objects.
        if not role.s_mem.get_str_accessible_arena_game_objects(act_address):
            act_game_object = "<random>"
        else:
            act_game_object = await GenActionObject().run(role, act_desp, act_address)
        new_address = f"{act_world}:{act_sector}:{act_arena}:{act_game_object}"
        act_pron = await GenPronunciatio().run(role, act_desp)
        act_event = await GenEventTriple().run(role, act_desp)
        # Persona's actions also influence the object states. We set those up here.
        act_obj_desp = await GenActObjDescription().run(role, act_game_object, act_desp)
        act_obj_pron = await GenPronunciatio().run(role, act_obj_desp)
        act_obj_event = await GenObjEventTriple().run(role, act_game_object, act_obj_desp)
        result_dict = {
            "action_address": new_address,
            "action_duration": int(act_dura),
            "action_description": act_desp,
            "action_pronunciatio": act_pron,
            "action_event": act_event,
            "chatting_with": None,
            "chat": None,
            "chatting_with_buffer": None,
            "chatting_end_time": None,
            "act_obj_description": act_obj_desp,
            "act_obj_pronunciatio": act_obj_pron,
            "act_obj_event": act_obj_event,
        }
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {result_dict}")
        return result_dict
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/actions/agent_chat_sum_rel.py | metagpt/ext/stanford_town/actions/agent_chat_sum_rel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : summarize relationship in a agent chat
from metagpt.ext.stanford_town.actions.st_action import STAction
from metagpt.logs import logger
class AgentChatSumRel(STAction):
    """Summarize the relationship between two agents from chat statements."""

    name: str = "AgentChatSumRel"

    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        # The split/strip in cleanup cannot realistically fail on a str, so this
        # mirrors the cleanup step and reports success.
        try:
            llm_resp.split('"')[0].strip()
            return True
        except Exception:
            return False

    def _func_cleanup(self, llm_resp: str, prompt: str) -> str:
        # Keep only the text before the first double quote.
        return llm_resp.split('"')[0].strip()

    def _func_fail_default_resp(self) -> str:
        # No fallback defined; returns None on failure.
        pass

    async def run(self, init_role: "STRole", target_role: "STRole", statements: str) -> str:
        """Summarize how `init_role` relates to `target_role` given `statements`."""

        def create_prompt_input(init_role: "STRole", target_role: "STRole", statements: str) -> str:
            return [statements, init_role.name, target_role.name]

        prompt = self.generate_prompt_with_tmpl_filename(
            create_prompt_input(init_role, target_role, statements),
            "summarize_chat_relationship_v2.txt",
        )
        example_output = "Jane Doe is working on a project"
        special_instruction = "The output should be a string that responds to the question."
        output = await self._run_gpt35(prompt, example_output, special_instruction)
        logger.info(f"Role: {init_role.name} Action: {self.cls_name} output: {output}")
        return output
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/actions/wake_up.py | metagpt/ext/stanford_town/actions/wake_up.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : wake_up
from metagpt.ext.stanford_town.actions.st_action import STAction
from metagpt.logs import logger
class WakeUp(STAction):
    """Ask the LLM what hour (am) the role wakes up, as an integer."""

    name: str = "WakeUp"

    def _func_validate(self, llm_resp: str, prompt: str = None) -> bool:
        # Valid iff the response parses to an integer hour.
        try:
            self._func_cleanup(llm_resp, prompt="")
            return True
        except Exception:
            return False

    def _func_cleanup(self, llm_resp: str, prompt: str) -> int:
        # e.g. "7am" -> 7
        return int(llm_resp.strip().lower().split("am")[0])

    def _func_fail_default_resp(self) -> int:
        # Default wake-up hour when parsing fails.
        return 8

    async def run(self, role: "STRole"):
        """Prompt the LLM with the role's identity and lifestyle for a wake-up hour."""

        def create_prompt_input(role):
            return [
                role.scratch.get_str_iss(),
                role.scratch.get_str_lifestyle(),
                role.scratch.get_str_firstname(),
            ]

        prompt = self.generate_prompt_with_tmpl_filename(create_prompt_input(role), "wake_up_hour_v1.txt")
        self.fail_default_resp = self._func_fail_default_resp()
        output = await self._run_gpt35_max_tokens(prompt, max_tokens=5)
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/actions/gen_daily_schedule.py | metagpt/ext/stanford_town/actions/gen_daily_schedule.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : gen_daily_schedule
from metagpt.ext.stanford_town.actions.st_action import STAction
from metagpt.logs import logger
class GenDailySchedule(STAction):
    """Generate a high-level daily schedule (list of activity strings) for a role."""

    name: str = "GenDailySchedule"

    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        # Valid iff cleanup succeeds without raising.
        try:
            self._func_cleanup(llm_resp, prompt="")
        except Exception:
            return False
        return True

    def _func_cleanup(self, llm_resp: str, prompt: str) -> list:
        """Parse numbered ')'-delimited items like '1) wake up at 6:00 am., 2) ...' into a list."""
        cr = []
        for segment in llm_resp.split(")"):
            # Robustness fix: a trailing ")" produces an empty segment, which used to
            # raise IndexError and reject the entire (otherwise parseable) response.
            if not segment:
                continue
            # A segment ending in a digit is followed by the next item number; drop it.
            if segment[-1].isdigit():
                segment = segment[:-1].strip()
                # Guard against a now-empty segment before peeking at its last char.
                if segment and (segment[-1] == "." or segment[-1] == ","):
                    cr += [segment[:-1].strip()]
        return cr

    def _func_fail_default_resp(self) -> list:
        # Fallback schedule used when the LLM response cannot be parsed.
        # (Annotation fixed: this returns a list of activities, not an int.)
        fs = [
            "wake up and complete the morning routine at 6:00 am",
            "eat breakfast at 7:00 am",
            "read a book from 8:00 am to 12:00 pm",
            "have lunch at 12:00 pm",
            "take a nap from 1:00 pm to 4:00 pm",
            "relax and watch TV from 7:00 pm to 8:00 pm",
            "go to bed at 11:00 pm",
        ]
        return fs

    async def run(self, role: "STRole", wake_up_hour: str):
        """Prompt the LLM for today's plan and prepend the wake-up entry."""

        def create_prompt_input(role, wake_up_hour):
            prompt_input = []
            prompt_input += [role.scratch.get_str_iss()]
            prompt_input += [role.scratch.get_str_lifestyle()]
            prompt_input += [role.scratch.get_str_curr_date_str()]
            prompt_input += [role.scratch.get_str_firstname()]
            prompt_input += [f"{str(wake_up_hour)}:00 am"]
            return prompt_input

        wake_up_hour = int(wake_up_hour)
        prompt_template = "daily_planning_v6.txt"
        prompt_input = create_prompt_input(role, wake_up_hour)
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, prompt_template)
        self.fail_default_resp = self._func_fail_default_resp()
        output = await self._run_gpt35_max_tokens(prompt, max_tokens=500)
        output = [f"wake up and complete the morning routine at {wake_up_hour}:00 am"] + output
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/actions/dummy_action.py | metagpt/ext/stanford_town/actions/dummy_action.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : dummy action to make every STRole can deal DummyMessage which is caused by DummyAction
from metagpt.actions import Action
from metagpt.schema import Message
class DummyAction(Action):
    """Placeholder action; exists only so DummyMessage has a cause, never run directly."""

    async def run(self, *args, **kwargs):
        # Running a DummyAction is always an error.
        raise NotImplementedError
class DummyMessage(Message):
    """
    Dummy message passed to roles so each of them executes once every round.
    """

    # Fixed payload and cause marker consumed by STRole each round.
    content: str = "dummy"
    cause_by: str = "DummyAction"
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/actions/__init__.py | metagpt/ext/stanford_town/actions/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/actions/gen_hourly_schedule.py | metagpt/ext/stanford_town/actions/gen_hourly_schedule.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : gen_hourly_schedule
import random
import string
from metagpt.logs import logger
from .st_action import STAction
def get_random_alphanumeric(i=6, j=6):
    """Return a random alphanumeric string whose length lies between i and j.

    Args:
        i: minimum length (inclusive).
        j: maximum length (inclusive).

    Returns:
        A random string of ASCII letters and digits with i <= len <= j.
    """
    length = random.randint(i, j)
    return "".join(random.choices(string.ascii_letters + string.digits, k=length))
class GenHourlySchedule(STAction):
    """Generate a role's 24-hour schedule, compressed into [activity, minutes] pairs."""

    name: str = "GenHourlySchedule"

    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        # Valid iff cleanup succeeds without raising.
        try:
            self._func_cleanup(llm_resp, prompt="")
        except Exception:
            return False
        return True

    def _func_cleanup(self, llm_resp: str, prompt: str) -> str:
        # Strip a trailing period, then keep only the first output line.
        # (Annotation fixed: this returns a str, not a list.)
        cr = llm_resp.strip()
        if cr[-1] == ".":
            cr = cr[:-1]
        # to only use the first line of output
        cr = cr.split("\n")[0]
        return cr

    def _func_fail_default_resp(self) -> str:
        # Fallback activity used when the LLM response cannot be parsed.
        # (Annotation fixed: this returns a str, not an int.)
        fs = "asleep"
        return fs

    async def _generate_schedule_for_given_hour(
        self, role: "STRole", curr_hour_str, p_f_ds_hourly_org, hour_str, intermission2=None
    ):
        """Ask the LLM what `role` does during `curr_hour_str`, given hours planned so far."""

        def create_prompt_input(persona, curr_hour_str, p_f_ds_hourly_org, hour_str, intermission2=None):
            # Blank schedule template the LLM is asked to fill in, one row per hour.
            schedule_format = ""
            for i in hour_str:
                schedule_format += f"[{persona.scratch.get_str_curr_date_str()} -- {i}]"
                schedule_format += " Activity: [Fill in]\n"
            schedule_format = schedule_format[:-1]
            # Numbered list of the originally intended daily requirements.
            intermission_str = "Here the originally intended hourly breakdown of"
            intermission_str += f" {persona.scratch.get_str_firstname()}'s schedule today: "
            for count, i in enumerate(persona.scratch.daily_req):
                intermission_str += f"{str(count + 1)}) {i}, "
            intermission_str = intermission_str[:-2]
            # Hours already scheduled, rendered as completed template rows.
            prior_schedule = ""
            if p_f_ds_hourly_org:
                prior_schedule = "\n"
                for count, i in enumerate(p_f_ds_hourly_org):
                    prior_schedule += f"[(ID:{get_random_alphanumeric()})"
                    prior_schedule += f" {persona.scratch.get_str_curr_date_str()} --"
                    prior_schedule += f" {hour_str[count]}] Activity:"
                    prior_schedule += f" {persona.scratch.get_str_firstname()}"
                    prior_schedule += f" is {i}\n"
            # The unfinished row the LLM must complete for the current hour.
            prompt_ending = f"[(ID:{get_random_alphanumeric()})"
            prompt_ending += f" {persona.scratch.get_str_curr_date_str()}"
            prompt_ending += f" -- {curr_hour_str}] Activity:"
            prompt_ending += f" {persona.scratch.get_str_firstname()} is"
            if intermission2:
                intermission2 = f"\n{intermission2}"
            prompt_input = []
            prompt_input += [schedule_format]
            prompt_input += [persona.scratch.get_str_iss()]
            prompt_input += [prior_schedule + "\n"]
            prompt_input += [intermission_str]
            if intermission2:
                prompt_input += [intermission2]
            else:
                prompt_input += [""]
            prompt_input += [prompt_ending]
            return prompt_input

        prompt_template = "generate_hourly_schedule_v2.txt"
        prompt_input = create_prompt_input(role, curr_hour_str, p_f_ds_hourly_org, hour_str, intermission2)
        prompt_input_str = "\n".join(prompt_input)
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, prompt_template)
        self.fail_default_resp = self._func_fail_default_resp()
        output = await self._run_gpt35_max_tokens(prompt, max_tokens=50)
        logger.info(
            f"Role: {role.name} _generate_schedule_for_given_hour prompt_input: {prompt_input_str}, "
            f"output: {output}"
        )
        return output

    async def run(self, role: "STRole", wake_up_hour: int):
        """Build the hour-by-hour schedule, then compress it and convert hours to minutes."""
        hour_str = [
            "00:00 AM",
            "01:00 AM",
            "02:00 AM",
            "03:00 AM",
            "04:00 AM",
            "05:00 AM",
            "06:00 AM",
            "07:00 AM",
            "08:00 AM",
            "09:00 AM",
            "10:00 AM",
            "11:00 AM",
            "12:00 PM",
            "01:00 PM",
            "02:00 PM",
            "03:00 PM",
            "04:00 PM",
            "05:00 PM",
            "06:00 PM",
            "07:00 PM",
            "08:00 PM",
            "09:00 PM",
            "10:00 PM",
            "11:00 PM",
        ]
        n_m1_activity = []
        diversity_repeat_count = 1  # TODO mg 1->3
        for i in range(diversity_repeat_count):
            logger.info(f"diversity_repeat_count idx: {i}")
            n_m1_activity_set = set(n_m1_activity)
            # Regenerate the whole day when it has too little variety (< 5 distinct activities).
            if len(n_m1_activity_set) < 5:
                n_m1_activity = []
                for count, curr_hour_str in enumerate(hour_str):
                    if wake_up_hour > 0:
                        # Hours before wake-up are spent sleeping.
                        n_m1_activity += ["sleeping"]
                        wake_up_hour -= 1
                    else:
                        logger.info(f"_generate_schedule_for_given_hour idx: {count}, n_m1_activity: {n_m1_activity}")
                        n_m1_activity += [
                            await self._generate_schedule_for_given_hour(role, curr_hour_str, n_m1_activity, hour_str)
                        ]
        # Step 1. Compressing the hourly schedule to the following format:
        # The integer indicates the number of hours. They should add up to 24.
        # [['sleeping', 6], ['waking up and starting her morning routine', 1],
        # ['eating breakfast', 1], ['getting ready for the day', 1],
        # ['working on her painting', 2], ['taking a break', 1],
        # ['having lunch', 1], ['working on her painting', 3],
        # ['taking a break', 2], ['working on her painting', 2],
        # ['relaxing and watching TV', 1], ['going to bed', 1], ['sleeping', 2]]
        _n_m1_hourly_compressed = []
        prev = None
        prev_count = 0
        for i in n_m1_activity:
            if i != prev:
                # New run of a (different) activity starts here.
                prev_count = 1
                _n_m1_hourly_compressed += [[i, prev_count]]
                prev = i
            elif _n_m1_hourly_compressed:
                # Same activity as the previous hour: extend the current run.
                _n_m1_hourly_compressed[-1][1] += 1
        # Step 2. Expand to min scale (from hour scale)
        # [['sleeping', 360], ['waking up and starting her morning routine', 60],
        # ['eating breakfast', 60],..
        n_m1_hourly_compressed = []
        for task, duration in _n_m1_hourly_compressed:
            n_m1_hourly_compressed += [[task, duration * 60]]
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {n_m1_hourly_compressed}")
        return n_m1_hourly_compressed
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/actions/run_reflect_action.py | metagpt/ext/stanford_town/actions/run_reflect_action.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : Integration Reflect Action
import re
from metagpt.ext.stanford_town.actions.st_action import STAction
from metagpt.logs import logger
# Run GPT Prompt Focal Point method
class AgentFocusPt(STAction):
    """Generate focal-point questions from a set of statements."""

    name: str = "AgentFocusPt"

    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        # Valid iff cleanup succeeds without raising.
        try:
            self._func_cleanup(llm_resp, prompt)
            return True
        except Exception:
            return False

    def _func_cleanup(self, llm_resp: str, prompt: str = "") -> str:
        try:
            """
            Cleanup handling has been completed for run_v2
            """
            # Response is passed through unchanged; _run_gpt35 already cleaned it.
            return llm_resp
        except Exception as exp:
            logger.error(f"{self.cls_name} with error {exp}")

    def _func_fail_default_resp(self) -> str:
        # No fallback defined; returns None on failure.
        pass

    async def run(self, role: "STRole", statements: str, n: int, test_input=None) -> str:
        """Ask the LLM for `n` focal-point questions grounded in `statements`."""

        def create_prompt_input(role: "STRole", statements, n, test_input=None):
            prompt_input = [statements, str(n)]
            return prompt_input

        prompt_input = create_prompt_input(role, statements, n)
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, "generate_focal_pt_v1.txt")
        example_output = '["What should Jane do for lunch", "Does Jane like strawberry", "Who is Jane"]'
        special_instruction = "Output must be a list of str."
        output = await self._run_gpt35(prompt, example_output, special_instruction)
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output
# Run GPT Prompt Insight and Guidance
class AgentInsightAndGuidance(STAction):
    """Extract insights and their supporting evidence indices from numbered statements."""

    name: str = "AgentInsightAndGuidance"

    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        # Valid iff cleanup succeeds without raising.
        try:
            self._func_cleanup(llm_resp, prompt)
            return True
        except Exception:
            return False

    def _func_cleanup(self, llm_resp: str, prompt: str = "") -> dict:
        """Parse lines like '1. <thought> (because of 3, 5)' into {thought: [evidence indices]}."""
        try:
            llm_resp = "1. " + llm_resp.strip()
            ret = dict()
            for i in llm_resp.split("\n"):
                row = " ".join(i.split(". ")[1:])
                # Skip lines that don't cite any evidence.
                if "(because of " not in row:
                    continue
                thought = row.split("(because of ")[0].strip()
                if ")" not in row.split("(because of ")[1]:
                    continue
                # Keep only the integer indices of the cited evidence.
                evi_raw = row.split("(because of ")[1].split(")")[0].strip()
                evi_raw = re.findall(r"\d+", evi_raw)
                evi_raw = [int(i.strip()) for i in evi_raw]
                ret[thought] = evi_raw
            return ret
        except Exception as exp:
            logger.error(f"{self.cls_name} with error {exp}")

    def _func_fail_default_resp(self, n: int) -> list:
        # Fallback: n copies of a placeholder thought.
        # (Annotation fixed: this returns a list, not a str.)
        return ["I am hungry"] * n

    async def run(self, role: "STRole", statements: str, n: int, test_input=None) -> dict:
        """Ask the LLM for `n` insights with evidence, parsed into a dict."""

        def create_prompt_input(role, statements, n, test_input=None):
            prompt_input = [statements, str(n)]
            return prompt_input

        prompt_input = create_prompt_input(role, statements, n)
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, "insight_and_evidence_v1.txt")
        self.fail_default_resp = self._func_fail_default_resp(n)
        output = await self._run_gpt35_max_tokens(prompt, max_tokens=150)
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output
# Run GPT Prompt Event Triple
class AgentEventTriple(STAction):
    """Produce a (subject, predicate, object) event triple from a statement."""

    name: str = "AgentEventTriple"

    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        # Valid only when cleanup yields exactly (predicate, object).
        try:
            llm_resp = self._func_cleanup(llm_resp, prompt="")
            if len(llm_resp) != 2:
                return False
        except Exception:
            return False
        return True

    def _func_cleanup(self, llm_resp: str, prompt: str = "") -> list:
        try:
            cr = llm_resp.strip()
            cr = [i.strip() for i in cr.split(")")[0].split(",")]
            # Keep only the last two elements when the LLM echoes the subject too.
            if len(cr) != 2:
                return cr[-2:]
            return cr
        except Exception as exp:
            logger.error(f"{self.cls_name} with error {exp}")

    def _func_fail_default_resp(self) -> str:
        # No fallback defined; returns None on failure.
        pass

    async def run(self, statements: str, role: "STRole", verbose=False) -> tuple:
        """Build the (role, predicate, object) triple describing `statements`."""

        def create_prompt_input(statements, role):
            # When the statement carries a parenthesized detail, use only that part.
            if "(" in statements:
                statements = statements.split("(")[-1].split(")")[0]
            prompt_input = [role.scratch.name, statements, role.scratch.name]
            return prompt_input

        prompt_input = create_prompt_input(statements, role)
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, "generate_event_triple_v1.txt")
        output = await self._run_gpt35_max_tokens(prompt, max_tokens=30)
        output = (role.scratch.name, output[0], output[1])
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output
# Run GPT Prompt Event Poignancy
class AgentEventPoignancy(STAction):
    """Score the poignancy of an event for a role on a 1-10 integer scale."""

    name: str = "AgentEventPoignancy"

    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        try:
            self._func_cleanup(llm_resp, prompt)
        except Exception:
            return False
        return True

    def _func_cleanup(self, llm_resp: str, prompt: str = "") -> int:
        # Parse the response as an integer score; log (and return None) on failure.
        try:
            return int(llm_resp.strip())
        except Exception as exp:
            logger.error(f"{self.cls_name} with error {exp}")

    def _func_fail_default_resp(self) -> str:
        # No fallback defined; returns None on failure.
        pass

    async def run(self, role: "STRole", statements: str, test_input=None, verbose=False) -> str:
        """Ask the LLM to rate how poignant `statements` is for `role`."""

        def create_prompt_input(role: "STRole", statements: str, test_input=None):
            return [role.scratch.name, role.scratch.get_str_iss(), role.scratch.name, statements]

        prompt = self.generate_prompt_with_tmpl_filename(
            create_prompt_input(role, statements), "poignancy_event_v1.txt"
        )
        example_output = "5"  # ########
        special_instruction = "The output should ONLY contain ONE integer value on the scale of 1 to 10."
        output = await self._run_gpt35(prompt, example_output, special_instruction)
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output
# Run GPT Prompt Chat Poignancy
class AgentChatPoignancy(STAction):
    """Score the poignancy of a chat for a role on a 1-10 integer scale."""

    name: str = "AgentChatPoignancy"

    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        try:
            self._func_cleanup(llm_resp, prompt)
        except Exception:
            return False
        return True

    def _func_cleanup(self, llm_resp: str, prompt: str = "") -> int:
        # Parse the response as an integer score; log (and return None) on failure.
        try:
            return int(llm_resp.strip())
        except Exception as exp:
            logger.error(f"{self.cls_name} with error {exp}")

    def _func_fail_default_resp(self) -> str:
        # No fallback defined; returns None on failure.
        pass

    async def run(self, role: "STRole", statements: str, test_input=None, verbose=False) -> str:
        """Ask the LLM to rate how poignant the chat in `statements` is for `role`."""

        def create_prompt_input(role: "STRole", statements, test_input=None):
            return [role.scratch.name, role.scratch.get_str_iss(), role.scratch.name, statements]

        prompt = self.generate_prompt_with_tmpl_filename(
            create_prompt_input(role, statements), "poignancy_chat_v1.txt"
        )
        example_output = "5"  # ########
        special_instruction = "The output should ONLY contain ONE integer value on the scale of 1 to 10."
        output = await self._run_gpt35(prompt, example_output, special_instruction)
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output
# Run GPT Prompt Planning Thought on Convo
class AgentPlanThoughtOnConvo(STAction):
    """Derive a planning thought for a role from a finished conversation."""

    name: str = "AgentPlanThoughtOnConvo"

    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        """A response counts as valid when cleanup completes without raising."""
        try:
            self._func_cleanup(llm_resp, prompt)
        except Exception:
            return False
        return True

    def _func_cleanup(self, llm_resp: str, prompt: str = "") -> str:
        """Keep only the text before the first double quote, stripped."""
        try:
            head, _, _ = llm_resp.partition('"')
            return head.strip()
        except Exception as exp:
            logger.error(f"{self.cls_name} with error {exp}")

    def _func_fail_default_resp(self) -> str:
        pass

    async def run(self, role: "STRole", statements: str, test_input=None, verbose=False) -> str:
        """Ask the LLM for a planning thought grounded in `statements`."""

        def create_prompt_input(role, statements, test_input=None):
            return [statements, role.scratch.name, role.scratch.name, role.scratch.name]

        prompt = self.generate_prompt_with_tmpl_filename(
            create_prompt_input(role, statements), "planning_thought_on_convo_v1.txt"
        )
        output = await self._run_gpt35_max_tokens(prompt, max_tokens=50)
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output
# Run GPT Prompt Memory on Convo
class AgentMemoryOnConvo(STAction):
    """Summarize what was memorable about a conversation."""

    name: str = "AgentMemoryOnConvo"

    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        """A response counts as valid when cleanup completes without raising."""
        try:
            self._func_cleanup(llm_resp, prompt)
        except Exception:
            return False
        return True

    def _func_cleanup(self, llm_resp: str, prompt: str = "") -> str:
        """Keep only the text before the first double quote, stripped."""
        try:
            head, _, _ = llm_resp.partition('"')
            return head.strip()
        except Exception as exp:
            logger.error(f"{self.cls_name} with error {exp}")

    def _func_fail_default_resp(self) -> str:
        pass

    async def run(self, role: "STRole", statements: str, test_input=None, verbose=False) -> str:
        """Ask the LLM to summarize anything interesting from `statements`."""

        def create_prompt_input(role, statements, test_input=None):
            return [statements, role.scratch.name, role.scratch.name, role.scratch.name]

        prompt = self.generate_prompt_with_tmpl_filename(
            create_prompt_input(role, statements), "memo_on_convo_v1.txt"
        )
        example_output = "Jane Doe was interesting to talk to."
        special_instruction = (
            "The output should ONLY contain a string that summarizes anything interesting "
            "that the agent may have noticed"
        )
        output = await self._run_gpt35(prompt, example_output, special_instruction)
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/actions/st_action.py | metagpt/ext/stanford_town/actions/st_action.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : StanfordTown Action
import asyncio
import json
import time
from abc import abstractmethod
from pathlib import Path
from typing import Any, Optional, Union

from metagpt.actions.action import Action
from metagpt.ext.stanford_town.utils.const import PROMPTS_DIR
from metagpt.logs import logger
class STAction(Action):
    """Base class for StanfordTown actions.

    Subclasses define how a raw LLM response is validated
    (`_func_validate`), normalized (`_func_cleanup`), and what to fall
    back to when every retry fails (`_func_fail_default_resp`).
    """

    name: str = "STAction"
    prompt_dir: Path = PROMPTS_DIR
    fail_default_resp: Optional[str] = None

    @property
    def cls_name(self):
        """Concrete subclass name, used in log messages."""
        return self.__class__.__name__

    @abstractmethod
    def _func_validate(self, llm_resp: str, prompt: str):
        raise NotImplementedError

    @abstractmethod
    def _func_cleanup(self, llm_resp: str, prompt: str):
        raise NotImplementedError

    @abstractmethod
    def _func_fail_default_resp(self):
        raise NotImplementedError

    def generate_prompt_with_tmpl_filename(self, prompt_input: Union[str, list], tmpl_filename) -> str:
        """
        Render a prompt template by substituting `!<INPUT n>!` placeholders.

        Args:
            prompt_input: the input we want to feed in (IF THERE ARE MORE THAN ONE INPUT, THIS CAN BE A LIST.)
            tmpl_filename: prompt template filename
        Returns:
            a str prompt that will be sent to LLM server.
        """
        if isinstance(prompt_input, str):
            prompt_input = [prompt_input]
        prompt_input = [str(i) for i in prompt_input]

        # Context manager guarantees the template file is closed even if read fails.
        with open(str(self.prompt_dir.joinpath(tmpl_filename)), "r") as f:
            prompt = f.read()
        for count, i in enumerate(prompt_input):
            prompt = prompt.replace(f"!<INPUT {count}>!", i)
        if "<commentblockmarker>###</commentblockmarker>" in prompt:
            prompt = prompt.split("<commentblockmarker>###</commentblockmarker>")[1]
        return prompt.strip()

    async def _aask(self, prompt: str) -> str:
        return await self.llm.aask(prompt)

    async def _run_gpt35_max_tokens(self, prompt: str, max_tokens: int = 50, retry: int = 3):
        """Query the LLM under a temporary max-token cap, retrying on failure."""
        for idx in range(retry):
            try:
                tmp_max_tokens_rsp = getattr(self.config.llm, "max_token", 1500)
                setattr(self.config.llm, "max_token", max_tokens)
                self.llm.use_system_prompt = False  # to make it behave like a non-chat completions
                try:
                    llm_resp = await self._aask(prompt)
                finally:
                    # Restore the original cap even when the request raises,
                    # so one failed call cannot clobber later ones.
                    setattr(self.config.llm, "max_token", tmp_max_tokens_rsp)
                logger.info(f"Action: {self.cls_name} llm _run_gpt35_max_tokens raw resp: {llm_resp}")
                if self._func_validate(llm_resp, prompt):
                    return self._func_cleanup(llm_resp, prompt)
            except Exception as exp:
                logger.warning(f"Action: {self.cls_name} _run_gpt35_max_tokens exp: {exp}")
                # time.sleep here would block the event loop; back off asynchronously.
                await asyncio.sleep(5)
        return self.fail_default_resp

    async def _run_gpt35(
        self, prompt: str, example_output: str, special_instruction: str, retry: int = 3
    ) -> Union[bool, Any]:
        """same with `gpt_structure.ChatGPT_safe_generate_response`"""
        prompt = '"""\n' + prompt + '\n"""\n'
        prompt += f"Output the response to the prompt above in json. {special_instruction}\n"
        prompt += "Example output json:\n"
        prompt += '{"output": "' + str(example_output) + '"}'
        for idx in range(retry):
            try:
                llm_resp = await self._aask(prompt)
                logger.info(f"Action: {self.cls_name} llm _run_gpt35 raw resp: {llm_resp}")
                # NOTE(review): the index is computed on the stripped text but applied
                # to the raw text; misaligns if the response has leading whitespace — confirm.
                end_idx = llm_resp.strip().rfind("}") + 1
                llm_resp = llm_resp[:end_idx]
                llm_resp = json.loads(llm_resp)["output"]
                if self._func_validate(llm_resp, prompt):
                    return self._func_cleanup(llm_resp, prompt)
            except Exception as exp:
                logger.warning(f"Action: {self.cls_name} _run_gpt35 exp: {exp}")
                await asyncio.sleep(5)  # usually avoid `Rate limit`
        return False

    async def _run_gpt35_wo_extra_prompt(self, prompt: str, retry: int = 3) -> str:
        """Query the LLM with the prompt as-is, retrying on failure."""
        for idx in range(retry):
            try:
                llm_resp = await self._aask(prompt)
                llm_resp = llm_resp.strip()
                logger.info(f"Action: {self.cls_name} llm _run_gpt35_wo_extra_prompt raw resp: {llm_resp}")
                if self._func_validate(llm_resp, prompt):
                    return self._func_cleanup(llm_resp, prompt)
            except Exception as exp:
                logger.warning(f"Action: {self.cls_name} _run_gpt35_wo_extra_prompt exp: {exp}")
                await asyncio.sleep(5)  # usually avoid `Rate limit`
        return self.fail_default_resp

    async def run(self, *args, **kwargs):
        """Run action"""
        raise NotImplementedError("The run method should be implemented in a subclass.")
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/actions/gen_iter_chat_utt.py | metagpt/ext/stanford_town/actions/gen_iter_chat_utt.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : generate_iterative_chat_utt
from metagpt.environment.stanford_town.env_space import EnvObsParams, EnvObsType
from metagpt.ext.stanford_town.actions.st_action import STAction
from metagpt.ext.stanford_town.utils.utils import extract_first_json_dict
from metagpt.logs import logger
class GenIterChatUTT(STAction):
    """Generate the next utterance of an iterative conversation between two roles."""

    name: str = "GenIterChatUTT"

    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        """Valid iff a JSON dict can be extracted from the raw response."""
        resp = False
        try:
            _ = extract_first_json_dict(llm_resp)
            resp = True
        except Exception:
            pass
        return resp

    def _func_cleanup(self, llm_resp: str, prompt: str) -> dict:
        """Normalize the LLM JSON into {"utterance": str, "end": bool}.

        Relies on dict insertion order: the first value is the utterance,
        the second says whether the conversation ended ("f"/"F" => False).
        """
        gpt_response = extract_first_json_dict(llm_resp)

        cleaned_dict = dict()
        cleaned = []
        for key, val in gpt_response.items():
            cleaned += [val]
        cleaned_dict["utterance"] = cleaned[0]
        cleaned_dict["end"] = True
        if "f" in str(cleaned[1]) or "F" in str(cleaned[1]):
            cleaned_dict["end"] = False
        return cleaned_dict

    def _func_fail_default_resp(self) -> dict:
        """Fallback when all retries fail: silent utterance, keep chatting."""
        cleaned_dict = dict()
        cleaned_dict["utterance"] = "..."
        cleaned_dict["end"] = False
        return cleaned_dict

    async def run(
        self,
        init_role: "STRole",
        target_role: "STRole",
        retrieved: dict,
        curr_context: str,
        curr_chat: list[str],
        *args,
        **kwargs,
    ) -> dict:
        """Produce init_role's next utterance (and end flag) toward target_role."""

        def create_prompt_input(
            access_tile: dict[str, str],
            init_role: "STRole",
            target_role: "STRole",
            retrieved: dict,
            curr_context: str,
            curr_chat: list[str],
        ):
            role = init_role
            scratch = role.rc.scratch
            target_scratch = target_role.rc.scratch
            prev_convo_insert = "\n"
            if role.rc.memory.chat_list:
                for i in role.rc.memory.chat_list:
                    if i.object == target_role.name:
                        v1 = int((scratch.curr_time - i.created).total_seconds() / 60)
                        prev_convo_insert += (
                            f"{str(v1)} minutes ago, {scratch.name} and "
                            f"{target_scratch.name} were already {i.description} "
                            f"This context takes place after that conversation."
                        )
                        break
            if prev_convo_insert == "\n":
                prev_convo_insert = ""
            if role.rc.memory.chat_list:
                # Drop the reminder when the last chat is over 8 hours (480 min) old.
                if int((scratch.curr_time - role.rc.memory.chat_list[-1].created).total_seconds() / 60) > 480:
                    prev_convo_insert = ""
            logger.info(f"prev_convo_insert: {prev_convo_insert}")

            curr_sector = f"{access_tile['sector']}"
            curr_arena = f"{access_tile['arena']}"
            curr_location = f"{curr_arena} in {curr_sector}"

            retrieved_str = ""
            for key, vals in retrieved.items():
                for v in vals:
                    retrieved_str += f"- {v.description}\n"

            convo_str = ""
            for i in curr_chat:
                convo_str += ": ".join(i) + "\n"
            if convo_str == "":
                convo_str = "[The conversation has not started yet -- start it!]"

            # Fix: the original prompt duplicated the words "Here is ".
            init_iss = f"Here is a brief description of {scratch.name}.\n{scratch.get_str_iss()}"
            prompt_input = [
                init_iss,
                scratch.name,
                retrieved_str,
                prev_convo_insert,
                curr_location,
                curr_context,
                scratch.name,
                target_scratch.name,
                convo_str,
                scratch.name,
                target_scratch.name,
                scratch.name,
                scratch.name,
                scratch.name,
            ]
            return prompt_input

        # NOTE(review): other code here goes through `init_role.rc.scratch`, while the
        # tile is read from `init_role.scratch.curr_tile` — presumably an alias; confirm.
        access_tile = init_role.rc.env.observe(
            obs_params=EnvObsParams(obs_type=EnvObsType.GET_TITLE, coord=init_role.scratch.curr_tile)
        )
        prompt_input = create_prompt_input(access_tile, init_role, target_role, retrieved, curr_context, curr_chat)
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, "iterative_convo_v1.txt")
        # original using `ChatGPT_safe_generate_response_OLD`
        self.fail_default_resp = self._func_fail_default_resp()
        output = await self._run_gpt35_wo_extra_prompt(prompt)
        logger.info(f"Role: {init_role.name} Action: {self.cls_name} output: {output}")
        return output
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/memory/agent_memory.py | metagpt/ext/stanford_town/memory/agent_memory.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : BasicMemory,AgentMemory实现
from datetime import datetime
from pathlib import Path
from typing import Optional
from pydantic import Field, field_serializer, model_validator
from metagpt.logs import logger
from metagpt.memory.memory import Memory
from metagpt.schema import Message
from metagpt.utils.common import read_json_file, write_json_file
class BasicMemory(Message):
    """
    BasicMemory extends MetaGPT's Message; `description` mirrors `content`.
    Following Generative Agents (GA), a node is one of three memory types:
    event, thought, or chat.
    """

    memory_id: Optional[str] = Field(default=None)  # memory id
    memory_count: int = -1  # index within the whole memory store
    type_count: int = -1  # index within this node's memory type
    memory_type: Optional[str] = Field(default=None)  # one of: event / thought / chat
    depth: int = -1  # reflection depth
    created: Optional[datetime] = Field(default=None)  # creation time
    expiration: Optional[datetime] = Field(default=None)  # expiry time (None = never expires)
    last_accessed: Optional[datetime] = Field(default=None)  # last access; initialized to `created`
    subject: Optional[str] = Field(default=None)  # subject of the SPO triple
    predicate: Optional[str] = Field(default=None)  # predicate of the SPO triple
    object: Optional[str] = Field(default=None)  # object of the SPO triple

    description: Optional[str] = Field(default=None)
    embedding_key: Optional[str] = Field(default=None)  # same text as self.content
    poignancy: int = -1  # importance score
    keywords: list[str] = Field(default=[])  # keywords
    filling: list = Field(default=[])  # memory_ids of related nodes

    __hash__ = object.__hash__  # support hash in AgentMemory

    @model_validator(mode="before")
    @classmethod
    def check_values(cls, values):
        # Mirror `created` into `last_accessed` and `content` into `description`.
        if "created" in values:
            values["last_accessed"] = values["created"]
        if "content" in values:
            values["description"] = values["content"]
        if "filling" in values:
            values["filling"] = values["filling"] or []
        return values

    @field_serializer("created", "expiration")
    def transform_time_field(self, time_field: Optional[datetime]) -> str:
        if time_field:
            time_field = time_field.strftime("%Y-%m-%d %H:%M:%S")
        return time_field

    def summary(self):
        """Return the (subject, predicate, object) triple."""
        return self.subject, self.predicate, self.object

    def save_to_dict(self) -> dict:
        """Serialize this node into the GA-style nodes.json layout.

        Fix: the original `include` list named "node_count"/"type", which are
        not model fields, so pydantic silently dropped them and the saved node
        lost its memory type — breaking `AgentMemory.load`. Dump the real
        fields and map them onto the legacy GA key names explicitly.
        """
        memory_dict = dict()
        node_id = self.memory_id

        basic_mem_obj = self.model_dump(
            include=[
                "depth",
                "created",
                "expiration",
                "subject",
                "predicate",
                "object",
                "description",
                "embedding_key",
                "poignancy",
                "keywords",
                "filling",
                "cause_by",
            ]
        )
        # Legacy GA key names expected by `AgentMemory.load`.
        basic_mem_obj["node_count"] = self.memory_count
        basic_mem_obj["type_count"] = self.type_count
        basic_mem_obj["type"] = self.memory_type

        memory_dict[node_id] = basic_mem_obj
        return memory_dict
class AgentMemory(Memory):
    """
    GA-style associative memory. Three JSON artifacts are persisted:
    1. embeddings.json (dict embedding_key -> embedding)
    2. nodes.json      (dict node_id -> node)
    3. kw_strength.json
    """

    storage: list[BasicMemory] = []  # every node, in insertion order
    event_list: list[BasicMemory] = []  # event nodes, newest first
    thought_list: list[BasicMemory] = []  # thought nodes, newest first
    chat_list: list[BasicMemory] = []  # chat-related memory

    event_keywords: dict[str, list[BasicMemory]] = dict()  # lower-cased keyword -> nodes
    thought_keywords: dict[str, list[BasicMemory]] = dict()
    chat_keywords: dict[str, list[BasicMemory]] = dict()

    kw_strength_event: dict[str, int] = dict()
    kw_strength_thought: dict[str, int] = dict()

    memory_saved: Optional[Path] = Field(default=None)
    embeddings: dict[str, list[float]] = dict()

    def set_mem_path(self, memory_saved: Path):
        """Remember the save directory and load any state stored there."""
        self.memory_saved = memory_saved
        self.load(memory_saved)

    def save(self, memory_saved: Path):
        """Persist nodes.json / embeddings.json / kw_strength.json in GA format.

        Nodes are written newest-first (reverse of storage order), matching GA.
        """
        memory_json = dict()
        for memory_node in reversed(self.storage):
            memory_json.update(memory_node.save_to_dict())
        write_json_file(memory_saved.joinpath("nodes.json"), memory_json)
        write_json_file(memory_saved.joinpath("embeddings.json"), self.embeddings)

        strength_json = dict()
        strength_json["kw_strength_event"] = self.kw_strength_event
        strength_json["kw_strength_thought"] = self.kw_strength_thought
        write_json_file(memory_saved.joinpath("kw_strength.json"), strength_json)

    def load(self, memory_saved: Path):
        """Parse GA's JSON files and populate this AgentMemory."""
        self.embeddings = read_json_file(memory_saved.joinpath("embeddings.json"))
        memory_load = read_json_file(memory_saved.joinpath("nodes.json"))
        for count in range(len(memory_load.keys())):
            node_id = f"node_{str(count + 1)}"
            node_details = memory_load[node_id]
            node_type = node_details["type"]
            created = datetime.strptime(node_details["created"], "%Y-%m-%d %H:%M:%S")
            expiration = None
            if node_details["expiration"]:
                expiration = datetime.strptime(node_details["expiration"], "%Y-%m-%d %H:%M:%S")

            s = node_details["subject"]
            p = node_details["predicate"]
            o = node_details["object"]

            description = node_details["description"]
            embedding_pair = (node_details["embedding_key"], self.embeddings[node_details["embedding_key"]])
            poignancy = node_details["poignancy"]
            keywords = set(node_details["keywords"])
            filling = node_details["filling"]
            if node_type == "thought":
                self.add_thought(
                    created, expiration, s, p, o, description, keywords, poignancy, embedding_pair, filling
                )
            if node_type == "event":
                self.add_event(created, expiration, s, p, o, description, keywords, poignancy, embedding_pair, filling)
            if node_type == "chat":
                self.add_chat(created, expiration, s, p, o, description, keywords, poignancy, embedding_pair, filling)

        strength_keywords_load = read_json_file(memory_saved.joinpath("kw_strength.json"))
        if strength_keywords_load["kw_strength_event"]:
            self.kw_strength_event = strength_keywords_load["kw_strength_event"]
        if strength_keywords_load["kw_strength_thought"]:
            self.kw_strength_thought = strength_keywords_load["kw_strength_thought"]

    def add(self, memory_basic: BasicMemory):
        """Add a new node to storage and prepend it to its per-type list.

        Fix: the original membership test compared a memory_id string against
        BasicMemory objects (`id in self.storage`), which is always False, so
        duplicates were never detected. Compare ids explicitly.
        """
        if any(node.memory_id == memory_basic.memory_id for node in self.storage):
            return
        self.storage.append(memory_basic)
        if memory_basic.memory_type == "chat":
            self.chat_list.insert(0, memory_basic)
        elif memory_basic.memory_type == "thought":
            self.thought_list.insert(0, memory_basic)
        elif memory_basic.memory_type == "event":
            self.event_list.insert(0, memory_basic)

    def add_chat(
        self, created, expiration, s, p, o, content, keywords, poignancy, embedding_pair, filling, cause_by=""
    ):
        """Create and register a chat node; also stores its embedding."""
        memory_count = len(self.storage) + 1
        # Fix: previously counted thought_list (copy-paste from add_thought).
        type_count = len(self.chat_list) + 1
        memory_type = "chat"
        memory_id = f"node_{str(memory_count)}"
        depth = 1

        memory_node = BasicMemory(
            memory_id=memory_id,
            memory_count=memory_count,
            type_count=type_count,
            memory_type=memory_type,
            depth=depth,
            created=created,
            expiration=expiration,
            subject=s,
            predicate=p,
            object=o,
            description=content,
            embedding_key=embedding_pair[0],
            poignancy=poignancy,
            keywords=keywords,
            filling=filling,
            cause_by=cause_by,
        )

        keywords = [i.lower() for i in keywords]
        for kw in keywords:
            if kw in self.chat_keywords:
                self.chat_keywords[kw].insert(0, memory_node)
            else:
                self.chat_keywords[kw] = [memory_node]

        self.add(memory_node)

        self.embeddings[embedding_pair[0]] = embedding_pair[1]
        return memory_node

    def add_thought(self, created, expiration, s, p, o, content, keywords, poignancy, embedding_pair, filling):
        """Create and register a thought node; depth is 1 + max depth of filling."""
        memory_count = len(self.storage) + 1
        type_count = len(self.thought_list) + 1
        memory_type = "thought"
        memory_id = f"node_{str(memory_count)}"
        depth = 1

        try:
            if filling:
                depth_list = [memory_node.depth for memory_node in self.storage if memory_node.memory_id in filling]
                depth += max(depth_list)
        except Exception as exp:
            logger.warning(f"filling init occur {exp}")

        memory_node = BasicMemory(
            memory_id=memory_id,
            memory_count=memory_count,
            type_count=type_count,
            memory_type=memory_type,
            depth=depth,
            created=created,
            expiration=expiration,
            subject=s,
            predicate=p,
            object=o,
            description=content,
            embedding_key=embedding_pair[0],
            poignancy=poignancy,
            keywords=keywords,
            filling=filling,
        )

        keywords = [i.lower() for i in keywords]
        for kw in keywords:
            if kw in self.thought_keywords:
                self.thought_keywords[kw].insert(0, memory_node)
            else:
                self.thought_keywords[kw] = [memory_node]

        self.add(memory_node)

        # "is idle" thoughts do not contribute to keyword strength.
        if f"{p} {o}" != "is idle":
            for kw in keywords:
                if kw in self.kw_strength_thought:
                    self.kw_strength_thought[kw] += 1
                else:
                    self.kw_strength_thought[kw] = 1

        self.embeddings[embedding_pair[0]] = embedding_pair[1]
        return memory_node

    def add_event(self, created, expiration, s, p, o, content, keywords, poignancy, embedding_pair, filling):
        """Create and register an event node (depth 0)."""
        memory_count = len(self.storage) + 1
        type_count = len(self.event_list) + 1
        memory_type = "event"
        memory_id = f"node_{str(memory_count)}"
        depth = 0

        # Normalize parenthesized descriptions: first three words + "(...)" payload.
        if "(" in content:
            content = " ".join(content.split()[:3]) + " " + content.split("(")[-1][:-1]

        memory_node = BasicMemory(
            memory_id=memory_id,
            memory_count=memory_count,
            type_count=type_count,
            memory_type=memory_type,
            depth=depth,
            created=created,
            expiration=expiration,
            subject=s,
            predicate=p,
            object=o,
            description=content,
            embedding_key=embedding_pair[0],
            poignancy=poignancy,
            keywords=keywords,
            filling=filling,
        )

        keywords = [i.lower() for i in keywords]
        for kw in keywords:
            if kw in self.event_keywords:
                self.event_keywords[kw].insert(0, memory_node)
            else:
                self.event_keywords[kw] = [memory_node]

        self.add(memory_node)

        # "is idle" events do not contribute to keyword strength.
        if f"{p} {o}" != "is idle":
            for kw in keywords:
                if kw in self.kw_strength_event:
                    self.kw_strength_event[kw] += 1
                else:
                    self.kw_strength_event[kw] = 1

        self.embeddings[embedding_pair[0]] = embedding_pair[1]
        return memory_node

    def get_summarized_latest_events(self, retention):
        """Return the SPO summaries of the `retention` most recent events."""
        ret_set = set()
        for e_node in self.event_list[:retention]:
            ret_set.add(e_node.summary())
        return ret_set

    def get_last_chat(self, target_role_name: str):
        """Most recent chat node with the given role, or False if none exists."""
        if target_role_name.lower() in self.chat_keywords:
            return self.chat_keywords[target_role_name.lower()][0]
        else:
            return False

    def retrieve_relevant_thoughts(self, s_content: str, p_content: str, o_content: str) -> set:
        """Thoughts whose keywords match any of the SPO strings (case-insensitive).

        Fix: keyword keys are stored lower-cased, but the original membership
        test did not lower-case the probe, so mixed-case inputs were missed.
        """
        contents = [s_content, p_content, o_content]

        ret = []
        for i in contents:
            if i.lower() in self.thought_keywords:
                ret += self.thought_keywords[i.lower()]

        ret = set(ret)
        return ret

    def retrieve_relevant_events(self, s_content: str, p_content: str, o_content: str) -> set:
        """Events whose keywords match any of the SPO strings (case-insensitive).

        Fix: same lower-casing inconsistency as `retrieve_relevant_thoughts`.
        """
        contents = [s_content, p_content, o_content]

        ret = []
        for i in contents:
            if i.lower() in self.event_keywords:
                ret += self.event_keywords[i.lower()]

        ret = set(ret)
        return ret
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/memory/retrieve.py | metagpt/ext/stanford_town/memory/retrieve.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : Retrieve函数实现
import datetime
from numpy import dot
from numpy.linalg import norm
from metagpt.ext.stanford_town.memory.agent_memory import BasicMemory
from metagpt.ext.stanford_town.utils.utils import get_embedding
def agent_retrieve(
    agent_memory,
    curr_time: datetime.datetime,
    memory_forget: float,
    query: str,
    nodes: list[BasicMemory],
    topk: int = 4,
) -> list[BasicMemory]:
    """Score candidate memory nodes and return the top-k node ids.

    Each node gets three component scores — importance (poignancy), recency
    (daily decay factor), and relevance (cosine similarity to the query
    embedding) — which are normalized to [0, 1] and summed with equal weights.
    Meant to be driven by a Role, which owns the AgentMemory and scratch state.
    """
    candidates = sorted(nodes, key=lambda node: node.last_accessed, reverse=True)

    score_list = extract_importance(candidates, [])
    score_list = extract_recency(curr_time, memory_forget, score_list)
    score_list = extract_relevance(agent_memory.embeddings, query, score_list)
    score_list = normalize_score_floats(score_list, 0, 1)

    gw = [1, 1, 1]  # weights: importance, recency, relevance
    total_dict = {}
    for entry in score_list:
        combined = entry["importance"] * gw[0] + entry["recency"] * gw[1] + entry["relevance"] * gw[2]
        total_dict[entry["memory"].memory_id] = combined

    return top_highest_x_values(total_dict, topk)
def new_agent_retrieve(role, focus_points: list, n_count=30) -> dict:
    """Retrieve relevant memories for each focus point.

    Returns a dict mapping each focus point to a list of BasicMemory nodes;
    every returned node's `last_accessed` is refreshed to the current time.
    """
    retrieved = dict()
    for focal_pt in focus_points:
        pool = [
            node
            for node in role.memory.event_list + role.memory.thought_list
            if "idle" not in node.embedding_key
        ]
        pool.sort(key=lambda node: node.last_accessed)

        results = agent_retrieve(
            role.memory, role.scratch.curr_time, role.scratch.recency_decay, focal_pt, pool, n_count
        )
        final_result = []
        for node_id in results:
            for node in role.memory.storage:
                if node.memory_id == node_id:
                    node.last_accessed = role.scratch.curr_time
                    final_result.append(node)
        retrieved[focal_pt] = final_result
    return retrieved
def top_highest_x_values(d, x):
    """Return the keys of the `x` highest-valued entries of `d`, best first."""
    ranked = sorted(d.items(), key=lambda kv: kv[1], reverse=True)
    return [key for key, _ in ranked[:x]]
def extract_importance(memories, score_list):
    """Append one entry per memory carrying the node and its poignancy score."""
    score_list.extend({"memory": node, "importance": node.poignancy} for node in memories)
    return score_list
def extract_relevance(agent_memory_embedding, query, score_list):
    """Fill each entry's "relevance": cosine similarity to the query embedding."""
    query_embedding = get_embedding(query)
    for entry in score_list:
        node_embedding = agent_memory_embedding[entry["memory"].embedding_key]
        entry["relevance"] = cos_sim(node_embedding, query_embedding)
    return score_list
def extract_recency(curr_time, memory_forget, score_list):
    """Fill each entry's "recency": decay factor raised to full days since creation."""
    for entry in score_list:
        day_count = (curr_time - entry["memory"].created).days
        entry["recency"] = memory_forget**day_count
    return score_list
def cos_sim(a, b):
    """Cosine similarity between vectors `a` and `b`."""
    denominator = norm(a) * norm(b)
    return dot(a, b) / denominator
def normalize_list_floats(single_list, target_min, target_max):
    """Linearly rescale values in place into [target_min, target_max].

    A constant list collapses to (target_max - target_min) / 2. Returns the
    mutated input list; an empty input yields [].
    """
    if not single_list:
        return []

    lo = min(single_list)
    hi = max(single_list)
    span = hi - lo

    if span == 0:
        for idx in range(len(single_list)):
            single_list[idx] = (target_max - target_min) / 2
    else:
        for idx, value in enumerate(single_list):
            single_list[idx] = (value - lo) * (target_max - target_min) / span + target_min
    return single_list
def normalize_score_floats(score_list, target_min, target_max):
    """Normalize the importance/relevance/recency columns of score_list in place."""
    for field in ("importance", "relevance", "recency"):
        column = [entry[field] for entry in score_list]
        column = normalize_list_floats(column, target_min, target_max)
        for entry, value in zip(score_list, column):
            entry[field] = value
    return score_list
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/memory/scratch.py | metagpt/ext/stanford_town/memory/scratch.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : Scratch类实现(角色信息类)
from datetime import datetime, timedelta
from pathlib import Path
from typing import Optional, Union
from pydantic import BaseModel, Field, field_serializer, field_validator
from metagpt.utils.common import read_json_file, write_json_file
class Scratch(BaseModel):
# 类别1:人物超参
vision_r: int = 4
att_bandwidth: int = 3
retention: int = 5
# 类别2:世界信息
curr_time: Optional[datetime] = Field(default=None)
curr_tile: Optional[list[int]] = Field(default=None)
daily_plan_req: Optional[str] = Field(default=None)
# 类别3:人物角色的核心身份
name: Optional[str] = Field(default=None)
first_name: Optional[str] = Field(default=None)
last_name: Optional[str] = Field(default=None)
age: Optional[int] = Field(default=None)
innate: Optional[str] = Field(default=None) # L0 permanent core traits.
learned: Optional[str] = Field(default=None) # L1 stable traits.
currently: Optional[str] = Field(default=None) # L2 external implementation.
lifestyle: Optional[str] = Field(default=None)
living_area: Optional[str] = Field(default=None)
# 类别4:旧反思变量
concept_forget: int = 100
daily_reflection_time: int = 60 * 3
daily_reflection_size: int = 5
overlap_reflect_th: int = 2
kw_strg_event_reflect_th: int = 4
kw_strg_thought_reflect_th: int = 4
# 类别5:新反思变量
recency_w: int = 1
relevance_w: int = 1
importance_w: int = 1
recency_decay: float = 0.99
importance_trigger_max: int = 150
importance_trigger_curr: int = 150
importance_ele_n: int = 0
thought_count: int = 5
# 类别6:个人计划
daily_req: list[str] = Field(default=[])
f_daily_schedule: list[list[Union[int, str]]] = Field(default=[])
f_daily_schedule_hourly_org: list[list[Union[int, str]]] = Field(default=[])
# 类别7:当前动作
act_address: Optional[str] = Field(default=None)
act_start_time: Optional[datetime] = Field(default=None)
act_duration: Optional[int] = Field(default=None)
act_description: Optional[str] = Field(default=None)
act_pronunciatio: Optional[str] = Field(default=None)
act_event: list[Optional[str]] = [None, None, None]
act_obj_description: Optional[str] = Field(default=None)
act_obj_pronunciatio: Optional[str] = Field(default=None)
act_obj_event: list[Optional[str]] = [None, None, None]
chatting_with: Optional[str] = Field(default=None)
chat: Optional[str] = Field(default=None)
chatting_with_buffer: dict = dict()
chatting_end_time: Optional[datetime] = Field(default=None)
act_path_set: bool = False
planned_path: list[list[int]] = Field(default=[])
@field_validator("curr_time", "act_start_time", "chatting_end_time", mode="before")
@classmethod
def check_time_filed(cls, time_filed):
    """Parse 'Month DD, YYYY, HH:MM:SS' strings into datetime; falsy values become None."""
    if not time_filed:
        return None
    return datetime.strptime(time_filed, "%B %d, %Y, %H:%M:%S")
@field_serializer("curr_time", "act_start_time", "chatting_end_time")
def transform_time_field(self, time_filed: Optional[datetime]) -> str:
    """Serialize datetime fields as 'Month DD, YYYY, HH:MM:SS'; None passes through unchanged."""
    if time_filed:
        time_filed = time_filed.strftime("%B %d, %Y, %H:%M:%S")
    return time_filed
@classmethod
def init_scratch_from_path(cls, f_saved: Path):
    """Build a Scratch instance from a saved JSON file."""
    return Scratch(**read_json_file(f_saved))
def save(self, out_json: Path):
    """Persist this persona's scratch state.

    Args:
        out_json: Destination file for the serialized state.
    """
    write_json_file(out_json, self.model_dump(), encoding="utf-8")
def get_f_daily_schedule_index(self, advance=0):
    """
    We get the current index of self.f_daily_schedule.

    Recall that self.f_daily_schedule stores the decomposed action sequences
    up until now, and the hourly sequences of the future action for the rest
    of today. Given that self.f_daily_schedule is a list of list where the
    inner list is composed of [task, duration], we continue to add up the
    duration until we reach "if elapsed > today_min_elapsed" condition. The
    index where we stop is the index we will return.

    INPUT
        advance: Integer value of the number minutes we want to look into the
                 future. This allows us to get the index of a future timeframe.
    OUTPUT
        an integer value for the current index of f_daily_schedule.
    """
    # Minutes elapsed today, plus the optional look-ahead.
    # (The original also summed durations into an unused variable `x`
    # in two dead loops; removed.)
    today_min_elapsed = 0
    today_min_elapsed += self.curr_time.hour * 60
    today_min_elapsed += self.curr_time.minute
    today_min_elapsed += advance

    # Walk the schedule until cumulative duration passes the elapsed minutes.
    curr_index = 0
    elapsed = 0
    for task, duration in self.f_daily_schedule:
        elapsed += duration
        if elapsed > today_min_elapsed:
            return curr_index
        curr_index += 1
    return curr_index
def get_f_daily_schedule_hourly_org_index(self, advance=0):
    """
    Current index into self.f_daily_schedule_hourly_org; otherwise the same
    as get_f_daily_schedule_index.

    INPUT
        advance: minutes to look into the future.
    OUTPUT
        integer index of the entry covering the (possibly advanced) current time.
    """
    # Minutes elapsed today, plus the optional look-ahead.
    minutes_now = self.curr_time.hour * 60 + self.curr_time.minute + advance

    # Walk until cumulative duration passes the elapsed minutes.
    total = 0
    for index, (task, duration) in enumerate(self.f_daily_schedule_hourly_org):
        total += duration
        if total > minutes_now:
            return index
    return len(self.f_daily_schedule_hourly_org)
def get_str_iss(self):
    """
    ISS stands for "identity stable set": the commonset summary of this
    persona -- the bare-minimum description that is injected into almost
    every prompt that needs to speak for the persona.
    INPUT
      None
    OUTPUT
      the identity stable set summary of the persona in a string form.
    EXAMPLE STR OUTPUT
      "Name: Dolores Heitmiller
       Age: 28
       Innate traits: hard-edged, independent, loyal
       ..."
    """
    date_str = self.curr_time.strftime("%A %B %d") if self.curr_time else ""
    lines = [
        f"Name: {self.name}",
        f"Age: {self.age}",
        f"Innate traits: {self.innate}",
        f"Learned traits: {self.learned}",
        f"Currently: {self.currently}",
        f"Lifestyle: {self.lifestyle}",
        f"Daily plan requirement: {self.daily_plan_req}",
        f"Current Date: {date_str}",
    ]
    # Each field on its own line, with a trailing newline (as before).
    return "\n".join(lines) + "\n"
def get_str_name(self):
    """Return the persona's full name."""
    return self.name

def get_str_firstname(self):
    """Return the persona's first name."""
    return self.first_name

def get_str_lastname(self):
    """Return the persona's last name."""
    return self.last_name

def get_str_age(self):
    """Return the persona's age as a string."""
    return str(self.age)

def get_str_innate(self):
    """Return the persona's innate-traits description."""
    return self.innate

def get_str_learned(self):
    """Return the persona's learned-traits description."""
    return self.learned

def get_str_currently(self):
    """Return the persona's current-status description."""
    return self.currently

def get_str_lifestyle(self):
    """Return the persona's lifestyle description."""
    return self.lifestyle

def get_str_daily_plan_req(self):
    """Return the persona's daily plan requirement."""
    return self.daily_plan_req

def get_str_curr_date_str(self):
    """Return the current in-game date, e.g. "Monday February 13"."""
    return self.curr_time.strftime("%A %B %d")
def get_curr_event(self):
    """Current action event triple; an idle (name, None, None) triple when
    there is no active action address."""
    if self.act_address:
        return self.act_event
    return self.name, None, None

def get_curr_event_and_desc(self):
    """Current action event triple plus its description; Nones when there
    is no active action address."""
    if not self.act_address:
        return self.name, None, None, None
    event = self.act_event
    return event[0], event[1], event[2], self.act_description

def get_curr_obj_event_and_desc(self):
    """Current object event (address, predicate, object, description);
    empty/None fields when there is no active action address."""
    if not self.act_address:
        return "", None, None, None
    obj_event = self.act_obj_event
    return self.act_address, obj_event[1], obj_event[2], self.act_obj_description
def add_new_action(
    self,
    action_address,
    action_duration,
    action_description,
    action_pronunciatio,
    action_event,
    chatting_with,
    chat,
    chatting_with_buffer,
    chatting_end_time,
    act_obj_description,
    act_obj_pronunciatio,
    act_obj_event,
    act_start_time=None,
):
    """
    Install a new action as the persona's current action, overwriting the
    action-related scratch fields.

    INPUT
      action_address: "{world}:{sector}:{arena}:{game_object}" address.
      action_duration: duration of the action in minutes.
      action_description / action_pronunciatio: text and emoji for the action.
      action_event: (subject, predicate, object) triple for the action.
      chatting_with / chat / chatting_with_buffer / chatting_end_time:
        conversation state; chatting_with_buffer (if truthy) is merged into
        the existing buffer rather than replacing it.
      act_obj_description / act_obj_pronunciatio / act_obj_event: the
        corresponding state for the game object being acted on.
      act_start_time: optional explicit start time; defaults to curr_time.
    OUTPUT
      None (mutates self).
    """
    self.act_address = action_address
    self.act_duration = action_duration
    self.act_description = action_description
    self.act_pronunciatio = action_pronunciatio
    self.act_event = action_event

    self.chatting_with = chatting_with
    self.chat = chat
    if chatting_with_buffer:
        self.chatting_with_buffer.update(chatting_with_buffer)
    self.chatting_end_time = chatting_end_time

    self.act_obj_description = act_obj_description
    self.act_obj_pronunciatio = act_obj_pronunciatio
    self.act_obj_event = act_obj_event

    # Bug fix: the act_start_time argument used to be silently ignored and
    # the start time was always stamped with curr_time. Honor it when the
    # caller provides one; the default (None) preserves the old behavior.
    self.act_start_time = act_start_time if act_start_time else self.curr_time
    # Force a fresh path computation for the new action.
    self.act_path_set = False
def act_time_str(self):
    """
    Returns a string output of the current action's start time.
    INPUT
      None
    OUTPUT
      A string such as "14:05 PM" -- 24-hour clock followed by the AM/PM
      marker, as produced by strftime("%H:%M %p").
    """
    return self.act_start_time.strftime("%H:%M %p")
def act_check_finished(self):
    """
    Report whether the persona's current action has run its course.

    A missing action address counts as finished. For a chat the end time is
    the scheduled chatting_end_time; otherwise it is the start time
    (rounded up to the next whole minute) plus the action's duration. The
    check compares clock strings, so it triggers on the exact "HH:MM:SS".
    OUTPUT
      True when the action is finished, False while it is still ongoing.
    """
    if not self.act_address:
        return True

    if self.chatting_with:
        end_time = self.chatting_end_time
    else:
        start = self.act_start_time
        # A mid-minute start is rounded up to the next whole minute before
        # the duration is applied.
        if start.second != 0:
            start = start.replace(second=0) + timedelta(minutes=1)
        end_time = start + timedelta(minutes=self.act_duration)

    return end_time.strftime("%H:%M:%S") == self.curr_time.strftime("%H:%M:%S")
def act_summarize(self):
    """
    Package the current action into a dictionary.
    INPUT
      None
    OUTPUT
      A dict with persona, address, start_datetime, duration, description
      and pronunciatio of the current action.
    """
    return {
        "persona": self.name,
        "address": self.act_address,
        "start_datetime": self.act_start_time,
        "duration": self.act_duration,
        "description": self.act_description,
        "pronunciatio": self.act_pronunciatio,
    }
def act_summary_str(self):
    """
    Returns a human-readable, multi-line summary of the current action:
    a timestamp header followed by activity, address and duration lines.
    INPUT
      None
    OUTPUT
      ret: A human readable summary of the action.
    """
    started = self.act_start_time.strftime("%A %B %d -- %H:%M %p")
    return (
        f"[{started}]\n"
        f"Activity: {self.name} is {self.act_description}\n"
        f"Address: {self.act_address}\n"
        f"Duration in minutes (e.g., x min): {str(self.act_duration)} min\n"
    )
def get_daily_schedule(self, daily_schedule: list[list[str]]):
    """
    Render a schedule (list of [task, duration-in-minutes] rows) as one
    "HH:MM || task" line per row, where HH:MM is the cumulative end time
    of that task measured from midnight.
    """
    rendered = []
    elapsed = 0
    for row in daily_schedule:
        elapsed += row[1]
        rendered.append(f"{int(elapsed / 60):02}:{elapsed % 60:02} || {row[0]}")
    # Keep the historical trailing newline; an empty schedule yields "".
    return "\n".join(rendered) + "\n" if rendered else ""
def get_str_daily_schedule_summary(self):
    """Render the decomposed schedule (f_daily_schedule) as an
    "HH:MM || task" listing via get_daily_schedule."""
    return self.get_daily_schedule(self.f_daily_schedule)

def get_str_daily_schedule_hourly_org_summary(self):
    """Render the original hourly schedule (f_daily_schedule_hourly_org)
    as an "HH:MM || task" listing via get_daily_schedule."""
    return self.get_daily_schedule(self.f_daily_schedule_hourly_org)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/memory/__init__.py | metagpt/ext/stanford_town/memory/__init__.py | python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false | |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/memory/spatial_memory.py | metagpt/ext/stanford_town/memory/spatial_memory.py | """
Author: Joon Sung Park (joonspk@stanford.edu)
File: spatial_memory.py
Description: Defines the MemoryTree class that serves as the agents' spatial
memory that aids in grounding their behavior in the game world.
"""
from pathlib import Path
from pydantic import BaseModel, Field
from metagpt.logs import logger
from metagpt.utils.common import read_json_file, write_json_file
class MemoryTree(BaseModel):
    """The persona's spatial memory: a nested dict mapping
    world -> sector -> arena -> [game objects]."""

    # Bug fix: `Field(default=dict)` made the default value the dict *class*
    # itself rather than an empty dict instance, so item assignment on a
    # freshly constructed MemoryTree would fail. default_factory builds a
    # fresh empty dict per instance.
    tree: dict = Field(default_factory=dict)

    def set_mem_path(self, f_saved: Path):
        """Load the spatial tree from a saved JSON file."""
        self.tree = read_json_file(f_saved)

    def print_tree(self) -> None:
        """Log the whole tree, one ' >' of indentation per depth level
        (debug aid)."""

        def _print_tree(tree, depth):
            dash = " >" * depth
            if isinstance(tree, list):
                # Leaf level: the list of game objects.
                if tree:
                    logger.info(f"{dash} {tree}")
                return

            for key, val in tree.items():
                if key:
                    logger.info(f"{dash} {tree}")
                _print_tree(val, depth + 1)

        _print_tree(self.tree, 0)

    def save(self, out_json: Path) -> None:
        """Persist the spatial tree to a JSON file."""
        write_json_file(out_json, self.tree)

    def get_str_accessible_sectors(self, curr_world: str) -> str:
        """
        Returns a comma-separated summary of all the sectors the persona
        knows within `curr_world`. Places the persona cannot enter are not
        in the tree, so they never appear here.
        INPUT
          curr_world: world name (top-level key of the tree).
        OUTPUT
          A summary string of all the sectors the persona can access.
        EXAMPLE STR OUTPUT
          "dolores double studio, hobbs cafe, the rose and crown pub"
        """
        return ", ".join(list(self.tree[curr_world].keys()))

    def get_str_accessible_sector_arenas(self, sector: str) -> str:
        """
        Returns a comma-separated summary of all the arenas the persona
        knows within the given sector.
        INPUT
          sector: a "world:sector" address string.
        OUTPUT
          A summary string of all the arenas the persona can access.
        EXAMPLE STR OUTPUT
          "bedroom, kitchen, dining room, office, bathroom"
        """
        curr_world, curr_sector = sector.split(":")
        if not curr_sector:
            return ""
        return ", ".join(list(self.tree[curr_world][curr_sector].keys()))

    def get_str_accessible_arena_game_objects(self, arena: str) -> str:
        """
        Returns a comma-separated list of all game objects the persona
        knows within the given arena.
        INPUT
          arena: a "world:sector:arena" address string.
        OUTPUT
          str list of all accessible game objects in the game arena.
        EXAMPLE STR OUTPUT
          "phone, charger, bed, nightstand"
        """
        curr_world, curr_sector, curr_arena = arena.split(":")

        if not curr_arena:
            return ""

        try:
            x = ", ".join(list(self.tree[curr_world][curr_sector][curr_arena]))
        except Exception:
            # Arena names are sometimes stored lower-cased; retry that key
            # before giving up.
            x = ", ".join(list(self.tree[curr_world][curr_sector][curr_arena.lower()]))
        return x

    def add_tile_info(self, tile_info: dict) -> None:
        """Insert a tile's world/sector/arena/game_object into the tree,
        creating intermediate levels as needed; falsy levels are skipped."""
        if tile_info["world"]:
            if tile_info["world"] not in self.tree:
                self.tree[tile_info["world"]] = {}
        if tile_info["sector"]:
            if tile_info["sector"] not in self.tree[tile_info["world"]]:
                self.tree[tile_info["world"]][tile_info["sector"]] = {}
        if tile_info["arena"]:
            if tile_info["arena"] not in self.tree[tile_info["world"]][tile_info["sector"]]:
                self.tree[tile_info["world"]][tile_info["sector"]][tile_info["arena"]] = []
        if tile_info["game_object"]:
            if tile_info["game_object"] not in self.tree[tile_info["world"]][tile_info["sector"]][tile_info["arena"]]:
                self.tree[tile_info["world"]][tile_info["sector"]][tile_info["arena"]] += [tile_info["game_object"]]
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/utils/const.py | metagpt/ext/stanford_town/utils/const.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
from pathlib import Path
from metagpt.const import EXAMPLE_PATH
# Root of the stanford_town extension package (metagpt/ext/stanford_town).
ST_ROOT_PATH = Path(__file__).parent.parent
# Persistent simulation data (personas, environment, movement, ...).
STORAGE_PATH = EXAMPLE_PATH.joinpath("stanford_town/storage")
# Scratch area for the currently running simulation (curr_sim_code / curr_step).
TEMP_STORAGE_PATH = EXAMPLE_PATH.joinpath("stanford_town/temp_storage")
# Static maze assets for "the Ville" map.
MAZE_ASSET_PATH = ST_ROOT_PATH.joinpath("static_dirs/assets/the_ville")
# Prompt template directory for the stanford_town actions.
PROMPTS_DIR = ST_ROOT_PATH.joinpath("prompts")
# Maze tile id treated as an impassable (collision) block by path finding.
collision_block_id = "32125"
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/utils/utils.py | metagpt/ext/stanford_town/utils/utils.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : utils
import csv
import errno
import json
import os
import shutil
import time
from pathlib import Path
from typing import Union
from openai import OpenAI
from metagpt.config2 import config
from metagpt.logs import logger
def read_csv_to_list(curr_file: str, header=False, strip_trail=True):
    """
    Reads in a csv file to a list of lists. If header is True, it returns a
    tuple with (header row, all other rows).
    ARGS:
      curr_file: path to the current csv file.
      header: when True, split off the first row and return (header, rows).
      strip_trail: when True, strip surrounding whitespace from every cell.
    RETURNS:
      List of list where the component lists are the rows of the file, or a
      (header_row, data_rows) tuple when header is True.
    """
    logger.debug(f"start read csv: {curr_file}")
    # The two branches of the original duplicated the whole reading loop;
    # read once and only vary the return shape.
    rows = []
    with open(curr_file) as f_analysis_file:
        for row in csv.reader(f_analysis_file, delimiter=","):
            if strip_trail:
                row = [cell.strip() for cell in row]
            rows.append(row)
    if header:
        return rows[0], rows[1:]
    return rows
def get_embedding(text, model: str = "text-embedding-ada-002"):
    """
    Fetch the OpenAI embedding vector for `text`, retrying up to 3 times
    (with a 5s pause after each failure).
    ARGS:
      text: input text; newlines are flattened, empty text is replaced with
        a placeholder so the API call is still valid.
      model: embedding model name.
    RETURNS:
      the embedding vector (list of floats).
    RAISES:
      ValueError when all attempts fail.
    """
    text = text.replace("\n", " ")
    embedding = None
    if not text:
        text = "this is blank"
    for _ in range(3):
        try:
            embedding = (
                OpenAI(api_key=config.llm.api_key).embeddings.create(input=[text], model=model).data[0].embedding
            )
            # Bug fix: stop retrying once a call succeeds. Previously the
            # loop had no break, so every embedding issued three API calls.
            break
        except Exception as exp:
            logger.info(f"get_embedding failed, exp: {exp}, will retry.")
            time.sleep(5)
    if not embedding:
        raise ValueError("get_embedding failed")
    return embedding
def extract_first_json_dict(data_str: str) -> Union[None, dict]:
    """
    Extract and parse the first JSON object embedded in `data_str`.

    Uses json.JSONDecoder.raw_decode so that nested objects are handled
    correctly: the previous implementation cut the candidate at the first
    '}', which failed on any dict containing a nested dict. Scans forward
    over '{' positions so leading non-JSON braces are skipped.
    ARGS:
      data_str: arbitrary text (e.g. an LLM response) that may contain a
        JSON object.
    RETURNS:
      the first parsed dict, or None when no valid JSON object is found.
    """
    decoder = json.JSONDecoder()
    start_idx = data_str.find("{")
    while start_idx != -1:
        try:
            obj, _ = decoder.raw_decode(data_str, start_idx)
            if isinstance(obj, dict):
                return obj
        except json.JSONDecodeError:
            pass
        start_idx = data_str.find("{", start_idx + 1)
    return None
def path_finder_v2(a, start, end, collision_block_char) -> list[tuple[int, int]]:
    """
    Shortest-path search on a grid maze via a Lee-style wavefront (BFS)
    expansion followed by backtracking.

    ARGS:
      a: 2D maze (list of lists); cells equal to collision_block_char are walls.
      start: (row, col) start coordinate.
      end: (row, col) goal coordinate.
      collision_block_char: value marking an impassable cell of `a`.
    RETURNS:
      The path as a list of (row, col) tuples from start to end, inclusive.
      NOTE(review): if the goal is unreachable within 150 expansions the
      backtrack runs on an incomplete labeling -- behavior is then undefined;
      callers appear to rely on connected mazes.
    """

    def make_step(m, k):
        # One wavefront expansion: every cell currently labeled k labels its
        # free (a == 0) and unvisited (m == 0) 4-neighbours with k + 1.
        for i in range(len(m)):
            for j in range(len(m[i])):
                if m[i][j] == k:
                    if i > 0 and m[i - 1][j] == 0 and a[i - 1][j] == 0:
                        m[i - 1][j] = k + 1
                    if j > 0 and m[i][j - 1] == 0 and a[i][j - 1] == 0:
                        m[i][j - 1] = k + 1
                    if i < len(m) - 1 and m[i + 1][j] == 0 and a[i + 1][j] == 0:
                        m[i + 1][j] = k + 1
                    if j < len(m[i]) - 1 and m[i][j + 1] == 0 and a[i][j + 1] == 0:
                        m[i][j + 1] = k + 1

    # Normalize the maze to 0 (free) / 1 (wall).
    new_maze = []
    for row in a:
        new_row = []
        for j in row:
            if j == collision_block_char:
                new_row += [1]
            else:
                new_row += [0]
        new_maze += [new_row]
    a = new_maze

    # m holds the wavefront labels; 0 means "not yet visited".
    m = []
    for i in range(len(a)):
        m.append([])
        for j in range(len(a[i])):
            m[-1].append(0)
    i, j = start
    m[i][j] = 1

    # Expand until the goal cell is labeled, capped at 150 iterations so an
    # unreachable goal cannot loop forever.
    k = 0
    except_handle = 150
    while m[end[0]][end[1]] == 0:
        k += 1
        make_step(m, k)

        if except_handle == 0:
            break
        except_handle -= 1

    # Backtrack from the goal: repeatedly step to any neighbour labeled k-1.
    i, j = end
    k = m[i][j]
    the_path = [(i, j)]
    while k > 1:
        if i > 0 and m[i - 1][j] == k - 1:
            i, j = i - 1, j
            the_path.append((i, j))
            k -= 1
        elif j > 0 and m[i][j - 1] == k - 1:
            i, j = i, j - 1
            the_path.append((i, j))
            k -= 1
        elif i < len(m) - 1 and m[i + 1][j] == k - 1:
            i, j = i + 1, j
            the_path.append((i, j))
            k -= 1
        elif j < len(m[i]) - 1 and m[i][j + 1] == k - 1:
            i, j = i, j + 1
            the_path.append((i, j))
            k -= 1

    the_path.reverse()
    return the_path
def path_finder(collision_maze: list, start: list[int], end: list[int], collision_block_char: str) -> list[int]:
    """
    Wrapper around path_finder_v2 that converts between the caller's (x, y)
    coordinate order and the maze's (row, col) order on the way in and out.
    """
    # EMERGENCY PATCH
    swapped_start = (start[1], start[0])
    swapped_end = (end[1], end[0])
    # END EMERGENCY PATCH

    raw_path = path_finder_v2(collision_maze, swapped_start, swapped_end, collision_block_char)

    # Swap each step back to (x, y) order for the caller.
    return [(step[1], step[0]) for step in raw_path]
def create_folder_if_not_there(curr_path):
    """
    Checks if the folder for curr_path exists, creating it if it does not.

    If curr_path designates a file location (its last component contains a
    '.'), the function operates on the folder that contains the file; a
    plain folder path is used as-is. A path with no '/' at all is left
    alone (nothing is created).
    ARGS:
      curr_path: a folder path, or a file path whose parent folder should
        exist.
    RETURNS:
      True: if a new folder is created
      False: if a new folder is not created
    """
    outfolder_name = curr_path.split("/")
    if len(outfolder_name) != 1:
        # This checks if the curr path is a file or a folder.
        if "." in outfolder_name[-1]:
            outfolder_name = outfolder_name[:-1]
        outfolder_name = "/".join(outfolder_name)
        if not os.path.exists(outfolder_name):
            os.makedirs(outfolder_name)
            return True
    return False
def find_filenames(path_to_dir, suffix=".csv"):
    """
    Given a directory, find all files that end with the provided suffix and
    return their paths.
    ARGS:
      path_to_dir: Path to the directory to scan (non-recursive).
      suffix: The target suffix.
    RETURNS:
      A list of "{path_to_dir}/{filename}" paths for the matching files.
    """
    matches = []
    for filename in os.listdir(path_to_dir):
        if filename.endswith(suffix):
            matches.append(path_to_dir + "/" + filename)
    return matches
def copy_folder(src_folder: str, dest_folder: str):
    """
    Recursively copy src_folder to dest_folder, removing and replacing
    dest_folder if it already exists. When the source is not a directory
    (copytree raises ENOTDIR/EINVAL), fall back to a flat file copy.
    ARGS:
      src_folder: source directory (or file, via the fallback).
      dest_folder: destination path; an existing tree there is deleted.
    RAISES:
      OSError: re-raised for any error other than the not-a-directory case.
    """
    try:
        if Path(dest_folder).exists():
            logger.warning(f"{dest_folder} exist, start to remove.")
            shutil.rmtree(dest_folder)
        shutil.copytree(src_folder, dest_folder)
    except OSError as exc:  # python >2.5
        if exc.errno in (errno.ENOTDIR, errno.EINVAL):
            # Source was a plain file -- copy it directly.
            shutil.copy(src_folder, dest_folder)
        else:
            raise
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/utils/mg_ga_transform.py | metagpt/ext/stanford_town/utils/mg_ga_transform.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : data transform of mg <-> ga under storage
from pathlib import Path
from typing import Optional
from metagpt.ext.stanford_town.utils.const import STORAGE_PATH, TEMP_STORAGE_PATH
from metagpt.logs import logger
from metagpt.utils.common import read_json_file, write_json_file
def get_reverie_meta(sim_code: str) -> dict:
    """Load `reverie/meta.json` for the given simulation from STORAGE_PATH
    and return it as a dict."""
    meta_file_path = STORAGE_PATH.joinpath(sim_code).joinpath("reverie/meta.json")
    reverie_meta = read_json_file(meta_file_path)
    return reverie_meta
def save_movement(role_name: str, role_move: dict, step: int, sim_code: str, curr_time: "datetime.datetime"):
    """
    Record one role's movement for a simulation step under
    storage/{sim_code}/movement/{step}.json, merging with moves already
    saved for that step by other roles.

    Note: curr_time must be a datetime (strftime is called on it); the
    previous `str` annotation was wrong.
    """
    movement_path = STORAGE_PATH.joinpath(f"{sim_code}/movement/{step}.json")
    if not movement_path.parent.exists():
        movement_path.parent.mkdir(exist_ok=True)
    # Merge into the existing per-step file so roles don't clobber each other.
    if movement_path.exists():
        movement = read_json_file(movement_path)
    else:
        movement = {"persona": dict(), "meta": dict()}
    movement["persona"][role_name] = role_move
    movement["meta"]["curr_time"] = curr_time.strftime("%B %d, %Y, %H:%M:%S")

    write_json_file(movement_path, movement)
    logger.info(f"save_movement at step: {step}, curr_time: {movement['meta']['curr_time']}")
def save_environment(role_name: str, step: int, sim_code: str, movement: list[int]):
    """
    Persist one role's maze position for a simulation step under
    storage/{sim_code}/environment/{step}.json, merging with positions
    already recorded for that step.
    """
    env_path = STORAGE_PATH.joinpath(f"{sim_code}/environment/{step}.json")
    # exist_ok makes the explicit pre-check unnecessary.
    env_path.parent.mkdir(exist_ok=True)

    environment = read_json_file(env_path) if env_path.exists() else {}
    environment[role_name] = {"maze": "the_ville", "x": movement[0], "y": movement[1]}

    write_json_file(env_path, environment)
    logger.info(f"save_environment at step: {step}")
def get_role_environment(sim_code: str, role_name: str, step: int = 0) -> dict:
    """
    Load a role's saved maze position for the given step from
    storage/{sim_code}/environment/{step}.json; None when the file or the
    role entry is absent.
    """
    env_path = STORAGE_PATH.joinpath(f"{sim_code}/environment/{step}.json")
    if not env_path.exists():
        return None
    return read_json_file(env_path).get(role_name, None)
def write_curr_sim_code(curr_sim_code: dict, temp_storage_path: Optional[Path] = None):
    """Write curr_sim_code.json into the temp storage folder
    (TEMP_STORAGE_PATH unless an explicit path is given)."""
    target = Path(temp_storage_path) if temp_storage_path is not None else TEMP_STORAGE_PATH
    write_json_file(target.joinpath("curr_sim_code.json"), curr_sim_code)

def write_curr_step(curr_step: dict, temp_storage_path: Optional[Path] = None):
    """Write curr_step.json into the temp storage folder
    (TEMP_STORAGE_PATH unless an explicit path is given)."""
    target = Path(temp_storage_path) if temp_storage_path is not None else TEMP_STORAGE_PATH
    write_json_file(target.joinpath("curr_step.json"), curr_step)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/utils/__init__.py | metagpt/ext/stanford_town/utils/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/roles/__init__.py | metagpt/ext/stanford_town/roles/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/roles/st_role.py | metagpt/ext/stanford_town/roles/st_role.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : Stanford Town role
"""
Do the steps following:
- perceive, receive environment(Maze) info
- retrieve, retrieve memories
- plan, do plan like long-term plan and interact with Maze
- reflect, do the High-level thinking based on memories and re-add into the memory
- execute, move or else in the Maze
"""
import math
import random
import time
from datetime import datetime, timedelta
from operator import itemgetter
from pathlib import Path
from typing import Optional
from pydantic import ConfigDict, Field, field_validator, model_validator
from metagpt.actions.add_requirement import UserRequirement
from metagpt.environment.stanford_town.env_space import (
EnvAction,
EnvActionType,
EnvObsParams,
EnvObsType,
)
from metagpt.environment.stanford_town.stanford_town_env import StanfordTownEnv
from metagpt.ext.stanford_town.actions.dummy_action import DummyAction, DummyMessage
from metagpt.ext.stanford_town.actions.inner_voice_action import (
AgentWhisperThoughtAction,
)
from metagpt.ext.stanford_town.actions.run_reflect_action import AgentEventTriple
from metagpt.ext.stanford_town.memory.agent_memory import AgentMemory, BasicMemory
from metagpt.ext.stanford_town.memory.scratch import Scratch
from metagpt.ext.stanford_town.memory.spatial_memory import MemoryTree
from metagpt.ext.stanford_town.plan.st_plan import plan
from metagpt.ext.stanford_town.reflect.reflect import generate_poig_score, role_reflect
from metagpt.ext.stanford_town.utils.const import STORAGE_PATH, collision_block_id
from metagpt.ext.stanford_town.utils.mg_ga_transform import (
get_role_environment,
save_environment,
save_movement,
)
from metagpt.ext.stanford_town.utils.utils import get_embedding, path_finder
from metagpt.logs import logger
from metagpt.roles.role import Role, RoleContext
from metagpt.schema import Message
from metagpt.utils.common import any_to_str
class STRoleContext(RoleContext):
    """Runtime context for a Stanford-Town role: the shared game env plus
    the persona's three memory stores (associative, scratch, spatial)."""

    model_config = ConfigDict(arbitrary_types_allowed=True)

    env: StanfordTownEnv = Field(default=None, exclude=True)  # shared maze env (not serialized)
    memory: AgentMemory = Field(default_factory=AgentMemory)  # associative memory
    scratch: Scratch = Field(default_factory=Scratch)  # short-term scratch state
    spatial_memory: MemoryTree = Field(default_factory=MemoryTree)  # known-map tree
class STRole(Role):
    # add a role's property structure to store role's age and so on like GA's Scratch.
    model_config = ConfigDict(arbitrary_types_allowed=True, extra="allow")

    name: str = Field(default="Klaus Mueller")  # persona name; matches storage/{sim}/personas/{name}
    profile: str = Field(default="STMember")
    rc: STRoleContext = Field(default_factory=STRoleContext)
    sim_code: str = Field(default="new_sim")  # simulation id under storage/
    step: int = Field(default=0)  # current simulation step
    start_time: Optional[datetime] = Field(default=None)  # simulation start day, anchored at 00:00:00
    curr_time: Optional[datetime] = Field(default=None)  # current in-game time
    sec_per_step: int = Field(default=10)  # presumably in-game seconds per step -- TODO confirm
    game_obj_cleanup: dict = Field(default_factory=dict)  # NOTE(review): looks like pending object-event cleanup -- confirm
    inner_voice: bool = Field(default=False)  # set True once a whisper has been injected
    has_inner_voice: bool = Field(default=False)  # whether this role watches for user whispers
    role_storage_path: Optional[Path] = Field(default=None)  # resolved to storage/{sim}/personas/{name}
@field_validator("curr_time", mode="before")
@classmethod
def check_curr_time(cls, curr_time: str) -> datetime:
    # Parse e.g. "February 13, 2023, 00:00:00" into a datetime.
    return datetime.strptime(curr_time, "%B %d, %Y, %H:%M:%S")

@field_validator("start_time", mode="before")
@classmethod
def check_start_time(cls, start_time: str) -> datetime:
    # start_time arrives as a date only (e.g. "February 13, 2023");
    # anchor it at midnight.
    return datetime.strptime(f"{start_time}, 00:00:00", "%B %d, %Y, %H:%M:%S")
@model_validator(mode="after")
def validate_st_role_after(self):
    # Resolve the persona's storage folder and restore its persisted
    # memories before the role starts acting.
    self.role_storage_path = STORAGE_PATH.joinpath(f"{self.sim_code}/personas/{self.name}")

    self.load_from()  # load role's memory

    self.set_actions([])
    if self.has_inner_voice:
        # Roles with an inner voice also react to direct user requirements.
        # TODO add communication action
        self._watch([UserRequirement, DummyAction])
    else:
        self._watch([DummyAction])
async def init_curr_tile(self):
    # init role: place the role at its saved (x, y) position for the
    # current step and register its current event on that maze tile.
    role_env: dict = get_role_environment(self.sim_code, self.name, self.step)
    pt_x = role_env["x"]
    pt_y = role_env["y"]
    self.rc.scratch.curr_tile = (pt_x, pt_y)

    self.rc.env.step(
        EnvAction(
            action_type=EnvActionType.ADD_TILE_EVENT,
            coord=(pt_x, pt_y),
            event=self.scratch.get_curr_event_and_desc(),
        )
    )
@property
def scratch(self):
    # Short-hand for the persona's scratch (short-term state) memory.
    return self.rc.scratch

@property
def role_tile(self):
    # Current (x, y) maze tile of the role.
    return self.scratch.curr_tile

@property
def a_mem(self):
    # GA-style alias for the associative memory.
    return self.rc.memory

@property
def s_mem(self):
    # GA-style alias for the spatial memory.
    return self.rc.spatial_memory

@property
def memory(self):
    # Associative memory (same store as a_mem).
    return self.rc.memory
def load_from(self):
    """
    load role data from `storage/{simulation_name}/personas/{role_name}`

    Restores the three persona memory stores from bootstrap_memory/:
    associative memory, spatial memory and scratch.
    """
    memory_saved = self.role_storage_path.joinpath("bootstrap_memory/associative_memory")
    self.rc.memory.set_mem_path(memory_saved)

    sp_mem_saved = self.role_storage_path.joinpath("bootstrap_memory/spatial_memory.json")
    self.rc.spatial_memory.set_mem_path(f_saved=sp_mem_saved)

    scratch_f_saved = self.role_storage_path.joinpath("bootstrap_memory/scratch.json")
    # Scratch is rebuilt wholesale from its saved JSON rather than mutated.
    self.rc.scratch = Scratch.init_scratch_from_path(f_saved=scratch_f_saved)
    logger.info(f"Role: {self.name} loaded role's memory from {str(self.role_storage_path)}")
def save_into(self):
    """
    save role data from `storage/{simulation_name}/personas/{role_name}`

    Persists the three persona memory stores back into bootstrap_memory/
    (mirror of load_from).
    """
    memory_saved = self.role_storage_path.joinpath("bootstrap_memory/associative_memory")
    self.rc.memory.save(memory_saved)

    sp_mem_saved = self.role_storage_path.joinpath("bootstrap_memory/spatial_memory.json")
    self.rc.spatial_memory.save(sp_mem_saved)

    scratch_f_saved = self.role_storage_path.joinpath("bootstrap_memory/scratch.json")
    self.rc.scratch.save(scratch_f_saved)
    logger.info(f"Role: {self.name} saved role's memory into {str(self.role_storage_path)}")
async def _observe(self) -> int:
    """
    Pull new messages from the buffer and keep those this role watches or
    is directly addressed by. A lone user whisper (UserRequirement) is
    injected into the persona as inner voice. Always returns 1 (when an
    env is attached) so the role's `_react` runs every round.
    """
    if not self.rc.env:
        return 0
    # Simplified: the original set `news = []` and then tested
    # `if not news:` before popping -- the condition was always true.
    news = self.rc.msg_buffer.pop_all()
    old_messages = [] if not self.enable_memory else self.rc.memory.get()
    # Filter out messages of interest.
    self.rc.news = [
        n for n in news if (n.cause_by in self.rc.watch or self.name in n.send_to) and n not in old_messages
    ]

    if len(self.rc.news) == 1 and self.rc.news[0].cause_by == any_to_str(UserRequirement):
        logger.warning(f"Role: {self.name} add inner voice: {self.rc.news[0].content}")
        await self.add_inner_voice(self.rc.news[0].content)

    return 1  # always return 1 to execute role's `_react`
async def add_inner_voice(self, whisper: str):
    """
    Inject a user "whisper" into the persona: rephrase it as the persona's
    own thought, score and embed it, and store it as a thought node in
    associative memory (expires after 30 days).
    """

    async def generate_inner_thought(whisper: str):
        # Rephrase the raw whisper in the persona's own voice.
        run_whisper_thought = AgentWhisperThoughtAction()
        inner_thought = await run_whisper_thought.run(self, whisper)
        return inner_thought

    thought = await generate_inner_thought(whisper)

    # init scratch curr_time with self.curr_time
    self.inner_voice = True
    self.rc.scratch.curr_time = self.curr_time

    created = self.rc.scratch.curr_time if self.rc.scratch.curr_time else datetime.now()
    expiration = created + timedelta(days=30)
    run_event_triple = AgentEventTriple()
    # Decompose the thought into a (subject, predicate, object) triple.
    s, p, o = await run_event_triple.run(thought, self)
    keywords = set([s, p, o])
    thought_poignancy = await generate_poig_score(self, "event", whisper)
    thought_embedding_pair = (thought, get_embedding(thought))
    self.rc.memory.add_thought(
        created, expiration, s, p, o, thought, keywords, thought_poignancy, thought_embedding_pair, None
    )
async def observe(self) -> list[BasicMemory]:
    # TODO observe info from maze_env
    """
    Perceive events around the role and save them to memory, both events
    and spaces.

    We first perceive the events nearby the role, as determined by its
    <vision_r>. If there are a lot of events happening within that radius, we
    take the <att_bandwidth> of the closest events. Finally, we check whether
    any of them are new, as determined by <retention>. If they are new, then we
    save those and return the <BasicMemory> instances for those events.

    OUTPUT:
        ret_events: a list of <BasicMemory> that are perceived and new.
    """
    # PERCEIVE SPACE
    # We get the nearby tiles given our current tile and the persona's vision
    # radius.
    nearby_tiles = self.rc.env.observe(
        EnvObsParams(
            obs_type=EnvObsType.TILE_NBR, coord=self.rc.scratch.curr_tile, vision_radius=self.rc.scratch.vision_r
        )
    )

    # We then store the perceived space. Note that the s_mem of the persona is
    # in the form of a tree constructed using dictionaries.
    for tile in nearby_tiles:
        tile_info = self.rc.env.observe(EnvObsParams(obs_type=EnvObsType.GET_TITLE, coord=tile))
        self.rc.spatial_memory.add_tile_info(tile_info)

    # PERCEIVE EVENTS.
    # We will perceive events that take place in the same arena as the
    # persona's current arena.
    curr_arena_path = self.rc.env.observe(
        EnvObsParams(obs_type=EnvObsType.TILE_PATH, coord=self.rc.scratch.curr_tile, level="arena")
    )
    # We do not perceive the same event twice (this can happen if an object is
    # extended across multiple tiles).
    percept_events_set = set()
    # We will order our percept based on the distance, with the closest ones
    # getting priorities.
    percept_events_list = []
    # First, we put all events that are occurring in the nearby tiles into the
    # percept_events_list
    for tile in nearby_tiles:
        tile_details = self.rc.env.observe(EnvObsParams(obs_type=EnvObsType.GET_TITLE, coord=tile))
        if tile_details["events"]:
            tmp_arena_path = self.rc.env.observe(
                EnvObsParams(obs_type=EnvObsType.TILE_PATH, coord=tile, level="arena")
            )
            if tmp_arena_path == curr_arena_path:
                # This calculates the distance between the persona's current tile,
                # and the target tile.
                dist = math.dist([tile[0], tile[1]], [self.rc.scratch.curr_tile[0], self.rc.scratch.curr_tile[1]])
                # Add any relevant events to our temp set/list with the distant info.
                for event in tile_details["events"]:
                    if event not in percept_events_set:
                        percept_events_list += [[dist, event]]
                        percept_events_set.add(event)

    # We sort, and perceive only self.rc.scratch.att_bandwidth of the closest
    # events. If the bandwidth is larger, then it means the persona can perceive
    # more elements within a small area.
    percept_events_list = sorted(percept_events_list, key=itemgetter(0))
    perceived_events = []
    for dist, event in percept_events_list[: self.rc.scratch.att_bandwidth]:
        perceived_events += [event]

    # Storing events.
    # <ret_events> is a list of <BasicMemory> instances from the persona's
    # associative memory.
    ret_events = []
    for p_event in perceived_events:
        s, p, o, desc = p_event
        if not p:
            # If the object is not present, then we default the event to "idle".
            p = "is"
            o = "idle"
            desc = "idle"
        desc = f"{s.split(':')[-1]} is {desc}"
        p_event = (s, p, o)

        # We retrieve the latest self.rc.scratch.retention events. If there is
        # something new that is happening (that is, p_event not in latest_events),
        # then we add that event to the a_mem and return it.
        latest_events = self.rc.memory.get_summarized_latest_events(self.rc.scratch.retention)
        if p_event not in latest_events:
            # We start by managing keywords: the unqualified subject/object
            # names (address prefixes before ':' are stripped).
            keywords = set()
            sub = p_event[0]
            obj = p_event[2]
            if ":" in p_event[0]:
                sub = p_event[0].split(":")[-1]
            if ":" in p_event[2]:
                obj = p_event[2].split(":")[-1]
            keywords.update([sub, obj])

            # Get event embedding; a parenthesized fragment of the description,
            # if present, is used as the embedding key.
            desc_embedding_in = desc
            if "(" in desc:
                desc_embedding_in = desc_embedding_in.split("(")[1].split(")")[0].strip()
            if desc_embedding_in in self.rc.memory.embeddings:
                event_embedding = self.rc.memory.embeddings[desc_embedding_in]
            else:
                event_embedding = get_embedding(desc_embedding_in)
            event_embedding_pair = (desc_embedding_in, event_embedding)

            # Get event poignancy.
            event_poignancy = await generate_poig_score(self, "event", desc_embedding_in)
            logger.debug(f"Role {self.name} event_poignancy: {event_poignancy}")

            # If we observe the persona's self chat, we include that in the memory
            # of the persona here.
            chat_node_ids = []
            if p_event[0] == f"{self.name}" and p_event[1] == "chat with":
                curr_event = self.rc.scratch.act_event
                if self.rc.scratch.act_description in self.rc.memory.embeddings:
                    chat_embedding = self.rc.memory.embeddings[self.rc.scratch.act_description]
                else:
                    chat_embedding = get_embedding(self.rc.scratch.act_description)
                chat_embedding_pair = (self.rc.scratch.act_description, chat_embedding)
                chat_poignancy = await generate_poig_score(self, "chat", self.rc.scratch.act_description)
                chat_node = self.rc.memory.add_chat(
                    self.rc.scratch.curr_time,
                    None,
                    curr_event[0],
                    curr_event[1],
                    curr_event[2],
                    self.rc.scratch.act_description,
                    keywords,
                    chat_poignancy,
                    chat_embedding_pair,
                    self.rc.scratch.chat,
                )
                chat_node_ids = [chat_node.memory_id]

            # Finally, we add the current event to the agent's memory.
            ret_events += [
                self.rc.memory.add_event(
                    self.rc.scratch.curr_time,
                    None,
                    s,
                    p,
                    o,
                    desc,
                    keywords,
                    event_poignancy,
                    event_embedding_pair,
                    chat_node_ids,
                )
            ]
            # Accumulated poignancy drives the reflection trigger.
            self.rc.scratch.importance_trigger_curr -= event_poignancy
            self.rc.scratch.importance_ele_n += 1
    return ret_events
def retrieve(self, observed: list) -> dict:
# TODO retrieve memories from agent_memory
retrieved = dict()
for event in observed:
retrieved[event.description] = dict()
retrieved[event.description]["curr_event"] = event
relevant_events = self.rc.memory.retrieve_relevant_events(event.subject, event.predicate, event.object)
retrieved[event.description]["events"] = list(relevant_events)
relevant_thoughts = self.rc.memory.retrieve_relevant_thoughts(event.subject, event.predicate, event.object)
retrieved[event.description]["thoughts"] = list(relevant_thoughts)
return retrieved
async def reflect(self):
# TODO reflection if meet reflect condition
await role_reflect(self)
# TODO re-add result to memory
# 已封装到Reflect函数之中
async def execute(self, plan: str):
"""
Args:
plan: This is a string address of the action we need to execute.
It comes in the form of "{world}:{sector}:{arena}:{game_objects}".
It is important that you access this without doing negative
indexing (e.g., [-1]) because the latter address elements may not be
present in some cases.
e.g., "dolores double studio:double studio:bedroom 1:bed"
"""
roles = self.rc.env.get_roles()
if "<random>" in plan and self.rc.scratch.planned_path == []:
self.rc.scratch.act_path_set = False
# <act_path_set> is set to True if the path is set for the current action.
# It is False otherwise, and means we need to construct a new path.
if not self.rc.scratch.act_path_set:
# <target_tiles> is a list of tile coordinates where the persona may go
# to execute the current action. The goal is to pick one of them.
target_tiles = None
logger.info(f"Role {self.name} plan: {plan}")
if "<persona>" in plan:
# Executing persona-persona interaction.
target_p_tile = roles[plan.split("<persona>")[-1].strip()].scratch.curr_tile
collision_maze = self.rc.env.observe()["collision_maze"]
potential_path = path_finder(
collision_maze, self.rc.scratch.curr_tile, target_p_tile, collision_block_id
)
if len(potential_path) <= 2:
target_tiles = [potential_path[0]]
else:
collision_maze = self.rc.env.observe()["collision_maze"]
potential_1 = path_finder(
collision_maze,
self.rc.scratch.curr_tile,
potential_path[int(len(potential_path) / 2)],
collision_block_id,
)
potential_2 = path_finder(
collision_maze,
self.rc.scratch.curr_tile,
potential_path[int(len(potential_path) / 2) + 1],
collision_block_id,
)
if len(potential_1) <= len(potential_2):
target_tiles = [potential_path[int(len(potential_path) / 2)]]
else:
target_tiles = [potential_path[int(len(potential_path) / 2 + 1)]]
elif "<waiting>" in plan:
# Executing interaction where the persona has decided to wait before
# executing their action.
x = int(plan.split()[1])
y = int(plan.split()[2])
target_tiles = [[x, y]]
elif "<random>" in plan:
# Executing a random location action.
plan = ":".join(plan.split(":")[:-1])
address_tiles = self.rc.env.observe()["address_tiles"]
target_tiles = address_tiles[plan]
target_tiles = random.sample(list(target_tiles), 1)
else:
# This is our default execution. We simply take the persona to the
# location where the current action is taking place.
# Retrieve the target addresses. Again, plan is an action address in its
# string form. <maze.address_tiles> takes this and returns candidate
# coordinates.
address_tiles = self.rc.env.observe()["address_tiles"]
if plan not in address_tiles:
address_tiles["Johnson Park:park:park garden"] # ERRORRRRRRR
else:
target_tiles = address_tiles[plan]
# There are sometimes more than one tile returned from this (e.g., a tabe
# may stretch many coordinates). So, we sample a few here. And from that
# random sample, we will take the closest ones.
if len(target_tiles) < 4:
target_tiles = random.sample(list(target_tiles), len(target_tiles))
else:
target_tiles = random.sample(list(target_tiles), 4)
# If possible, we want personas to occupy different tiles when they are
# headed to the same location on the maze. It is ok if they end up on the
# same time, but we try to lower that probability.
# We take care of that overlap here.
persona_name_set = set(roles.keys())
new_target_tiles = []
for i in target_tiles:
access_tile = self.rc.env.observe(EnvObsParams(obs_type=EnvObsType.GET_TITLE, coord=i))
curr_event_set = access_tile["events"]
pass_curr_tile = False
for j in curr_event_set:
if j[0] in persona_name_set:
pass_curr_tile = True
if not pass_curr_tile:
new_target_tiles += [i]
if len(new_target_tiles) == 0:
new_target_tiles = target_tiles
target_tiles = new_target_tiles
# Now that we've identified the target tile, we find the shortest path to
# one of the target tiles.
curr_tile = self.rc.scratch.curr_tile
closest_target_tile = None
path = None
for i in target_tiles:
# path_finder takes a collision_mze and the curr_tile coordinate as
# an input, and returns a list of coordinate tuples that becomes the
# path.
# e.g., [(0, 1), (1, 1), (1, 2), (1, 3), (1, 4)...]
collision_maze = self.rc.env.observe()["collision_maze"]
curr_path = path_finder(collision_maze, curr_tile, i, collision_block_id)
if not closest_target_tile:
closest_target_tile = i
path = curr_path
elif len(curr_path) < len(path):
closest_target_tile = i
path = curr_path
# Actually setting the <planned_path> and <act_path_set>. We cut the
# first element in the planned_path because it includes the curr_tile.
self.rc.scratch.planned_path = path[1:]
self.rc.scratch.act_path_set = True
# Setting up the next immediate step. We stay at our curr_tile if there is
# no <planned_path> left, but otherwise, we go to the next tile in the path.
ret = self.rc.scratch.curr_tile
if self.rc.scratch.planned_path:
ret = self.rc.scratch.planned_path[0]
self.rc.scratch.planned_path = self.rc.scratch.planned_path[1:]
description = f"{self.rc.scratch.act_description}"
description += f" @ {self.rc.scratch.act_address}"
execution = ret, self.rc.scratch.act_pronunciatio, description
return execution
async def update_role_env(self) -> bool:
role_env = get_role_environment(self.sim_code, self.name, self.step)
ret = True
if role_env:
for key, val in self.game_obj_cleanup.items():
self.rc.env.step(EnvAction(action_type=EnvActionType.TURN_TILE_EVENT_IDLE, coord=val, event=key))
# reset game_obj_cleanup
self.game_obj_cleanup = dict()
curr_tile = self.role_tile
new_tile = (role_env["x"], role_env["y"])
self.rc.env.step(
EnvAction(action_type=EnvActionType.RM_TITLE_SUB_EVENT, coord=curr_tile, subject=self.name)
)
self.rc.env.step(
EnvAction(
action_type=EnvActionType.ADD_TILE_EVENT,
coord=new_tile,
event=self.scratch.get_curr_event_and_desc(),
)
)
# the persona will travel to get to their destination. *Once*
# the persona gets there, we activate the object action.
if not self.scratch.planned_path:
self.game_obj_cleanup[self.scratch.get_curr_event_and_desc()] = new_tile
self.rc.env.step(
EnvAction(
action_type=EnvActionType.ADD_TILE_EVENT,
coord=new_tile,
event=self.scratch.get_curr_event_and_desc(),
)
)
blank = (self.scratch.get_curr_obj_event_and_desc()[0], None, None, None)
self.rc.env.step(EnvAction(action_type=EnvActionType.RM_TILE_EVENT, coord=new_tile, event=blank))
# update role's new tile
self.rc.scratch.curr_tile = new_tile
else:
ret = False
time.sleep(1)
logger.warning(
f"{self.sim_code}/environment/{self.step}.json not exist or parses failed, " f"sleep 1s and re-check"
)
return ret
async def _react(self) -> Message:
# update role env
ret = await self.update_role_env()
if not ret:
# TODO add message
logger.info(f"Role: {self.name} update_role_env return False")
return DummyMessage()
new_day = False
if not self.scratch.curr_time or self.inner_voice:
new_day = "First day"
elif self.scratch.curr_time.strftime("%A %B %d") != self.curr_time.strftime("%A %B %d"):
new_day = "New day"
logger.info(f"Role: {self.name} new_day: {new_day}")
self.rc.scratch.curr_time = self.curr_time
# get maze_env from self.rc.env, and observe env info
observed = await self.observe()
# use self.rc.memory 's retrieve functions
retrieved = self.retrieve(observed)
plans = await plan(self, self.rc.env.get_roles(), new_day, retrieved)
await self.reflect()
# feed-back into maze_env
next_tile, pronunciatio, description = await self.execute(plans)
role_move = {
"movement": next_tile,
"pronunciatio": pronunciatio,
"description": description,
"chat": self.scratch.chat,
}
save_movement(self.name, role_move, step=self.step, sim_code=self.sim_code, curr_time=self.curr_time)
# step update
logger.info(f"Role: {self.name} run at {self.step} step on {self.curr_time} at tile: {self.scratch.curr_tile}")
self.step += 1
save_environment(self.name, self.step, self.sim_code, next_tile)
self.curr_time += timedelta(seconds=self.sec_per_step)
self.inner_voice = False
time.sleep(0.5)
return DummyMessage()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/reflect/reflect.py | metagpt/ext/stanford_town/reflect/reflect.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : Reflect function
import datetime
import time
from metagpt.ext.stanford_town.actions.run_reflect_action import (
AgentChatPoignancy,
AgentEventPoignancy,
AgentEventTriple,
AgentFocusPt,
AgentInsightAndGuidance,
AgentMemoryOnConvo,
AgentPlanThoughtOnConvo,
)
from metagpt.ext.stanford_town.memory.retrieve import new_agent_retrieve
from metagpt.ext.stanford_town.utils.utils import get_embedding
from metagpt.logs import logger
async def generate_focal_points(role: "STRole", n: int = 3):
nodes = [
[i.last_accessed, i] for i in role.memory.event_list + role.memory.thought_list if "idle" not in i.embedding_key
]
nodes = sorted(nodes, key=lambda x: x[0])
nodes = [i for _, i in nodes]
statements = ""
for node in nodes[-1 * role.scratch.importance_ele_n :]:
statements += node.embedding_key + "\n"
run_focal_pt = AgentFocusPt()
return await run_focal_pt.run(role, statements, n)
async def generate_insights_and_evidence(role: "STRole", nodes: list, n: int = 5):
statements = ""
for count, node in enumerate(nodes):
statements += f"{str(count)}. {node.embedding_key}\n"
run_insight_and_guidance = AgentInsightAndGuidance()
ret = await run_insight_and_guidance.run(role, statements, n)
logger.info(ret)
try:
for thought, evi_raw in ret.items():
evidence_node_id = [nodes[i].memory_id for i in evi_raw]
ret[thought] = evidence_node_id
return ret
except Exception as exp:
logger.error(f"generate_insights_and_evidence error:{exp}")
return {"this is blank": "node_1"}
async def generate_action_event_triple(act_desp: str, role: "STRole"):
"""TODO
INPUT:
act_desp: the description of the action (e.g., "sleeping")
role: The Persona class instance
OUTPUT:
a string of emoji that translates action description.
EXAMPLE OUTPUT:
"🧈🍞"
"""
run_event_triple = AgentEventTriple()
result = await run_event_triple.run(act_desp, role)
return result
async def generate_poig_score(role: "STRole", event_type: str, description: str):
if "is idle" in description:
return 1
if event_type == "event" or event_type == "thought":
run_event_poignancy = AgentEventPoignancy()
return await run_event_poignancy.run(role, description)
elif event_type == "chat":
run_chat_poignancy = AgentChatPoignancy()
return await run_chat_poignancy.run(role, role.scratch.act_description)
async def generate_planning_thought_on_convo(role: "STRole", all_utt: str):
run_planning_on_convo = AgentPlanThoughtOnConvo()
return await run_planning_on_convo.run(role, all_utt)
async def generate_memo_on_convo(role: "STRole", all_utt: str):
run_memo_on_convo = AgentMemoryOnConvo()
return await run_memo_on_convo.run(role, all_utt)
# Done
async def run_reflect(role: "STRole"):
"""
Run the actual reflection. We generate the focal points, retrieve any
relevant nodes, and generate thoughts and insights.
INPUT:
role: Current Persona object
Output:
None
"""
# Reflection requires certain focal points. Generate that first.
focal_points = await generate_focal_points(role, 3)
# Retrieve the relevant Nodesobject for each of the focal points.
# <retrieved> has keys of focal points, and values of the associated Nodes.
retrieved = new_agent_retrieve(role, focal_points)
# For each of the focal points, generate thoughts and save it in the
# agent's memory.
for focal_pt, nodes in retrieved.items():
xx = [i.embedding_key for i in nodes]
for xxx in xx:
logger.info(f"Nodes retrieved for `{focal_pt}` are `{xxx}`.")
thoughts = await generate_insights_and_evidence(role, nodes, 5)
# 生成的是字典类型
for thought, evidence in thoughts.items():
created = role.scratch.curr_time
expiration = created + datetime.timedelta(days=30)
s, p, o = await generate_action_event_triple("(" + thought + ")", role)
keywords = set([s, p, o])
thought_poignancy = await generate_poig_score(role, "thought", thought)
thought_embedding_pair = (thought, get_embedding(thought))
role.memory.add_thought(
created, expiration, s, p, o, thought, keywords, thought_poignancy, thought_embedding_pair, evidence
)
logger.info(f"add thought memory: {thought}, evidence: {evidence}")
time.sleep(2) # avoid Rate limit
def reflection_trigger(role: "STRole"):
"""
Given the current role, determine whether the role should run a
reflection.
Our current implementation checks for whether the sum of the new importance
measure has reached the set (hyper-parameter) threshold.
INPUT:
role: Current Persona object
Output:
True if we are running a new reflection.
False otherwise.
"""
logger.info(f"{role.scratch.name} role.scratch.importance_trigger_curr:: {role.scratch.importance_trigger_curr}"),
if role.scratch.importance_trigger_curr <= 0 and [] != role.memory.event_list + role.memory.thought_list:
return True
return False
# Done
def reset_reflection_counter(role: "STRole"):
"""
We reset the counters used for the reflection trigger.
INPUT:
role: Current Persona object
Output:
None
"""
role_imt_max = role.scratch.importance_trigger_max
role.scratch.importance_trigger_curr = role_imt_max
role.scratch.importance_ele_n = 0
async def role_reflect(role: "STRole"):
"""
The main reflection module for the role. We first check if the trigger
conditions are met, and if so, run the reflection and reset any of the
relevant counters.
INPUT:
role: Current Persona object
Output:
None
"""
if reflection_trigger(role):
await run_reflect(role)
reset_reflection_counter(role)
if role.scratch.chatting_end_time:
# update 10 to it's real sec_per_step value
if role.scratch.curr_time + datetime.timedelta(0, role.sec_per_step) == role.scratch.chatting_end_time:
all_utt = ""
if role.scratch.chat:
for row in role.scratch.chat:
all_utt += f"{row[0]}: {row[1]}\n"
last_chat = role.memory.get_last_chat(role.scratch.chatting_with)
if last_chat:
evidence = [last_chat.memory_id]
else:
logger.info(f"Role: {role.name} get_last_chat: {last_chat}")
return
planning_thought = await generate_planning_thought_on_convo(role, all_utt)
planning_thought = f"For {role.scratch.name}'s planning: {planning_thought}"
logger.info(f"Role: {role.name} planning_thought: {planning_thought}")
created = role.scratch.curr_time
expiration = created + datetime.timedelta(days=30)
s, p, o = await generate_action_event_triple(planning_thought, role)
keywords = set([s, p, o])
thought_poignancy = await generate_poig_score(role, "thought", planning_thought)
thought_embedding_pair = (planning_thought, get_embedding(planning_thought))
role.memory.add_thought(
created,
expiration,
s,
p,
o,
planning_thought,
keywords,
thought_poignancy,
thought_embedding_pair,
evidence,
)
memo_thought = await generate_memo_on_convo(role, all_utt)
memo_thought = f"{role.scratch.name} {memo_thought}"
created = role.scratch.curr_time
expiration = created + datetime.timedelta(days=30)
s, p, o = await generate_action_event_triple(memo_thought, role)
keywords = set([s, p, o])
thought_poignancy = await generate_poig_score(role, "thought", memo_thought)
thought_embedding_pair = (memo_thought, get_embedding(memo_thought))
role.memory.add_thought(
created,
expiration,
s,
p,
o,
memo_thought,
keywords,
thought_poignancy,
thought_embedding_pair,
evidence,
)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/reflect/__init__.py | metagpt/ext/stanford_town/reflect/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : reflection module
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/ext/stanford_town/prompts/__init__.py | metagpt/ext/stanford_town/prompts/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : stanford town prompt templates
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/prompts/product_manager.py | metagpt/prompts/product_manager.py | from metagpt.prompts.di.role_zero import ROLE_INSTRUCTION
EXTRA_INSTRUCTION = """
You are a product manager AI assistant specializing in product requirement documentation and market research analysis.
Your work focuses on the analysis of problems and data.
You should always output a document.
## Core Tools
1. Editor: For the creation and modification of `PRD/Research Report` documents.
2. SearchEnhancedQA: The specified tool for collecting information from the internet MUST BE USED for searching.
3. Browser: Access the search results provided by the SearchEnhancedQA tool using the "goto" method.
## Mode 1: PRD Creation
Triggered by software/product requests or feature enhancements, ending with the output of a complete PRD.
### Required Fields
1. Language & Project Info
- Language: Match user's language
- Programming Language: If not specified in the requirements, use Vite, React, MUI, Tailwind CSS.
- Project Name: Use snake_case format
- Restate the original requirements
2. Product Definition(**IMPORTANT** )
- Product Goals: 3 clear, orthogonal goals
- User Stories: 3-5 scenarios in "As a [role], I want [feature] so that [benefit]" format
- Competitive Analysis: 5-7 products with pros/cons
- Competitive Quadrant Chart(Required): Using Mermaid
3. Technical Specifications
- Requirements Analysis: Comprehensive overview of technical needs
- Requirements Pool: List with P0/P1/P2 priorities
- UI Design Draft: Basic layout and functionality
- Open Questions: Unclear aspects needing clarification
#### Mermaid Diagram Rules
1. Use mermaid quadrantChart syntax. Distribute scores evenly between 0 and 1
2. Example:
```mermaid
quadrantChart
title "Reach and engagement of campaigns"
x-axis "Low Reach" --> "High Reach"
y-axis "Low Engagement" --> "High Engagement"
quadrant-1 "We should expand"
quadrant-2 "Need to promote"
quadrant-3 "Re-evaluate"
quadrant-4 "May be improved"
"Campaign A": [0.3, 0.6]
"Campaign B": [0.45, 0.23]
"Campaign C": [0.57, 0.69]
"Campaign D": [0.78, 0.34]
"Campaign E": [0.40, 0.34]
"Campaign F": [0.35, 0.78]
"Our Target Product": [0.5, 0.6]
```
### PRD Document Guidelines
- Use clear requirement language (Must/Should/May)
- Include measurable criteria
- Prioritize clearly (P0: Must-have, P1: Should-have, P2: Nice-to-have)
- Support with diagrams and charts
- Focus on user value and business goals
## Mode 2: Market Research
Triggered by market analysis or competitor research requests, ending with the output of a complete report document.
### **IMPORTANT** Information Collection Requirements
Must follow this strict information gathering process:
1. Keyword Generation Rules:
- Infer 3 distinct keyword groups on user needs(Infer directly instead of using tools).
- Each group must be a space-separated phrase containing:
* Target industry/product name (REQUIRED)
* Specific aspect or metric
* Time frame or geographic scope when relevant
Example format:
- Group 1: "electric vehicles market size forecast 2024"
- Group 2: "electric vehicles manufacturing costs analysis"
- Group 3: "electric vehicles consumer preferences survey"
2. Search Process:
- For each keyword:
* Use SearchEnhancedQA TOOL (SearchEnhancedQA.run) collect top 3 search results
* Remove duplicate URLs
3. Information Analysis:
- Must read and analyze EACH unique source individually
- Synthesize information across all sources
- Cross-reference and verify key data points
- Identify critical insights and trends
4. Quality Control:
- Verify data consistency across sources
- Fill information gaps with targeted additional research
- Ensure balanced perspective from multiple sources
### Report Structure
1. Summary: Key findings and recommendations
2. Industry Overview: Market size, trends, and structure
3. Market Analysis: Segments, growth drivers, and challenges
4. Competitor Landscape: Key players and positioning
5. Target Audience Analysis: User segments and needs
6. Pricing Analysis: Market rates and strategies
7. Key Findings: Major insights and opportunities
8. Strategic Recommendations: Action items
9. Appendices: Supporting data
### Final Report Requirements
1. Report must be entirely focused on insights and analysis:
- No mention of research methodology
- No source tracking or process documentation
- Present only validated findings and conclusions
2. Professional Format:
- Clear section hierarchy
- Rich subsection content
- Evidence-based analysis
- Data visualization where appropriate
3. Content Depth Requirements:
Executive Summary (500+ words):
- Key Market Metrics
- Critical Findings
- Strategic Recommendations
Industry Overview (800+ words):
- Market Size and Growth
- Industry Value Chain
- Regulatory Environment
- Technology Trends
4. Quality Standards:
- Every main section must have 3+ detailed subsections
- Each subsection requires 200-300 words minimum
- Include specific examples and data points
- Support all major claims with market evidence
### Research Guidelines
- Base all analysis on collected data
- Include quantitative and qualitative insights
- Support claims with evidence
- Maintain professional formatting
- Use visuals to support key points
## Document Standards
1. Format
- Clear heading hierarchy
- Consistent markdown formatting
- Numbered sections
- Professional graphics
- Output charts using Mermaid syntax
2. Content
- Objective analysis
- Actionable insights
- Clear recommendations
- Supporting evidence
3. Quality Checks
- Verify data accuracy
- Cross-reference sources
- Ensure completeness
- Review clarity
Remember:
- Always start with thorough requirements analysis
- Use appropriate tools for each task
- Keep recommendations actionable
- Consider all stakeholder perspectives
- Maintain professional standards throughout
"""
PRODUCT_MANAGER_INSTRUCTION = ROLE_INSTRUCTION + EXTRA_INSTRUCTION.strip()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/prompts/task_type.py | metagpt/prompts/task_type.py | # Prompt for taking on "eda" tasks
EDA_PROMPT = """
The current task is about exploratory data analysis, please note the following:
- Distinguish column types with `select_dtypes` for tailored analysis and visualization, such as correlation.
- Remember to `import numpy as np` before using Numpy functions.
"""
# Prompt for taking on "data_preprocess" tasks
DATA_PREPROCESS_PROMPT = """
The current task is about data preprocessing, please note the following:
- Monitor data types per column, applying appropriate methods.
- Ensure operations are on existing dataset columns.
- Avoid writing processed data to files.
- **ATTENTION** Do NOT make any changes to the label column, such as standardization, etc.
- Prefer alternatives to one-hot encoding for categorical data.
- Only encode or scale necessary columns to allow for potential feature-specific engineering tasks (like time_extract, binning, extraction, etc.) later.
- Each step do data preprocessing to train, must do same for test separately at the same time.
- Always copy the DataFrame before processing it and use the copy to process.
"""
# Prompt for taking on "feature_engineering" tasks
FEATURE_ENGINEERING_PROMPT = """
The current task is about feature engineering. when performing it, please adhere to the following principles:
- Generate as diverse features as possible to improve the model's performance step-by-step.
- Use available feature engineering tools if they are potential impactful.
- Avoid creating redundant or excessively numerous features in one step.
- Exclude ID columns from feature generation and remove them.
- Each feature engineering operation performed on the train set must also applies to the dev/test separately at the same time.
- **ATTENTION** Do NOT use the label column to create features, except for cat encoding.
- Use the data from previous task result if exist, do not mock or reload data yourself.
- Always copy the DataFrame before processing it and use the copy to process.
"""
# Prompt for taking on "model_train" tasks
MODEL_TRAIN_PROMPT = """
The current task is about training a model, please ensure high performance:
- For tabular datasets - you have access to XGBoost, CatBoost, random forest, extremely randomized trees, k-nearest neighbors, linear regression, etc.
- For image datasets - you have access to Swin Transformer, ViT, ResNet, EfficientNet, etc.
- For text datasets - you have access to Electra, DeBERTa, GPT-2, BERT, etc.
- Avoid the use of SVM because of its high training time.
- Keep in mind that your user prioritizes results and is highly focused on model performance. So, when needed, feel free to use models of any complexity to improve effectiveness, such as XGBoost, CatBoost, etc.
- If non-numeric columns exist, perform label encode together with all steps.
- Use the data from previous task result directly, do not mock or reload data yourself.
- Set suitable hyperparameters for the model, make metrics as high as possible.
"""
# Prompt for taking on "model_evaluate" tasks
MODEL_EVALUATE_PROMPT = """
The current task is about evaluating a model, please note the following:
- Ensure that the evaluated data is same processed as the training data. If not, remember use object in 'Done Tasks' to transform the data.
- Use trained model from previous task result directly, do not mock or reload model yourself.
"""
# Prompt for taking on "image2webpage" tasks
IMAGE2WEBPAGE_PROMPT = """
The current task is about converting image into webpage code. please note the following:
- Single-Step Code Generation: Execute the entire code generation process in a single step, encompassing HTML, CSS, and JavaScript. Avoid fragmenting the code generation into multiple separate steps to maintain consistency and simplify the development workflow.
- Save webpages: Be sure to use the save method provided.
"""
# Prompt for taking on "web_scraping" tasks
WEB_SCRAPING_PROMPT = """
- Remember to view and print the necessary HTML content in a separate task to understand the structure first before scraping data. Such as `html_content = await view_page_element_to_scrape(...)\nprint(html_content)`.
- Since the data required by user may not correspond directly to the actual HTML element names, you should thoroughly analyze the HTML structure and meanings of all elements in your context first. Ensure the `class_` in your code should derived from the actual HTML structure directly, not based on your knowledge. To ensure it, analyse the most suitable location of the 'class_' in the actual HTML content before code.
- Reuse existing html object variable from previous code (if any) to extract data, do not mock or hard code a html variable yourself.
"""
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.