repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/provider/zhipuai/__init__.py | tests/metagpt/provider/zhipuai/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/environment/test_base_env.py | tests/metagpt/environment/test_base_env.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : the unittest of ExtEnv&Env
from typing import Any, Optional
import pytest
from metagpt.base.base_env_space import BaseEnvAction, BaseEnvObsParams
from metagpt.environment.api.env_api import EnvAPIAbstract
from metagpt.environment.base_env import (
Environment,
env_read_api_registry,
env_write_api_registry,
mark_as_readable,
mark_as_writeable,
)
class ForTestEnv(Environment):
value: int = 0
def reset(
self,
*,
seed: Optional[int] = None,
options: Optional[dict[str, Any]] = None,
) -> tuple[dict[str, Any], dict[str, Any]]:
pass
def observe(self, obs_params: Optional[BaseEnvObsParams] = None) -> Any:
pass
def step(self, action: BaseEnvAction) -> tuple[dict[str, Any], float, bool, bool, dict[str, Any]]:
pass
@mark_as_readable
def read_api_no_param(self):
return self.value
@mark_as_readable
def read_api(self, a: int, b: int):
return a + b
@mark_as_writeable
def write_api(self, a: int, b: int):
self.value = a + b
@mark_as_writeable
async def async_read_api(self, a: int, b: int):
return a + b
@pytest.mark.asyncio
async def test_ext_env():
env = ForTestEnv()
assert len(env_read_api_registry) > 0
assert len(env_write_api_registry) > 0
apis = env.get_all_available_apis(mode="read")
assert len(apis) > 0
assert len(apis["read_api"]) == 3
_ = await env.write_thru_api(EnvAPIAbstract(api_name="write_api", kwargs={"a": 5, "b": 10}))
assert env.value == 15
with pytest.raises(KeyError):
await env.read_from_api("not_exist_api")
assert await env.read_from_api("read_api_no_param") == 15
assert await env.read_from_api(EnvAPIAbstract(api_name="read_api", kwargs={"a": 5, "b": 5})) == 10
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/environment/minecraft_env/__init__.py | tests/metagpt/environment/minecraft_env/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/environment/minecraft_env/test_minecraft_ext_env.py | tests/metagpt/environment/minecraft_env/test_minecraft_ext_env.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : the unittest of MinecraftExtEnv
from metagpt.environment.minecraft.const import MC_CKPT_DIR
from metagpt.environment.minecraft.minecraft_ext_env import MinecraftExtEnv
def test_minecraft_ext_env():
ext_env = MinecraftExtEnv()
assert ext_env.server, f"{ext_env.server_host}:{ext_env.server_port}"
assert MC_CKPT_DIR.joinpath("skill/code").exists()
assert ext_env.warm_up.get("optional_inventory_items") == 7
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/environment/mgx_env/run_mgx_env.py | tests/metagpt/environment/mgx_env/run_mgx_env.py | import asyncio
import os
import re
import threading
import time
from metagpt.environment.mgx.mgx_env import MGXEnv
from metagpt.roles import Architect, Engineer, ProductManager, ProjectManager
from metagpt.roles.di.data_analyst import DataAnalyst
from metagpt.roles.di.engineer2 import Engineer2
from metagpt.roles.di.team_leader import TeamLeader
from metagpt.schema import Message
async def main(requirement="", enable_human_input=False, use_fixed_sop=False, allow_idle_time=30):
if use_fixed_sop:
engineer = Engineer(n_borg=5, use_code_review=False)
else:
engineer = Engineer2()
env = MGXEnv()
env.add_roles(
[
TeamLeader(),
ProductManager(use_fixed_sop=use_fixed_sop),
Architect(use_fixed_sop=use_fixed_sop),
ProjectManager(use_fixed_sop=use_fixed_sop),
engineer,
# QaEngineer(),
DataAnalyst(),
]
)
if enable_human_input:
# simulate human sending messages in chatbox
stop_event = threading.Event()
human_input_thread = send_human_input(env, stop_event)
if requirement:
env.publish_message(Message(content=requirement))
# user_defined_recipient = "Alex"
# env.publish_message(Message(content=requirement, send_to={user_defined_recipient}), user_defined_recipient=user_defined_recipient)
allow_idle_time = allow_idle_time if enable_human_input else 1
start_time = time.time()
while time.time() - start_time < allow_idle_time:
if not env.is_idle:
await env.run()
start_time = time.time() # reset start time
if enable_human_input:
print("No more human input, terminating, press ENTER for a full termination.")
stop_event.set()
human_input_thread.join()
def send_human_input(env, stop_event):
"""
Simulate sending message in chatbox
Note in local environment, the message is consumed only after current round of env.run is finished
"""
def send_messages():
while not stop_event.is_set():
message = input("Enter a message any time: ")
user_defined_recipient = re.search(r"@(\w+)", message)
if user_defined_recipient:
recipient_name = user_defined_recipient.group(1)
print(f"{recipient_name} will receive the message")
env.publish_message(
Message(content=message, send_to={recipient_name}), user_defined_recipient=recipient_name
)
else:
env.publish_message(Message(content=message))
# Start a thread for sending messages
send_thread = threading.Thread(target=send_messages, args=())
send_thread.start()
return send_thread
GAME_REQ = "create a 2048 game"
GAME_REQ_ZH = "写一个贪吃蛇游戏"
WEB_GAME_REQ = "Write a 2048 game using JavaScript without using any frameworks, user can play with keyboard."
WEB_GAME_REQ_DEPLOY = "Write a 2048 game using JavaScript without using any frameworks, user can play with keyboard. When finished, deploy the game to public at port 8090."
TODO_APP_REQ = "Create a website widget for TODO list management. Users should be able to add, mark as complete, and delete tasks. Include features like prioritization, due dates, and categories. Make it visually appealing, responsive, and user-friendly. Use HTML, CSS, and JavaScript. Consider additional features like notifications or task export. Keep it simple and enjoyable for users.dont use vue or react.dont use third party library, use localstorage to save data."
FLAPPY_BIRD_REQ = "write a flappy bird game in pygame, code only"
SIMPLE_DATA_REQ = "load sklearn iris dataset and print a statistic summary"
WINE_REQ = "Run data analysis on sklearn Wine recognition dataset, and train a model to predict wine class (20% as validation), and show validation accuracy."
PAPER_LIST_REQ = """
Get data from `paperlist` table in https://papercopilot.com/statistics/iclr-statistics/iclr-2024-statistics/,
and save it to a csv file. paper title must include `multiagent` or `large language model`. *notice: print key variables*
"""
ECOMMERCE_REQ = """
Get products data from website https://scrapeme.live/shop/ and save it as a csv file.
**Notice: Firstly parse the web page encoding and the text HTML structure;
The first page product name, price, product URL, and image URL must be saved in the csv;**
"""
NEWS_36KR_REQ = """从36kr创投平台https://pitchhub.36kr.com/financing-flash 所有初创企业融资的信息, **注意: 这是一个中文网站**;
下面是一个大致流程, 你会根据每一步的运行结果对当前计划中的任务做出适当调整:
1. 爬取并本地保存html结构;
2. 直接打印第7个*`快讯`*关键词后2000个字符的html内容, 作为*快讯的html内容示例*;
3. 反思*快讯的html内容示例*中的规律, 设计正则匹配表达式来获取*`快讯`*的标题、链接、时间;
4. 筛选最近3天的初创企业融资*`快讯`*, 以list[dict]形式打印前5个。
5. 将全部结果存在本地csv中
**Notice: view the page element before writing scraping code**
"""
data_path = "data/titanic"
train_path = f"{data_path}/split_train.csv"
eval_path = f"{data_path}/split_eval.csv"
TITANIC_REQ = f"This is a titanic passenger survival dataset, your goal is to predict passenger survival outcome. The target column is Survived. Perform data analysis, data preprocessing, feature engineering, and modeling to predict the target. Report accuracy on the eval data. Train data path: '{train_path}', eval data path: '{eval_path}'."
CALIFORNIA_HOUSING_REQ = """
Analyze the 'Canifornia-housing-dataset' using https://scikit-learn.org/stable/modules/generated/sklearn.datasets.fetch_california_housing.html#sklearn.datasets.fetch_california_housing to predict the median house value. you need to perfrom data preprocessing, feature engineering and finally modeling to predict the target. Use machine learning techniques such as linear regression (including ridge regression and lasso regression), random forest, XGBoost. You also need to report the MSE on the test dataset
"""
STOCK_REQ = """Import NVIDIA Corporation (NVDA) stock price data from Yahoo Finance, focusing on historical closing prices from the past 5 years.
Summary statistics (mean, median, standard deviation, etc.) to understand the central tendency and dispersion of closingprices. Analyze the data for any noticeable trends, patterns, or anomalies over time, potentially using rolling averages or percentage changes.
Create a pot to visualize all the data analysis. Reserve 20% of the dataset for validaation. Train a predictive model on the training set. Report the modeel's validation accuracy, and visualize the result of prediction result.
"""
FIX_ISSUE1 = """
Write a fix for this issue: https://github.com/langchain-ai/langchain/issues/20453,
you can fix it on this repo https://github.com/garylin2099/langchain,
checkout a branch named test-fix, commit your changes, push, and create a PR to the master branch of https://github.com/iorisa/langchain
"""
FIX_ISSUE2 = """
Write a fix for this issue https://github.com/geekan/MetaGPT/issues/1275.
You can fix it on the v0.8-release branch of this repo https://github.com/garylin2099/MetaGPT,
during fixing, checkout a branch named test-fix-1275, commit your changes, push, and create a PR to the v0.8-release branch of https://github.com/garylin2099/MetaGPT
"""
FIX_ISSUE3 = """
Write a fix for this issue https://github.com/geekan/MetaGPT/issues/1262.
You can fix it on this repo https://github.com/garylin2099/MetaGPT,
during fixing, checkout a branch named test-fix-1262, commit your changes, push, and create a PR to https://github.com/garylin2099/MetaGPT
"""
FIX_ISSUE_SIMPLE = """
Write a fix for this issue: https://github.com/mannaandpoem/simple_calculator/issues/1,
you can fix it on this repo https://github.com/garylin2099/simple_calculator,
checkout a branch named test, commit your changes, push, and create a PR to the master branch of original repo.
"""
PUSH_PR_REQ = """
clone https://github.com/garylin2099/simple_calculator, checkout a new branch named test-branch, add an empty file test_file.py to the repo.
Commit your changes and push, finally, create a PR to the master branch of https://github.com/mannaandpoem/simple_calculator.
"""
IMAGE2CODE_REQ = "Please write a frontend web page similar to this image /Users/gary/Files/temp/workspace/temp_img.png, I want the same title and color. code only"
DOC_QA_REQ1 = "Tell me what this paper is about /Users/gary/Files/temp/workspace/2308.09687.pdf"
DOC_QA_REQ2 = "Summarize this doc /Users/gary/Files/temp/workspace/2401.14295.pdf"
DOC_QA_REQ3 = "请总结/Users/gary/Files/temp/workspace/2309.04658.pdf里的关键点"
DOC_QA_REQ4 = "这份报表/Users/gary/Files/temp/workspace/9929550.md中,营业收入TOP3产品各自的收入占比是多少"
TL_CHAT1 = """Summarize the paper for me""" # expecting clarification
TL_CHAT2 = """Solve the issue at this link""" # expecting clarification
TL_CHAT3 = """Who is the first man landing on Moon""" # expecting answering directly
TL_CHAT4 = """Find all zeros in the indicated finite field of the given polynomial with coefficients in that field. x^5 + 3x^3 + x^2 + 2x in Z_5""" # expecting answering directly
TL_CHAT5 = """Find the degree for the given field extension Q(sqrt(2), sqrt(3), sqrt(18)) over Q.""" # expecting answering directly
TL_CHAT6 = """True or False? Statement 1 | A ring homomorphism is one to one if and only if the kernel is {{0}},. Statement 2 | Q is an ideal in R""" # expecting answering directly
TL_CHAT7 = """Jean has 30 lollipops. Jean eats 2 of the lollipops. With the remaining lollipops, Jean wants to package 2 lollipops in one bag. How many bags can Jean fill?""" # expecting answering directly
TL_CHAT9 = """What's your name?"""
TL_CHAT10 = "Hi"
TL_CHAT11 = "Tell me about your team"
TL_CHAT12 = "What can you do"
CODING_REQ1 = "写一个java的hello world程序"
CODING_REQ2 = "python里的装饰器是什么"
CODING_REQ3 = "python里的装饰器是怎么用的,给我个例子"
if __name__ == "__main__":
# NOTE: Add access_token to test github issue fixing
os.environ["access_token"] = "ghp_xxx"
# NOTE: Change the requirement to the one you want to test
# Set enable_human_input to True if you want to simulate sending messages in chatbox
asyncio.run(main(requirement=GAME_REQ, enable_human_input=False, use_fixed_sop=False))
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/environment/werewolf_env/test_werewolf_ext_env.py | tests/metagpt/environment/werewolf_env/test_werewolf_ext_env.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : the unittest of WerewolfExtEnv
from metagpt.environment.werewolf.const import RoleState, RoleType
from metagpt.environment.werewolf.werewolf_ext_env import WerewolfExtEnv
from metagpt.roles.role import Role
class Werewolf(Role):
profile: str = RoleType.WEREWOLF.value
class Villager(Role):
profile: str = RoleType.VILLAGER.value
class Witch(Role):
profile: str = RoleType.WITCH.value
class Guard(Role):
profile: str = RoleType.GUARD.value
def test_werewolf_ext_env():
players_state = {
"Player0": (RoleType.WEREWOLF.value, RoleState.ALIVE),
"Player1": (RoleType.WEREWOLF.value, RoleState.ALIVE),
"Player2": (RoleType.VILLAGER.value, RoleState.ALIVE),
"Player3": (RoleType.WITCH.value, RoleState.ALIVE),
"Player4": (RoleType.GUARD.value, RoleState.ALIVE),
}
ext_env = WerewolfExtEnv(players_state=players_state, step_idx=4, special_role_players=["Player3", "Player4"])
assert len(ext_env.living_players) == 5
assert len(ext_env.special_role_players) == 2
assert len(ext_env.werewolf_players) == 2
curr_instr = ext_env.curr_step_instruction()
assert ext_env.step_idx == 5
assert "Werewolves, please open your eyes" in curr_instr["content"]
# current step_idx = 5
ext_env.wolf_kill_someone(wolf_name="Player10", player_name="Player4")
ext_env.wolf_kill_someone(wolf_name="Player0", player_name="Player4")
ext_env.wolf_kill_someone(wolf_name="Player1", player_name="Player4")
assert ext_env.player_hunted == "Player4"
assert len(ext_env.living_players) == 5 # hunted but can be saved by witch
for idx in range(13):
_ = ext_env.curr_step_instruction()
# current step_idx = 18
assert ext_env.step_idx == 18
ext_env.vote_kill_someone(voter_name="Player0", player_name="Player2")
ext_env.vote_kill_someone(voter_name="Player1", player_name="Player3")
ext_env.vote_kill_someone(voter_name="Player2", player_name="Player3")
ext_env.vote_kill_someone(voter_name="Player3", player_name="Player4")
ext_env.vote_kill_someone(voter_name="Player4", player_name="Player2")
assert ext_env.player_current_dead == "Player2"
assert len(ext_env.living_players) == 4
player_names = ["Player0", "Player2"]
assert ext_env.get_players_state(player_names) == dict(zip(player_names, [RoleState.ALIVE, RoleState.KILLED]))
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/environment/werewolf_env/__init__.py | tests/metagpt/environment/werewolf_env/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/environment/api/test_env_api.py | tests/metagpt/environment/api/test_env_api.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
from metagpt.environment.api.env_api import EnvAPIRegistry
def test_env_api_registry():
def test_func():
pass
env_api_registry = EnvAPIRegistry()
env_api_registry["test"] = test_func
env_api_registry.get("test") == test_func
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/environment/api/__init__.py | tests/metagpt/environment/api/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/environment/android_env/test_android_ext_env.py | tests/metagpt/environment/android_env/test_android_ext_env.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : the unittest of AndroidExtEnv
from pathlib import Path
from metagpt.environment.android.android_ext_env import AndroidExtEnv
from metagpt.environment.android.const import ADB_EXEC_FAIL
def mock_device_shape(self, adb_cmd: str) -> str:
return "shape: 720x1080"
def mock_device_shape_invalid(self, adb_cmd: str) -> str:
return ADB_EXEC_FAIL
def mock_list_devices(self) -> str:
return ["emulator-5554"]
def mock_get_screenshot(self, adb_cmd: str) -> str:
return "screenshot_xxxx-xx-xx"
def mock_get_xml(self, adb_cmd: str) -> str:
return "xml_xxxx-xx-xx"
def mock_write_read_operation(self, adb_cmd: str) -> str:
return "OK"
def test_android_ext_env(mocker):
device_id = "emulator-5554"
mocker.patch("metagpt.environment.android.android_ext_env.AndroidExtEnv.execute_adb_with_cmd", mock_device_shape)
mocker.patch("metagpt.environment.android.android_ext_env.AndroidExtEnv.list_devices", mock_list_devices)
ext_env = AndroidExtEnv(device_id=device_id, screenshot_dir="/data2/", xml_dir="/data2/")
assert ext_env.adb_prefix == f"adb -s {device_id} "
assert ext_env.adb_prefix_shell == f"adb -s {device_id} shell "
assert ext_env.adb_prefix_si == f"adb -s {device_id} shell input "
assert ext_env.device_shape == (720, 1080)
mocker.patch(
"metagpt.environment.android.android_ext_env.AndroidExtEnv.execute_adb_with_cmd", mock_device_shape_invalid
)
assert ext_env.device_shape == (0, 0)
assert ext_env.list_devices() == [device_id]
mocker.patch("metagpt.environment.android.android_ext_env.AndroidExtEnv.execute_adb_with_cmd", mock_get_screenshot)
assert ext_env.get_screenshot("screenshot_xxxx-xx-xx", "/data/") == Path("/data/screenshot_xxxx-xx-xx.png")
mocker.patch("metagpt.environment.android.android_ext_env.AndroidExtEnv.execute_adb_with_cmd", mock_get_xml)
assert ext_env.get_xml("xml_xxxx-xx-xx", "/data/") == Path("/data/xml_xxxx-xx-xx.xml")
mocker.patch(
"metagpt.environment.android.android_ext_env.AndroidExtEnv.execute_adb_with_cmd", mock_write_read_operation
)
res = "OK"
assert ext_env.system_back() == res
assert ext_env.system_tap(10, 10) == res
assert ext_env.user_input("test_input") == res
assert ext_env.user_longpress(10, 10) == res
assert ext_env.user_swipe(10, 10) == res
assert ext_env.user_swipe_to((10, 10), (20, 20)) == res
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/environment/android_env/__init__.py | tests/metagpt/environment/android_env/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/environment/stanford_town_env/test_stanford_town_ext_env.py | tests/metagpt/environment/stanford_town_env/test_stanford_town_ext_env.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : the unittest of StanfordTownExtEnv
from pathlib import Path
from metagpt.environment.stanford_town.env_space import (
EnvAction,
EnvActionType,
EnvObsParams,
EnvObsType,
)
from metagpt.environment.stanford_town.stanford_town_ext_env import StanfordTownExtEnv
maze_asset_path = (
Path(__file__)
.absolute()
.parent.joinpath("..", "..", "..", "..", "metagpt/ext/stanford_town/static_dirs/assets/the_ville")
)
def test_stanford_town_ext_env():
ext_env = StanfordTownExtEnv(maze_asset_path=maze_asset_path)
tile_coord = ext_env.turn_coordinate_to_tile((64, 64))
assert tile_coord == (2, 2)
tile = (58, 9)
assert len(ext_env.get_collision_maze()) == 100
assert len(ext_env.get_address_tiles()) == 306
assert ext_env.access_tile(tile=tile)["world"] == "the Ville"
assert ext_env.get_tile_path(tile=tile, level="world") == "the Ville"
assert len(ext_env.get_nearby_tiles(tile=tile, vision_r=5)) == 121
event = ("double studio:double studio:bedroom 2:bed", None, None, None)
ext_env.add_event_from_tile(event, tile)
assert len(ext_env.tiles[tile[1]][tile[0]]["events"]) == 1
ext_env.turn_event_from_tile_idle(event, tile)
ext_env.remove_event_from_tile(event, tile)
assert len(ext_env.tiles[tile[1]][tile[0]]["events"]) == 0
ext_env.remove_subject_events_from_tile(subject=event[0], tile=tile)
assert len(ext_env.tiles[tile[1]][tile[0]]["events"]) == 0
def test_stanford_town_ext_env_observe_step():
ext_env = StanfordTownExtEnv(maze_asset_path=maze_asset_path)
obs, info = ext_env.reset()
assert len(info) == 0
assert len(obs["address_tiles"]) == 306
tile = (58, 9)
obs = ext_env.observe(obs_params=EnvObsParams(obs_type=EnvObsType.TILE_PATH, coord=tile, level="world"))
assert obs == "the Ville"
action = ext_env.action_space.sample()
assert len(action) == 4
assert len(action["event"]) == 4
event = ("double studio:double studio:bedroom 2:bed", None, None, None)
obs, _, _, _, _ = ext_env.step(action=EnvAction(action_type=EnvActionType.ADD_TILE_EVENT, coord=tile, event=event))
assert len(ext_env.tiles[tile[1]][tile[0]]["events"]) == 1
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/environment/stanford_town_env/__init__.py | tests/metagpt/environment/stanford_town_env/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/configs/test_models_config.py | tests/metagpt/configs/test_models_config.py | import pytest
from metagpt.actions.talk_action import TalkAction
from metagpt.configs.models_config import ModelsConfig
from metagpt.const import METAGPT_ROOT, TEST_DATA_PATH
from metagpt.utils.common import aread, awrite
@pytest.mark.asyncio
async def test_models_configs(context):
default_model = ModelsConfig.default()
assert default_model is not None
models = ModelsConfig.from_yaml_file(TEST_DATA_PATH / "config/config2.yaml")
assert models
default_models = ModelsConfig.default()
backup = ""
if not default_models.models:
backup = await aread(filename=METAGPT_ROOT / "config/config2.yaml")
test_data = await aread(filename=TEST_DATA_PATH / "config/config2.yaml")
await awrite(filename=METAGPT_ROOT / "config/config2.yaml", data=test_data)
try:
action = TalkAction(context=context, i_context="who are you?", llm_name_or_type="YOUR_MODEL_NAME_1")
assert action.private_llm.config.model == "YOUR_MODEL_NAME_1"
assert context.config.llm.model != "YOUR_MODEL_NAME_1"
finally:
if backup:
await awrite(filename=METAGPT_ROOT / "config/config2.yaml", data=backup)
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/configs/__init__.py | tests/metagpt/configs/__init__.py | python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false | |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/exp_pool/test_manager.py | tests/metagpt/exp_pool/test_manager.py | import pytest
from metagpt.config2 import Config
from metagpt.configs.exp_pool_config import (
ExperiencePoolConfig,
ExperiencePoolRetrievalType,
)
from metagpt.configs.llm_config import LLMConfig
from metagpt.exp_pool.manager import Experience, ExperienceManager
from metagpt.exp_pool.schema import DEFAULT_SIMILARITY_TOP_K, QueryType
class TestExperienceManager:
@pytest.fixture
def mock_config(self):
return Config(
llm=LLMConfig(),
exp_pool=ExperiencePoolConfig(
enable_write=True, enable_read=True, enabled=True, retrieval_type=ExperiencePoolRetrievalType.BM25
),
)
@pytest.fixture
def mock_storage(self, mocker):
engine = mocker.MagicMock()
engine.add_objs = mocker.MagicMock()
engine.aretrieve = mocker.AsyncMock(return_value=[])
engine.count = mocker.MagicMock(return_value=10)
return engine
@pytest.fixture
def exp_manager(self, mock_config, mock_storage):
manager = ExperienceManager(config=mock_config)
manager._storage = mock_storage
return manager
def test_storage_property(self, exp_manager, mock_storage):
assert exp_manager.storage == mock_storage
def test_storage_property_initialization(self, mocker, mock_config):
mocker.patch.object(ExperienceManager, "_resolve_storage", return_value=mocker.MagicMock())
manager = ExperienceManager(config=mock_config)
assert manager._storage is None
_ = manager.storage
assert manager._storage is not None
def test_create_exp_write_disabled(self, exp_manager, mock_config):
mock_config.exp_pool.enable_write = False
exp = Experience(req="test", resp="response")
exp_manager.create_exp(exp)
exp_manager.storage.add_objs.assert_not_called()
def test_create_exp_write_enabled(self, exp_manager):
exp = Experience(req="test", resp="response")
exp_manager.create_exp(exp)
exp_manager.storage.add_objs.assert_called_once_with([exp])
exp_manager.storage.persist.assert_called_once_with(exp_manager.config.exp_pool.persist_path)
@pytest.mark.asyncio
async def test_query_exps_read_disabled(self, exp_manager, mock_config):
mock_config.exp_pool.enable_read = False
result = await exp_manager.query_exps("query")
assert result == []
@pytest.mark.asyncio
async def test_query_exps_with_exact_match(self, exp_manager, mocker):
req = "exact query"
exp1 = Experience(req=req, resp="response1")
exp2 = Experience(req="different query", resp="response2")
mock_node1 = mocker.MagicMock(metadata={"obj": exp1})
mock_node2 = mocker.MagicMock(metadata={"obj": exp2})
exp_manager.storage.aretrieve.return_value = [mock_node1, mock_node2]
result = await exp_manager.query_exps(req, query_type=QueryType.EXACT)
assert len(result) == 1
assert result[0].req == req
@pytest.mark.asyncio
async def test_query_exps_with_tag_filter(self, exp_manager, mocker):
tag = "test_tag"
exp1 = Experience(req="query1", resp="response1", tag=tag)
exp2 = Experience(req="query2", resp="response2", tag="other_tag")
mock_node1 = mocker.MagicMock(metadata={"obj": exp1})
mock_node2 = mocker.MagicMock(metadata={"obj": exp2})
exp_manager.storage.aretrieve.return_value = [mock_node1, mock_node2]
result = await exp_manager.query_exps("query", tag=tag)
assert len(result) == 1
assert result[0].tag == tag
def test_get_exps_count(self, exp_manager):
assert exp_manager.get_exps_count() == 10
def test_resolve_storage_bm25(self, mocker, mock_config):
mock_config.exp_pool.retrieval_type = ExperiencePoolRetrievalType.BM25
mocker.patch.object(ExperienceManager, "_create_bm25_storage", return_value=mocker.MagicMock())
manager = ExperienceManager(config=mock_config)
storage = manager._resolve_storage()
manager._create_bm25_storage.assert_called_once()
assert storage is not None
def test_resolve_storage_chroma(self, mocker, mock_config):
mock_config.exp_pool.retrieval_type = ExperiencePoolRetrievalType.CHROMA
mocker.patch.object(ExperienceManager, "_create_chroma_storage", return_value=mocker.MagicMock())
manager = ExperienceManager(config=mock_config)
storage = manager._resolve_storage()
manager._create_chroma_storage.assert_called_once()
assert storage is not None
def test_create_bm25_storage(self, mocker, mock_config):
mocker.patch("metagpt.rag.engines.SimpleEngine.from_objs", return_value=mocker.MagicMock())
mocker.patch("metagpt.rag.engines.SimpleEngine.from_index", return_value=mocker.MagicMock())
mocker.patch("metagpt.rag.engines.SimpleEngine.get_obj_nodes", return_value=[])
mocker.patch("metagpt.rag.engines.SimpleEngine._resolve_embed_model", return_value=mocker.MagicMock())
mocker.patch("llama_index.core.VectorStoreIndex", return_value=mocker.MagicMock())
mocker.patch("metagpt.rag.schema.BM25RetrieverConfig", return_value=mocker.MagicMock())
mocker.patch("pathlib.Path.exists", return_value=False)
manager = ExperienceManager(config=mock_config)
storage = manager._create_bm25_storage()
assert storage is not None
def test_create_chroma_storage(self, mocker, mock_config):
mocker.patch("metagpt.rag.engines.SimpleEngine.from_objs", return_value=mocker.MagicMock())
manager = ExperienceManager(config=mock_config)
storage = manager._create_chroma_storage()
assert storage is not None
def test_get_ranker_configs_use_llm_ranker_true(self, mock_config):
mock_config.exp_pool.use_llm_ranker = True
manager = ExperienceManager(config=mock_config)
ranker_configs = manager._get_ranker_configs()
assert len(ranker_configs) == 1
assert ranker_configs[0].top_n == DEFAULT_SIMILARITY_TOP_K
def test_get_ranker_configs_use_llm_ranker_false(self, mock_config):
mock_config.exp_pool.use_llm_ranker = False
manager = ExperienceManager(config=mock_config)
ranker_configs = manager._get_ranker_configs()
assert len(ranker_configs) == 0
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/exp_pool/test_decorator.py | tests/metagpt/exp_pool/test_decorator.py | import asyncio
import pytest
from metagpt.config2 import Config
from metagpt.configs.exp_pool_config import ExperiencePoolConfig
from metagpt.exp_pool.context_builders import SimpleContextBuilder
from metagpt.exp_pool.decorator import ExpCacheHandler, exp_cache
from metagpt.exp_pool.manager import ExperienceManager
from metagpt.exp_pool.perfect_judges import SimplePerfectJudge
from metagpt.exp_pool.schema import Experience, QueryType, Score
from metagpt.exp_pool.scorers import SimpleScorer
from metagpt.rag.engines import SimpleEngine
class TestExpCacheHandler:
@pytest.fixture
def mock_func(self, mocker):
return mocker.AsyncMock()
@pytest.fixture
def mock_exp_manager(self, mocker):
manager = mocker.MagicMock(spec=ExperienceManager)
manager.storage = mocker.MagicMock(spec=SimpleEngine)
manager.config = mocker.MagicMock(spec=Config)
manager.config.exp_pool = ExperiencePoolConfig()
manager.query_exps = mocker.AsyncMock()
manager.create_exp = mocker.MagicMock()
return manager
@pytest.fixture
def mock_scorer(self, mocker):
scorer = mocker.MagicMock(spec=SimpleScorer)
scorer.evaluate = mocker.AsyncMock()
return scorer
@pytest.fixture
def mock_perfect_judge(self, mocker):
return mocker.MagicMock(spec=SimplePerfectJudge)
@pytest.fixture
def mock_context_builder(self, mocker):
return mocker.MagicMock(spec=SimpleContextBuilder)
@pytest.fixture
def exp_cache_handler(self, mock_func, mock_exp_manager, mock_scorer, mock_perfect_judge, mock_context_builder):
return ExpCacheHandler(
func=mock_func,
args=(),
kwargs={"req": "test_req"},
exp_manager=mock_exp_manager,
exp_scorer=mock_scorer,
exp_perfect_judge=mock_perfect_judge,
context_builder=mock_context_builder,
)
@pytest.mark.asyncio
async def test_fetch_experiences(self, exp_cache_handler, mock_exp_manager):
mock_exp_manager.query_exps.return_value = [Experience(req="test_req", resp="test_resp")]
await exp_cache_handler.fetch_experiences()
mock_exp_manager.query_exps.assert_called_once_with(
"test_req", query_type=QueryType.SEMANTIC, tag=exp_cache_handler.tag
)
assert len(exp_cache_handler._exps) == 1
@pytest.mark.asyncio
async def test_get_one_perfect_exp(self, exp_cache_handler, mock_perfect_judge):
exp = Experience(req="test_req", resp="perfect_resp")
exp_cache_handler._exps = [exp]
mock_perfect_judge.is_perfect_exp.return_value = True
result = await exp_cache_handler.get_one_perfect_exp()
assert result == "perfect_resp"
@pytest.mark.asyncio
async def test_execute_function(self, exp_cache_handler, mock_func, mock_context_builder):
mock_context_builder.build.return_value = "built_context"
mock_func.return_value = "function_result"
await exp_cache_handler.execute_function()
mock_context_builder.build.assert_called_once()
mock_func.assert_called_once_with(req="built_context")
assert exp_cache_handler._raw_resp == "function_result"
assert exp_cache_handler._resp == "function_result"
@pytest.mark.asyncio
async def test_process_experience(self, exp_cache_handler, mock_scorer, mock_exp_manager):
exp_cache_handler._resp = "test_resp"
mock_scorer.evaluate.return_value = Score(val=8)
await exp_cache_handler.process_experience()
mock_scorer.evaluate.assert_called_once()
mock_exp_manager.create_exp.assert_called_once()
@pytest.mark.asyncio
async def test_evaluate_experience(self, exp_cache_handler, mock_scorer):
exp_cache_handler._resp = "test_resp"
mock_scorer.evaluate.return_value = Score(val=9)
await exp_cache_handler.evaluate_experience()
assert exp_cache_handler._score.val == 9
def test_save_experience(self, exp_cache_handler, mock_exp_manager):
exp_cache_handler._req = "test_req"
exp_cache_handler._resp = "test_resp"
exp_cache_handler._score = Score(val=7)
exp_cache_handler.save_experience()
mock_exp_manager.create_exp.assert_called_once()
def test_choose_wrapper_async(self, mocker):
async def async_func():
pass
wrapper = ExpCacheHandler.choose_wrapper(async_func, mocker.AsyncMock())
assert asyncio.iscoroutinefunction(wrapper)
def test_choose_wrapper_sync(self, mocker):
def sync_func():
pass
wrapper = ExpCacheHandler.choose_wrapper(sync_func, mocker.AsyncMock())
assert not asyncio.iscoroutinefunction(wrapper)
def test_validate_params(self):
with pytest.raises(ValueError):
ExpCacheHandler(func=lambda x: x, args=(), kwargs={})
def test_generate_tag(self):
class TestClass:
def test_method(self):
pass
handler = ExpCacheHandler(func=TestClass().test_method, args=(TestClass(),), kwargs={"req": "test"})
assert handler._generate_tag() == "TestClass.test_method"
handler = ExpCacheHandler(func=lambda x: x, args=(), kwargs={"req": "test"})
assert handler._generate_tag() == "<lambda>"
class TestExpCache:
@pytest.fixture
def mock_exp_manager(self, mocker, mock_config):
manager = mocker.MagicMock(spec=ExperienceManager)
manager.storage = mocker.MagicMock(spec=SimpleEngine)
manager.config = mock_config
manager.query_exps = mocker.AsyncMock()
manager.create_exp = mocker.MagicMock()
return manager
@pytest.fixture
def mock_scorer(self, mocker):
scorer = mocker.MagicMock(spec=SimpleScorer)
scorer.evaluate = mocker.AsyncMock(return_value=Score())
return scorer
@pytest.fixture
def mock_perfect_judge(self, mocker):
return mocker.MagicMock(spec=SimplePerfectJudge)
@pytest.fixture
def mock_config(self, mocker):
config = Config.default().model_copy(deep=True)
default = mocker.patch("metagpt.config2.Config.default")
default.return_value = config
return config
@pytest.mark.asyncio
async def test_exp_cache_disabled(self, mock_config, mock_exp_manager):
mock_config.exp_pool.enabled = False
@exp_cache(manager=mock_exp_manager)
async def test_func(req):
return "result"
result = await test_func(req="test")
assert result == "result"
mock_exp_manager.query_exps.assert_not_called()
@pytest.mark.asyncio
async def test_exp_cache_enabled_no_perfect_exp(self, mock_config, mock_exp_manager, mock_scorer):
mock_config.exp_pool.enabled = True
mock_config.exp_pool.enable_read = True
mock_config.exp_pool.enable_write = True
mock_exp_manager.query_exps.return_value = []
@exp_cache(manager=mock_exp_manager, scorer=mock_scorer)
async def test_func(req):
return "computed_result"
result = await test_func(req="test")
assert result == "computed_result"
mock_exp_manager.query_exps.assert_called()
mock_exp_manager.create_exp.assert_called()
@pytest.mark.asyncio
async def test_exp_cache_enabled_with_perfect_exp(self, mock_config, mock_exp_manager, mock_perfect_judge):
mock_config.exp_pool.enabled = True
mock_config.exp_pool.enable_read = True
perfect_exp = Experience(req="test", resp="perfect_result")
mock_exp_manager.query_exps.return_value = [perfect_exp]
mock_perfect_judge.is_perfect_exp.return_value = True
@exp_cache(manager=mock_exp_manager, perfect_judge=mock_perfect_judge)
async def test_func(req):
return "should_not_be_called"
result = await test_func(req="test")
assert result == "perfect_result"
mock_exp_manager.query_exps.assert_called_once()
mock_exp_manager.create_exp.assert_not_called()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/exp_pool/test_context_builders/test_rolezero_context_builder.py | tests/metagpt/exp_pool/test_context_builders/test_rolezero_context_builder.py | import pytest
from metagpt.const import EXPERIENCE_MASK
from metagpt.exp_pool.context_builders.base import BaseContextBuilder
from metagpt.exp_pool.context_builders.role_zero import RoleZeroContextBuilder
class TestRoleZeroContextBuilder:
@pytest.fixture
def context_builder(self):
return RoleZeroContextBuilder()
@pytest.mark.asyncio
async def test_build_empty_req(self, context_builder):
result = await context_builder.build(req=[])
assert result == []
@pytest.mark.asyncio
async def test_build_no_experiences(self, context_builder, mocker):
mocker.patch.object(BaseContextBuilder, "format_exps", return_value="")
req = [{"content": "Original content"}]
result = await context_builder.build(req=req)
assert result == req
@pytest.mark.asyncio
async def test_build_with_experiences(self, context_builder, mocker):
mocker.patch.object(BaseContextBuilder, "format_exps", return_value="Formatted experiences")
mocker.patch.object(RoleZeroContextBuilder, "replace_example_content", return_value="Updated content")
req = [{"content": "Original content 1"}]
result = await context_builder.build(req=req)
assert result == [{"content": "Updated content"}]
def test_replace_example_content(self, context_builder, mocker):
mocker.patch.object(RoleZeroContextBuilder, "fill_experience", return_value="Replaced content")
result = context_builder.replace_example_content("Original text", "New example content")
assert result == "Replaced content"
context_builder.fill_experience.assert_called_once_with("Original text", "New example content")
def test_fill_experience(self):
text = f"Start\n# Past Experience\n{EXPERIENCE_MASK}\n\n# Instruction\nEnd"
new_content = "New content"
result = RoleZeroContextBuilder.fill_experience(text, new_content)
expected = "Start\n# Past Experience\nNew content\n\n# Instruction\nEnd"
assert result == expected
def test_fill_experience_no_match(self):
text = "Start\nNo markers\nEnd"
new_content = "New content"
result = RoleZeroContextBuilder.fill_experience(text, new_content)
assert result == text
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/exp_pool/test_context_builders/test_base_context_builder.py | tests/metagpt/exp_pool/test_context_builders/test_base_context_builder.py | import pytest
from metagpt.exp_pool.context_builders.base import (
EXP_TEMPLATE,
BaseContextBuilder,
Experience,
)
from metagpt.exp_pool.schema import Metric, Score
class TestBaseContextBuilder:
class ConcreteContextBuilder(BaseContextBuilder):
async def build(self, *args, **kwargs):
pass
@pytest.fixture
def context_builder(self):
return self.ConcreteContextBuilder()
def test_format_exps(self, context_builder):
exp1 = Experience(req="req1", resp="resp1", metric=Metric(score=Score(val=8)))
exp2 = Experience(req="req2", resp="resp2", metric=Metric(score=Score(val=9)))
context_builder.exps = [exp1, exp2]
result = context_builder.format_exps()
expected = "\n".join(
[
f"1. {EXP_TEMPLATE.format(req='req1', resp='resp1', score=8)}",
f"2. {EXP_TEMPLATE.format(req='req2', resp='resp2', score=9)}",
]
)
assert result == expected
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/exp_pool/test_context_builders/test_simple_context_builder.py | tests/metagpt/exp_pool/test_context_builders/test_simple_context_builder.py | import pytest
from metagpt.exp_pool.context_builders.base import BaseContextBuilder
from metagpt.exp_pool.context_builders.simple import (
SIMPLE_CONTEXT_TEMPLATE,
SimpleContextBuilder,
)
class TestSimpleContextBuilder:
@pytest.fixture
def context_builder(self):
return SimpleContextBuilder()
@pytest.mark.asyncio
async def test_build_with_experiences(self, mocker, context_builder: SimpleContextBuilder):
# Mock the format_exps method
mock_exps = "Mocked experiences"
mocker.patch.object(BaseContextBuilder, "format_exps", return_value=mock_exps)
req = "Test request"
result = await context_builder.build(req=req)
expected = SIMPLE_CONTEXT_TEMPLATE.format(req=req, exps=mock_exps)
assert result == expected
@pytest.mark.asyncio
async def test_build_without_experiences(self, mocker, context_builder: SimpleContextBuilder):
# Mock the format_exps method to return an empty string
mocker.patch.object(BaseContextBuilder, "format_exps", return_value="")
req = "Test request"
result = await context_builder.build(req=req)
expected = SIMPLE_CONTEXT_TEMPLATE.format(req=req, exps="")
assert result == expected
@pytest.mark.asyncio
async def test_build_without_req(self, mocker, context_builder: SimpleContextBuilder):
# Mock the format_exps method
mock_exps = "Mocked experiences"
mocker.patch.object(BaseContextBuilder, "format_exps", return_value=mock_exps)
result = await context_builder.build(req="")
expected = SIMPLE_CONTEXT_TEMPLATE.format(req="", exps=mock_exps)
assert result == expected
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/exp_pool/test_serializers/test_role_zero.py | tests/metagpt/exp_pool/test_serializers/test_role_zero.py | import json
import pytest
from metagpt.exp_pool.serializers import RoleZeroSerializer
class TestRoleZeroSerializer:
@pytest.fixture
def serializer(self) -> RoleZeroSerializer:
return RoleZeroSerializer()
@pytest.fixture
def last_item(self) -> dict:
return {
"role": "user",
"content": "# Current Plan\nsome plan\n# Current Plan\nsome plan\n# Instruction\nsome instruction",
}
@pytest.fixture
def sample_req(self):
return [{"role": "user", "content": "..."}, {"role": "assistant", "content": "..."}]
def test_serialize_req_empty_input(self, serializer: RoleZeroSerializer):
assert serializer.serialize_req(req=[]) == ""
def test_serialize_req_with_content(self, serializer: RoleZeroSerializer, last_item: dict):
req = [
{"role": "user", "content": "Command Editor.read executed: file_path=test.py"},
{"role": "assistant", "content": "Some other content"},
last_item,
]
expected_output = json.dumps([{"role": "user", "content": "Command Editor.read executed: file_path=test.py"}])
assert serializer.serialize_req(req=req) == expected_output
def test_filter_req(self, serializer: RoleZeroSerializer):
req = [
{"role": "user", "content": "Command Editor.read executed: file_path=test1.py"},
{"role": "assistant", "content": "Some other content"},
{"role": "user", "content": "Command Editor.read executed: file_path=test2.py"},
{"role": "assistant", "content": "Final content"},
]
filtered_req = serializer._filter_req(req)
assert len(filtered_req) == 2
assert filtered_req[0]["content"] == "Command Editor.read executed: file_path=test1.py"
assert filtered_req[1]["content"] == "Command Editor.read executed: file_path=test2.py"
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/exp_pool/test_serializers/test_action_node.py | tests/metagpt/exp_pool/test_serializers/test_action_node.py | from typing import Type
import pytest
from metagpt.actions.action_node import ActionNode
from metagpt.exp_pool.serializers.action_node import ActionNodeSerializer
class TestActionNodeSerializer:
@pytest.fixture
def serializer(self):
return ActionNodeSerializer()
@pytest.fixture
def action_node(self):
class InstructContent:
def __init__(self, json_data):
self.json_data = json_data
def model_dump_json(self):
return self.json_data
action_node = ActionNode(key="", expected_type=Type[str], instruction="", example="")
action_node.instruct_content = InstructContent('{"key": "value"}')
return action_node
def test_serialize_resp(self, serializer: ActionNodeSerializer, action_node: ActionNode):
serialized = serializer.serialize_resp(action_node)
assert serialized == '{"key": "value"}'
def test_deserialize_resp(self, serializer: ActionNodeSerializer):
deserialized = serializer.deserialize_resp('{"key": "value"}')
assert isinstance(deserialized, ActionNode)
assert deserialized.instruct_content.model_dump_json() == '{"key": "value"}'
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/exp_pool/test_serializers/test_simple.py | tests/metagpt/exp_pool/test_serializers/test_simple.py | import pytest
from metagpt.exp_pool.serializers.simple import SimpleSerializer
class TestSimpleSerializer:
@pytest.fixture
def serializer(self):
return SimpleSerializer()
def test_serialize_req(self, serializer: SimpleSerializer):
# Test with different types of input
assert serializer.serialize_req(req=123) == "123"
assert serializer.serialize_req(req="test") == "test"
assert serializer.serialize_req(req=[1, 2, 3]) == "[1, 2, 3]"
assert serializer.serialize_req(req={"a": 1}) == "{'a': 1}"
def test_serialize_resp(self, serializer: SimpleSerializer):
# Test with different types of input
assert serializer.serialize_resp(456) == "456"
assert serializer.serialize_resp("response") == "response"
assert serializer.serialize_resp([4, 5, 6]) == "[4, 5, 6]"
assert serializer.serialize_resp({"b": 2}) == "{'b': 2}"
def test_deserialize_resp(self, serializer: SimpleSerializer):
# Test with different types of input
assert serializer.deserialize_resp("789") == "789"
assert serializer.deserialize_resp("test_response") == "test_response"
assert serializer.deserialize_resp("[7, 8, 9]") == "[7, 8, 9]"
assert serializer.deserialize_resp("{'c': 3}") == "{'c': 3}"
def test_roundtrip(self, serializer: SimpleSerializer):
# Test serialization and deserialization roundtrip
original = "test_roundtrip"
serialized = serializer.serialize_resp(original)
deserialized = serializer.deserialize_resp(serialized)
assert deserialized == original
@pytest.mark.parametrize("input_value", [123, "test", [1, 2, 3], {"a": 1}, None])
def test_serialize_req_types(self, serializer: SimpleSerializer, input_value):
# Test serialize_req with various input types
result = serializer.serialize_req(req=input_value)
assert isinstance(result, str)
assert result == str(input_value)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/exp_pool/test_scorers/test_simple_scorer.py | tests/metagpt/exp_pool/test_scorers/test_simple_scorer.py | import json
import pytest
from metagpt.exp_pool.schema import Score
from metagpt.exp_pool.scorers.simple import SIMPLE_SCORER_TEMPLATE, SimpleScorer
from metagpt.llm import BaseLLM
class TestSimpleScorer:
@pytest.fixture
def mock_llm(self, mocker):
mock_llm = mocker.MagicMock(spec=BaseLLM)
return mock_llm
@pytest.fixture
def simple_scorer(self, mock_llm):
return SimpleScorer(llm=mock_llm)
def test_init(self, mock_llm):
scorer = SimpleScorer(llm=mock_llm)
assert isinstance(scorer.llm, BaseLLM)
@pytest.mark.asyncio
async def test_evaluate(self, simple_scorer, mock_llm, mocker):
# Mock request and response
req = "What is the capital of France?"
resp = "The capital of France is Paris."
# Mock LLM response
mock_llm_response = '{"val": 9, "reason": "Accurate and concise answer"}'
mock_llm.aask.return_value = f"```json\n{mock_llm_response}\n```"
# Mock CodeParser.parse_code
mocker.patch("metagpt.utils.common.CodeParser.parse_code", return_value=mock_llm_response)
# Test evaluate method
result = await simple_scorer.evaluate(req, resp)
# Assert LLM was called with correct prompt
expected_prompt = SIMPLE_SCORER_TEMPLATE.format(req=req, resp=resp)
mock_llm.aask.assert_called_once_with(expected_prompt)
# Assert the result is correct
assert isinstance(result, Score)
assert result.val == 9
assert result.reason == "Accurate and concise answer"
@pytest.mark.asyncio
async def test_evaluate_invalid_response(self, simple_scorer, mock_llm, mocker):
# Mock request and response
req = "What is the capital of France?"
resp = "The capital of France is Paris."
# Mock LLM response with invalid JSON
mock_llm_response = "Invalid JSON"
mock_llm.aask.return_value = f"```json\n{mock_llm_response}\n```"
# Mock CodeParser.parse_code
mocker.patch("metagpt.utils.common.CodeParser.parse_code", return_value=mock_llm_response)
# Test evaluate method with invalid response
with pytest.raises(json.JSONDecodeError):
await simple_scorer.evaluate(req, resp)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/exp_pool/test_perfect_judges/test_simple_perfect_judge.py | tests/metagpt/exp_pool/test_perfect_judges/test_simple_perfect_judge.py | import pytest
from metagpt.exp_pool.perfect_judges import SimplePerfectJudge
from metagpt.exp_pool.schema import MAX_SCORE, Experience, Metric, Score
class TestSimplePerfectJudge:
@pytest.fixture
def simple_perfect_judge(self):
return SimplePerfectJudge()
@pytest.mark.asyncio
async def test_is_perfect_exp_perfect_match(self, simple_perfect_judge):
exp = Experience(req="test_request", resp="resp", metric=Metric(score=Score(val=MAX_SCORE)))
result = await simple_perfect_judge.is_perfect_exp(exp, "test_request")
assert result is True
@pytest.mark.asyncio
async def test_is_perfect_exp_imperfect_score(self, simple_perfect_judge):
exp = Experience(req="test_request", resp="resp", metric=Metric(score=Score(val=MAX_SCORE - 1)))
result = await simple_perfect_judge.is_perfect_exp(exp, "test_request")
assert result is False
@pytest.mark.asyncio
async def test_is_perfect_exp_mismatched_request(self, simple_perfect_judge):
exp = Experience(req="test_request", resp="resp", metric=Metric(score=Score(val=MAX_SCORE)))
result = await simple_perfect_judge.is_perfect_exp(exp, "different_request")
assert result is False
@pytest.mark.asyncio
async def test_is_perfect_exp_no_metric(self, simple_perfect_judge):
exp = Experience(req="test_request", resp="resp")
result = await simple_perfect_judge.is_perfect_exp(exp, "test_request")
assert result is False
@pytest.mark.asyncio
async def test_is_perfect_exp_no_score(self, simple_perfect_judge):
exp = Experience(req="test_request", resp="resp", metric=Metric())
result = await simple_perfect_judge.is_perfect_exp(exp, "test_request")
assert result is False
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/strategy/test_planner.py | tests/metagpt/strategy/test_planner.py | from metagpt.schema import Plan, Task
from metagpt.strategy.planner import Planner
from metagpt.strategy.task_type import TaskType
MOCK_TASK_MAP = {
"1": Task(
task_id="1",
instruction="test instruction for finished task",
task_type=TaskType.EDA.type_name,
dependent_task_ids=[],
code="some finished test code",
result="some finished test result",
is_finished=True,
),
"2": Task(
task_id="2",
instruction="test instruction for current task",
task_type=TaskType.DATA_PREPROCESS.type_name,
dependent_task_ids=["1"],
),
}
MOCK_PLAN = Plan(
goal="test goal",
tasks=list(MOCK_TASK_MAP.values()),
task_map=MOCK_TASK_MAP,
current_task_id="2",
)
def test_planner_get_plan_status():
planner = Planner(plan=MOCK_PLAN)
status = planner.get_plan_status()
assert "some finished test code" in status
assert "some finished test result" in status
assert "test instruction for current task" in status
assert TaskType.DATA_PREPROCESS.value.guidance in status # current task guidance
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/strategy/test_solver.py | tests/metagpt/strategy/test_solver.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/1/31 13:54
@Author : alexanderwu
@File : test_solver.py
"""
import pytest
from metagpt.actions.action_graph import ActionGraph
from metagpt.llm import LLM
from metagpt.strategy.search_space import SearchSpace
from metagpt.strategy.solver import NaiveSolver
@pytest.mark.asyncio
async def test_solver():
from metagpt.actions.write_prd_an import (
COMPETITIVE_ANALYSIS,
ISSUE_TYPE,
PRODUCT_GOALS,
REQUIREMENT_POOL,
)
graph = ActionGraph()
graph.add_node(ISSUE_TYPE)
graph.add_node(PRODUCT_GOALS)
graph.add_node(COMPETITIVE_ANALYSIS)
graph.add_node(REQUIREMENT_POOL)
graph.add_edge(ISSUE_TYPE, PRODUCT_GOALS)
graph.add_edge(PRODUCT_GOALS, COMPETITIVE_ANALYSIS)
graph.add_edge(PRODUCT_GOALS, REQUIREMENT_POOL)
graph.add_edge(COMPETITIVE_ANALYSIS, REQUIREMENT_POOL)
search_space = SearchSpace()
llm = LLM()
context = "Create a 2048 game"
solver = NaiveSolver(graph, search_space, llm, context)
await solver.solve()
print("## graph.nodes")
print(graph.nodes)
for k, v in graph.nodes.items():
print(f"{v.key} | prevs: {[i.key for i in v.prevs]} | nexts: {[i.key for i in v.nexts]}")
assert len(graph.nodes) == 4
assert len(graph.execution_order) == 4
assert graph.execution_order == [ISSUE_TYPE.key, PRODUCT_GOALS.key, COMPETITIVE_ANALYSIS.key, REQUIREMENT_POOL.key]
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/strategy/__init__.py | tests/metagpt/strategy/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/12/30 00:33
@Author : alexanderwu
@File : __init__.py
"""
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/strategy/examples/test_game24.py | tests/metagpt/strategy/examples/test_game24.py | # -*- coding: utf-8 -*-
# @Date : 12/25/2023 1:36 AM
# @Author : stellahong (stellahong@fuzhi.ai)
# @Desc :
import re
from typing import Dict
from metagpt.strategy.tot import TreeofThought
from metagpt.strategy.tot_schema import (
BaseEvaluator,
BaseParser,
Strategy,
ThoughtSolverConfig,
)
from tests.metagpt.strategy.prompt_templates.game24 import propose_prompt, value_prompt
class Game24Parser(BaseParser):
propose_prompt: str = propose_prompt
value_prompt: str = value_prompt
def __call__(self, input_text: str) -> str:
last_line = input_text.strip().split("\n")[-1]
return last_line.split("left: ")[-1].split(")")[0]
def propose(self, current_state: str, **kwargs) -> str:
return self.propose_prompt.format(input=current_state, **kwargs)
def value(self, input: str = "", **kwargs) -> str:
node_result = self(input)
return self.value_prompt.format(input=node_result)
class Game24Evaluator(BaseEvaluator):
value_map: Dict[str, float] = {"impossible": 0.001, "likely": 1, "sure": 20} # TODO: ad hoc
status_map: Dict = {val: key for key, val in value_map.items()}
def __call__(self, evaluation: str, **kwargs) -> float:
try:
matches = re.findall(r"\b(impossible|sure|likely)\b", evaluation)
value = self.value_map[matches[0]]
except:
value = 0.001
return value
def status_verify(self, value):
status = False
if value in self.status_map:
status_value = self.status_map[value]
if status_value != "impossible":
status = True
return status
def test_game24():
import asyncio
initial_prompt = """4 5 6 10"""
parser = Game24Parser()
evaluator = Game24Evaluator()
config = ThoughtSolverConfig(n_generate_sample=5, parser=parser, evaluator=evaluator)
tot = TreeofThought(strategy=Strategy.BFS, config=config)
asyncio.run(tot.solve(init_prompt=initial_prompt))
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/strategy/examples/test_creative_writing.py | tests/metagpt/strategy/examples/test_creative_writing.py | # -*- coding: utf-8 -*-
# @Date : 12/25/2023 1:06 PM
# @Author : stellahong (stellahong@fuzhi.ai)
# @Desc :
import re
from typing import Dict
from metagpt.strategy.tot import TreeofThought
from metagpt.strategy.tot_schema import (
BaseEvaluator,
BaseParser,
Strategy,
ThoughtSolverConfig,
)
from tests.metagpt.strategy.prompt_templates.creative_writing import (
cot_prompt,
vote_prompt,
)
class TextGenParser(BaseParser):
propose_prompt: str = cot_prompt
value_prompt: str = vote_prompt
def __call__(self, input_text: str) -> str:
return input_text
def propose(self, current_state: str, **kwargs) -> str:
return self.propose_prompt.format(input=current_state, **kwargs)
def value(self, input: str = "", **kwargs) -> str:
# node_result = self(input)
id = kwargs.get("node_id", "0")
return self.value_prompt + f"Choice {id}:\n{input}\n"
class TextGenEvaluator(BaseEvaluator):
value_map: Dict[str, float] = {"impossible": 0.001, "likely": 1, "sure": 20} # TODO: ad hoc
status_map: Dict = {val: key for key, val in value_map.items()}
def __call__(self, evaluation: str, **kwargs) -> float:
try:
value = 0
node_id = kwargs.get("node_id", "0")
pattern = r".*best choice is .*(\d+).*"
match = re.match(pattern, evaluation, re.DOTALL)
if match:
vote = int(match.groups()[0])
print(vote)
if vote == int(node_id):
value = 1
except:
value = 0
return value
def status_verify(self, value):
status = False
if value in self.status_map:
status_value = self.status_map[value]
if status_value != "impossible":
status = True
return status
def test_creative_writing():
import asyncio
initial_prompt = """It isn't difficult to do a handstand if you just stand on your hands. It caught him off guard that space smelled of seared steak. When she didn’t like a guy who was trying to pick her up, she started using sign language. Each person who knows you has a different perception of who you are."""
parser = TextGenParser()
evaluator = TextGenEvaluator()
config = ThoughtSolverConfig(max_step=2, n_generate_sample=1, n_select_sample=1, parser=parser, evaluator=evaluator)
tot_base = TreeofThought(strategy=Strategy.BFS, config=config)
asyncio.run(tot_base.solve(init_prompt=initial_prompt))
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/strategy/examples/__init__.py | tests/metagpt/strategy/examples/__init__.py | # -*- coding: utf-8 -*-
# @Date : 12/26/2023 3:32 PM
# @Author : stellahong (stellahong@fuzhi.ai)
# @Desc :
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/strategy/prompt_templates/creative_writing.py | tests/metagpt/strategy/prompt_templates/creative_writing.py | standard_prompt = """
Write a coherent passage of 4 short paragraphs. The end sentence of each paragraph must be: {input}
"""
cot_prompt = """
Write a coherent passage of 4 short paragraphs. The end sentence of each paragraph must be: {input}
Make a plan then write. Your output should be like:
Plan:
<Your plan here with json format>
Passage:
<Your passage here with json format>
"""
vote_prompt = """Given an instruction and several choices, decide which choice is most promising. Analyze each choice in detail, then conclude in the last line "The best choice is {s}", where s the integer id of the choice.
"""
compare_prompt = """Briefly analyze the coherency of the following two passages. Conclude in the last line "The more coherent passage is 1", "The more coherent passage is 2", or "The two passages are similarly coherent".
"""
score_prompt = """Analyze the following passage, then at the last line conclude "Thus the coherency score is {s}", where s is an integer from 1 to 10.
"""
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/strategy/prompt_templates/game24.py | tests/metagpt/strategy/prompt_templates/game24.py | # 5-shot
standard_prompt = """Use numbers and basic arithmetic operations (+ - * /) to obtain 24.
Input: 4 4 6 8
Answer: (4 + 8) * (6 - 4) = 24
Input: 2 9 10 12
Answer: 2 * 12 * (10 - 9) = 24
Input: 4 9 10 13
Answer: (13 - 9) * (10 - 4) = 24
Input: 1 4 8 8
Answer: (8 / 4 + 1) * 8 = 24
Input: 5 5 5 9
Answer: 5 + 5 + 5 + 9 = 24
Input: {input}
"""
# 5-shot
cot_prompt = """Use numbers and basic arithmetic operations (+ - * /) to obtain 24. Each step, you are only allowed to choose two of the remaining numbers to obtain a new number.
Input: 4 4 6 8
Steps:
4 + 8 = 12 (left: 4 6 12)
6 - 4 = 2 (left: 2 12)
2 * 12 = 24 (left: 24)
Answer: (6 - 4) * (4 + 8) = 24
Input: 2 9 10 12
Steps:
12 * 2 = 24 (left: 9 10 24)
10 - 9 = 1 (left: 1 24)
24 * 1 = 24 (left: 24)
Answer: (12 * 2) * (10 - 9) = 24
Input: 4 9 10 13
Steps:
13 - 10 = 3 (left: 3 4 9)
9 - 3 = 6 (left: 4 6)
4 * 6 = 24 (left: 24)
Answer: 4 * (9 - (13 - 10)) = 24
Input: 1 4 8 8
Steps:
8 / 4 = 2 (left: 1 2 8)
1 + 2 = 3 (left: 3 8)
3 * 8 = 24 (left: 24)
Answer: (1 + 8 / 4) * 8 = 24
Input: 5 5 5 9
Steps:
5 + 5 = 10 (left: 5 9 10)
10 + 5 = 15 (left: 9 15)
15 + 9 = 24 (left: 24)
Answer: ((5 + 5) + 5) + 9 = 24
Input: {input}
"""
# 1-shot
propose_prompt = """Here is an Example for 1 input and 8 possible thoughts:
Input: 2 8 8 14
Possible next steps:
2 + 8 = 10 (left: 8 10 14)
8 / 2 = 4 (left: 4 8 14)
14 + 2 = 16 (left: 8 8 16)
2 * 8 = 16 (left: 8 14 16)
8 - 2 = 6 (left: 6 8 14)
14 - 8 = 6 (left: 2 6 8)
14 / 2 = 7 (left: 7 8 8)
14 - 2 = 12 (left: 8 8 12)
Here is my task for 1 input and {n_generate_sample} possible thoughts:
Input: {input}
Possible next steps:
"""
value_prompt = """Evaluate if given numbers can reach 24 (sure/likely/impossible)
10 14
10 + 14 = 24
sure
11 12
11 + 12 = 23
12 - 11 = 1
11 * 12 = 132
11 / 12 = 0.91
impossible
4 4 10
4 + 4 + 10 = 8 + 10 = 18
4 * 10 - 4 = 40 - 4 = 36
(10 - 4) * 4 = 6 * 4 = 24
sure
4 9 11
9 + 11 + 4 = 20 + 4 = 24
sure
5 7 8
5 + 7 + 8 = 12 + 8 = 20
(8 - 5) * 7 = 3 * 7 = 21
I cannot obtain 24 now, but numbers are within a reasonable range
likely
5 6 6
5 + 6 + 6 = 17
(6 - 5) * 6 = 1 * 6 = 6
I cannot obtain 24 now, but numbers are within a reasonable range
likely
10 10 11
10 + 10 + 11 = 31
(11 - 10) * 10 = 10
10 10 10 are all too big
impossible
1 3 3
1 * 3 * 3 = 9
(1 + 3) * 3 = 12
1 3 3 are all too small
impossible
{input}
"""
value_last_step_prompt = """Use numbers and basic arithmetic operations (+ - * /) to obtain 24. Given an input and an answer, give a judgement (sure/impossible) if the answer is correct, i.e. it uses each input exactly once and no other numbers, and reach 24.
Input: 4 4 6 8
Answer: (4 + 8) * (6 - 4) = 24
Judge:
sure
Input: 2 9 10 12
Answer: 2 * 12 * (10 - 9) = 24
Judge:
sure
Input: 4 9 10 13
Answer: (13 - 9) * (10 - 4) = 24
Judge:
sure
Input: 4 4 6 8
Answer: (4 + 8) * (6 - 4) + 1 = 25
Judge:
impossible
Input: 2 9 10 12
Answer: 2 * (12 - 10) = 24
Judge:
impossible
Input: 4 9 10 13
Answer: (13 - 4) * (10 - 9) = 24
Judge:
impossible
Input: {input}
Answer: {answer}
Judge:"""
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/strategy/prompt_templates/__init__.py | tests/metagpt/strategy/prompt_templates/__init__.py | # -*- coding: utf-8 -*-
# @Date : 12/23/2023 5:21 PM
# @Author : stellahong (stellahong@fuzhi.ai)
# @Desc :
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/learn/test_google_search.py | tests/metagpt/learn/test_google_search.py | import pytest
from pydantic import BaseModel
from metagpt.learn.google_search import google_search
from metagpt.tools import SearchEngineType
@pytest.mark.asyncio
async def test_google_search(search_engine_mocker):
class Input(BaseModel):
input: str
inputs = [{"input": "ai agent"}]
for i in inputs:
seed = Input(**i)
result = await google_search(
seed.input,
engine=SearchEngineType.SERPER_GOOGLE,
api_key="mock-serper-key",
)
assert result != ""
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/learn/test_text_to_speech.py | tests/metagpt/learn/test_text_to_speech.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/8/18
@Author : mashenquan
@File : test_text_to_speech.py
@Desc : Unit tests.
"""
import pytest
from azure.cognitiveservices.speech import ResultReason, SpeechSynthesizer
from metagpt.config2 import config
from metagpt.learn.text_to_speech import text_to_speech
from metagpt.tools.iflytek_tts import IFlyTekTTS
from metagpt.utils.s3 import S3
@pytest.mark.asyncio
async def test_azure_text_to_speech(mocker):
# mock
config.iflytek_api_key = None
config.iflytek_api_secret = None
config.iflytek_app_id = None
mock_result = mocker.Mock()
mock_result.audio_data = b"mock audio data"
mock_result.reason = ResultReason.SynthesizingAudioCompleted
mock_data = mocker.Mock()
mock_data.get.return_value = mock_result
mocker.patch.object(SpeechSynthesizer, "speak_ssml_async", return_value=mock_data)
mocker.patch.object(S3, "cache", return_value="http://mock.s3.com/1.wav")
# Prerequisites
assert not config.iflytek_app_id
assert not config.iflytek_api_key
assert not config.iflytek_api_secret
assert config.azure_tts_subscription_key and config.azure_tts_subscription_key != "YOUR_API_KEY"
assert config.azure_tts_region
config.copy()
# test azure
data = await text_to_speech("panda emoji", config=config)
assert "base64" in data or "http" in data
@pytest.mark.asyncio
async def test_iflytek_text_to_speech(mocker):
# mock
config.azure_tts_subscription_key = None
config.azure_tts_region = None
mocker.patch.object(IFlyTekTTS, "synthesize_speech", return_value=None)
mock_data = mocker.AsyncMock()
mock_data.read.return_value = b"mock iflytek"
mock_reader = mocker.patch("aiofiles.open")
mock_reader.return_value.__aenter__.return_value = mock_data
mocker.patch.object(S3, "cache", return_value="http://mock.s3.com/1.mp3")
# Prerequisites
assert config.iflytek_app_id
assert config.iflytek_api_key
assert config.iflytek_api_secret
assert not config.azure_tts_subscription_key or config.azure_tts_subscription_key == "YOUR_API_KEY"
assert not config.azure_tts_region
# test azure
data = await text_to_speech("panda emoji", config=config)
assert "base64" in data or "http" in data
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/learn/test_text_to_embedding.py | tests/metagpt/learn/test_text_to_embedding.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/8/18
@Author : mashenquan
@File : test_text_to_embedding.py
@Desc : Unit tests.
"""
import json
from pathlib import Path
import pytest
from metagpt.config2 import config
from metagpt.learn.text_to_embedding import text_to_embedding
from metagpt.utils.common import aread
@pytest.mark.asyncio
async def test_text_to_embedding(mocker):
# mock
mock_post = mocker.patch("aiohttp.ClientSession.post")
mock_response = mocker.AsyncMock()
mock_response.status = 200
data = await aread(Path(__file__).parent / "../../data/openai/embedding.json")
mock_response.json.return_value = json.loads(data)
mock_post.return_value.__aenter__.return_value = mock_response
config.get_openai_llm().proxy = mocker.PropertyMock(return_value="http://mock.proxy")
# Prerequisites
assert config.get_openai_llm().api_key
assert config.get_openai_llm().proxy
v = await text_to_embedding(text="Panda emoji", config=config)
assert len(v.data) > 0
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/learn/test_skill_loader.py | tests/metagpt/learn/test_skill_loader.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/9/19
@Author : mashenquan
@File : test_skill_loader.py
@Desc : Unit tests.
"""
from pathlib import Path
import pytest
from metagpt.learn.skill_loader import SkillsDeclaration
@pytest.mark.asyncio
async def test_suite(context):
context.kwargs.agent_skills = [
{"id": 1, "name": "text_to_speech", "type": "builtin", "config": {}, "enabled": True},
{"id": 2, "name": "text_to_image", "type": "builtin", "config": {}, "enabled": True},
{"id": 3, "name": "ai_call", "type": "builtin", "config": {}, "enabled": True},
{"id": 3, "name": "data_analysis", "type": "builtin", "config": {}, "enabled": True},
{"id": 5, "name": "crawler", "type": "builtin", "config": {"engine": "ddg"}, "enabled": True},
{"id": 6, "name": "knowledge", "type": "builtin", "config": {}, "enabled": True},
{"id": 6, "name": "web_search", "type": "builtin", "config": {}, "enabled": True},
]
pathname = Path(__file__).parent / "../../../docs/.well-known/skills.yaml"
loader = await SkillsDeclaration.load(skill_yaml_file_name=pathname)
skills = loader.get_skill_list(context=context)
assert skills
assert len(skills) >= 3
for desc, name in skills.items():
assert desc
assert name
entity = loader.entities.get("Assistant")
assert entity
assert entity.skills
for sk in entity.skills:
assert sk
assert sk.arguments
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/learn/__init__.py | tests/metagpt/learn/__init__.py | python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false | |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/learn/test_text_to_image.py | tests/metagpt/learn/test_text_to_image.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/8/18
@Author : mashenquan
@File : test_text_to_image.py
@Desc : Unit tests.
"""
import base64
import openai
import pytest
from pydantic import BaseModel
from metagpt.config2 import config
from metagpt.learn.text_to_image import text_to_image
from metagpt.tools.metagpt_text_to_image import MetaGPTText2Image
from metagpt.tools.openai_text_to_image import OpenAIText2Image
from metagpt.utils.s3 import S3
@pytest.mark.asyncio
async def test_text_to_image(mocker):
# mock
mocker.patch.object(MetaGPTText2Image, "text_2_image", return_value=b"mock MetaGPTText2Image")
mocker.patch.object(OpenAIText2Image, "text_2_image", return_value=b"mock OpenAIText2Image")
mocker.patch.object(S3, "cache", return_value="http://mock/s3")
assert config.metagpt_tti_url
data = await text_to_image("Panda emoji", size_type="512x512", config=config)
assert "base64" in data or "http" in data
@pytest.mark.asyncio
async def test_openai_text_to_image(mocker):
# mocker
mock_url = mocker.Mock()
mock_url.url.return_value = "http://mock.com/0.png"
class _MockData(BaseModel):
data: list
mock_data = _MockData(data=[mock_url])
mocker.patch.object(openai.resources.images.AsyncImages, "generate", return_value=mock_data)
mock_post = mocker.patch("aiohttp.ClientSession.get")
mock_response = mocker.AsyncMock()
mock_response.status = 200
mock_response.read.return_value = base64.b64encode(b"success")
mock_post.return_value.__aenter__.return_value = mock_response
mocker.patch.object(S3, "cache", return_value="http://mock.s3.com/0.png")
config.metagpt_tti_url = None
assert config.get_openai_llm()
data = await text_to_image("Panda emoji", size_type="512x512", config=config)
assert "base64" in data or "http" in data
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/data/tools/test_script_for_file_manager.py | tests/data/tools/test_script_for_file_manager.py | python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false | |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/data/code/python/1.py | tests/data/code/python/1.py | """
===============
Degree Analysis
===============
This example shows several ways to visualize the distribution of the degree of
nodes with two common techniques: a *degree-rank plot* and a
*degree histogram*.
In this example, a random Graph is generated with 100 nodes. The degree of
each node is determined, and a figure is generated showing three things:
1. The subgraph of connected components
2. The degree-rank plot for the Graph, and
3. The degree histogram
"""
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
G = nx.gnp_random_graph(100, 0.02, seed=10374196)
degree_sequence = sorted((d for n, d in G.degree()), reverse=True)
dmax = max(degree_sequence)
fig = plt.figure("Degree of a random graph", figsize=(8, 8))
# Create a gridspec for adding subplots of different sizes
axgrid = fig.add_gridspec(5, 4)
ax0 = fig.add_subplot(axgrid[0:3, :])
Gcc = G.subgraph(sorted(nx.connected_components(G), key=len, reverse=True)[0])
pos = nx.spring_layout(Gcc, seed=10396953)
nx.draw_networkx_nodes(Gcc, pos, ax=ax0, node_size=20)
nx.draw_networkx_edges(Gcc, pos, ax=ax0, alpha=0.4)
ax0.set_title("Connected components of G")
ax0.set_axis_off()
print("aa")
ax1 = fig.add_subplot(axgrid[3:, :2])
ax1.plot(degree_sequence, "b-", marker="o")
ax1.set_title("Degree Rank Plot")
ax1.set_ylabel("Degree")
ax1.set_xlabel("Rank")
ax2 = fig.add_subplot(axgrid[3:, 2:])
ax2.bar(*np.unique(degree_sequence, return_counts=True))
ax2.set_title("Degree histogram")
ax2.set_xlabel("Degree")
ax2.set_ylabel("# of Nodes")
fig.tight_layout()
plt.show()
class Game:
def __init__(self):
self.snake = Snake(400, 300, 5, 0)
self.enemy = Enemy(100, 100, 3, 1)
self.power_up = PowerUp(200, 200)
def handle_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
return False
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_UP:
self.snake.change_direction(0)
elif event.key == pygame.K_DOWN:
self.snake.change_direction(1)
elif event.key == pygame.K_LEFT:
self.snake.change_direction(2)
elif event.key == pygame.K_RIGHT:
self.snake.change_direction(3)
return True
def update(self):
self.snake.move()
self.enemy.move()
def draw(self, screen):
self.snake.draw(screen)
self.enemy.draw(screen)
self.power_up.draw(screen)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/data/incremental_dev_project/mock.py | tests/data/incremental_dev_project/mock.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/01/17
@Author : mannaandpoem
@File : mock.py
"""
NEW_REQUIREMENT_SAMPLE = """
Adding graphical interface functionality to enhance the user experience in the number-guessing game. The existing number-guessing game currently relies on command-line input for numbers. The goal is to introduce a graphical interface to improve the game's usability and visual appeal
"""
PRD_SAMPLE = """
## Language
en_us
## Programming Language
Python
## Original Requirements
Make a simple number guessing game
## Product Goals
- Ensure a user-friendly interface for the game
- Provide a challenging yet enjoyable game experience
- Design the game to be easily extendable for future features
## User Stories
- As a player, I want to guess numbers and receive feedback on whether my guess is too high or too low
- As a player, I want to be able to set the difficulty level by choosing the range of possible numbers
- As a player, I want to see my previous guesses to strategize my next guess
- As a player, I want to know how many attempts it took me to guess the number once I get it right
## Competitive Analysis
- Guess The Number Game A: Basic text interface, no difficulty levels
- Number Master B: Has difficulty levels, but cluttered interface
- Quick Guess C: Sleek design, but lacks performance tracking
- NumGuess D: Good performance tracking, but not mobile-friendly
- GuessIt E: Mobile-friendly, but too many ads
- Perfect Guess F: Offers hints, but the hints are not very helpful
- SmartGuesser G: Has a learning mode, but lacks a competitive edge
## Competitive Quadrant Chart
quadrantChart
title "User Engagement and Game Complexity"
x-axis "Low Complexity" --> "High Complexity"
y-axis "Low Engagement" --> "High Engagement"
quadrant-1 "Too Simple"
quadrant-2 "Niche Appeal"
quadrant-3 "Complex & Unengaging"
quadrant-4 "Sweet Spot"
"Guess The Number Game A": [0.2, 0.4]
"Number Master B": [0.5, 0.3]
"Quick Guess C": [0.6, 0.7]
"NumGuess D": [0.4, 0.6]
"GuessIt E": [0.7, 0.5]
"Perfect Guess F": [0.6, 0.4]
"SmartGuesser G": [0.8, 0.6]
"Our Target Product": [0.5, 0.8]
## Requirement Analysis
The game should be simple yet engaging, allowing players of different skill levels to enjoy it. It should provide immediate feedback and track the player's performance. The game should also be designed with a clean and intuitive interface, and it should be easy to add new features in the future.
## Requirement Pool
- ['P0', 'Implement the core game logic to randomly select a number and allow the user to guess it']
- ['P0', 'Design a user interface that displays the game status and results clearly']
- ['P1', 'Add difficulty levels by varying the range of possible numbers']
- ['P1', 'Keep track of and display the number of attempts for each game session']
- ['P2', "Store and show the history of the player's guesses during a game session"]
## UI Design draft
The UI will feature a clean and minimalist design with a number input field, submit button, and messages area to provide feedback. There will be options to select the difficulty level and a display showing the number of attempts and history of past guesses.
## Anything UNCLEAR"""
DESIGN_SAMPLE = """
## Implementation approach
We will create a Python-based number guessing game with a simple command-line interface. For the user interface, we will use the built-in 'input' and 'print' functions for interaction. The random library will be used for generating random numbers. We will structure the code to be modular and easily extendable, separating the game logic from the user interface.
## File list
- main.py
- game.py
- ui.py
## Data structures and interfaces
classDiagram
class Game {
-int secret_number
-int min_range
-int max_range
-list attempts
+__init__(difficulty: str)
+start_game()
+check_guess(guess: int) str
+get_attempts() int
+get_history() list
}
class UI {
+start()
+display_message(message: str)
+get_user_input(prompt: str) str
+show_attempts(attempts: int)
+show_history(history: list)
+select_difficulty() str
}
class Main {
+main()
}
Main --> UI
UI --> Game
## Program call flow
sequenceDiagram
participant M as Main
participant UI as UI
participant G as Game
M->>UI: start()
UI->>UI: select_difficulty()
UI-->>G: __init__(difficulty)
G->>G: start_game()
loop Game Loop
UI->>UI: get_user_input("Enter your guess:")
UI-->>G: check_guess(guess)
G->>UI: display_message(feedback)
G->>UI: show_attempts(attempts)
G->>UI: show_history(history)
end
G->>UI: display_message("Correct! Game over.")
UI->>M: main() # Game session ends
## Anything UNCLEAR
The requirement analysis suggests the need for a clean and intuitive interface. Since we are using a command-line interface, we need to ensure that the text-based UI is as user-friendly as possible. Further clarification on whether a graphical user interface (GUI) is expected in the future would be helpful for planning the extendability of the game."""
TASK_SAMPLE = """
## Required Python packages
- random==2.2.1
## Required Other language third-party packages
- No third-party dependencies required
## Logic Analysis
- ['game.py', 'Contains Game class with methods __init__, start_game, check_guess, get_attempts, get_history and uses random library for generating secret_number']
- ['ui.py', 'Contains UI class with methods start, display_message, get_user_input, show_attempts, show_history, select_difficulty and interacts with Game class']
- ['main.py', 'Contains Main class with method main that initializes UI class and starts the game loop']
## Task list
- game.py
- ui.py
- main.py
## Full API spec
## Shared Knowledge
`game.py` contains the core game logic and is used by `ui.py` to interact with the user. `main.py` serves as the entry point to start the game.
## Anything UNCLEAR
The requirement analysis suggests the need for a clean and intuitive interface. Since we are using a command-line interface, we need to ensure that the text-based UI is as user-friendly as possible. Further clarification on whether a graphical user interface (GUI) is expected in the future would be helpful for planning the extendability of the game."""
OLD_CODE_SAMPLE = """
--- game.py
```## game.py
import random
class Game:
def __init__(self, difficulty: str = 'medium'):
self.min_range, self.max_range = self._set_difficulty(difficulty)
self.secret_number = random.randint(self.min_range, self.max_range)
self.attempts = []
def _set_difficulty(self, difficulty: str):
difficulties = {
'easy': (1, 10),
'medium': (1, 100),
'hard': (1, 1000)
}
return difficulties.get(difficulty, (1, 100))
def start_game(self):
self.secret_number = random.randint(self.min_range, self.max_range)
self.attempts = []
def check_guess(self, guess: int) -> str:
self.attempts.append(guess)
if guess < self.secret_number:
return "It's higher."
elif guess > self.secret_number:
return "It's lower."
else:
return "Correct! Game over."
def get_attempts(self) -> int:
return len(self.attempts)
def get_history(self) -> list:
return self.attempts```
--- ui.py
```## ui.py
from game import Game
class UI:
def start(self):
difficulty = self.select_difficulty()
game = Game(difficulty)
game.start_game()
self.display_welcome_message(game)
feedback = ""
while feedback != "Correct! Game over.":
guess = self.get_user_input("Enter your guess: ")
if self.is_valid_guess(guess):
feedback = game.check_guess(int(guess))
self.display_message(feedback)
self.show_attempts(game.get_attempts())
self.show_history(game.get_history())
else:
self.display_message("Please enter a valid number.")
def display_welcome_message(self, game):
print("Welcome to the Number Guessing Game!")
print(f"Guess the number between {game.min_range} and {game.max_range}.")
def is_valid_guess(self, guess):
return guess.isdigit()
def display_message(self, message: str):
print(message)
def get_user_input(self, prompt: str) -> str:
return input(prompt)
def show_attempts(self, attempts: int):
print(f"Number of attempts: {attempts}")
def show_history(self, history: list):
print("Guess history:")
for guess in history:
print(guess)
def select_difficulty(self) -> str:
while True:
difficulty = input("Select difficulty (easy, medium, hard): ").lower()
if difficulty in ['easy', 'medium', 'hard']:
return difficulty
else:
self.display_message("Invalid difficulty. Please choose 'easy', 'medium', or 'hard'.")```
--- main.py
```## main.py
from ui import UI
class Main:
def main(self):
user_interface = UI()
user_interface.start()
if __name__ == "__main__":
main_instance = Main()
main_instance.main()```
"""
REFINED_PRD_JSON = {
"Language": "en_us",
"Programming Language": "Python",
"Refined Requirements": "Adding graphical interface functionality to enhance the user experience in the number-guessing game.",
"Project Name": "number_guessing_game",
"Refined Product Goals": [
"Ensure a user-friendly interface for the game with the new graphical interface",
"Provide a challenging yet enjoyable game experience with visual enhancements",
"Design the game to be easily extendable for future features, including graphical elements",
],
"Refined User Stories": [
"As a player, I want to interact with a graphical interface to guess numbers and receive visual feedback on my guesses",
"As a player, I want to easily select the difficulty level through the graphical interface",
"As a player, I want to visually track my previous guesses and the number of attempts in the graphical interface",
"As a player, I want to be congratulated with a visually appealing message when I guess the number correctly",
],
"Competitive Analysis": [
"Guess The Number Game A: Basic text interface, no difficulty levels",
"Number Master B: Has difficulty levels, but cluttered interface",
"Quick Guess C: Sleek design, but lacks performance tracking",
"NumGuess D: Good performance tracking, but not mobile-friendly",
"GuessIt E: Mobile-friendly, but too many ads",
"Perfect Guess F: Offers hints, but the hints are not very helpful",
"SmartGuesser G: Has a learning mode, but lacks a competitive edge",
"Graphical Guess H: Graphical interface, but poor user experience due to complex design",
],
"Competitive Quadrant Chart": 'quadrantChart\n title "User Engagement and Game Complexity with Graphical Interface"\n x-axis "Low Complexity" --> "High Complexity"\n y-axis "Low Engagement" --> "High Engagement"\n quadrant-1 "Too Simple"\n quadrant-2 "Niche Appeal"\n quadrant-3 "Complex & Unengaging"\n quadrant-4 "Sweet Spot"\n "Guess The Number Game A": [0.2, 0.4]\n "Number Master B": [0.5, 0.3]\n "Quick Guess C": [0.6, 0.7]\n "NumGuess D": [0.4, 0.6]\n "GuessIt E": [0.7, 0.5]\n "Perfect Guess F": [0.6, 0.4]\n "SmartGuesser G": [0.8, 0.6]\n "Graphical Guess H": [0.7, 0.3]\n "Our Target Product": [0.5, 0.9]',
"Refined Requirement Analysis": [
"The game should maintain its simplicity while integrating a graphical interface for enhanced engagement.",
"Immediate visual feedback is crucial for user satisfaction in the graphical interface.",
"The interface must be intuitive, allowing for easy navigation and selection of game options.",
"The graphical design should be clean and not detract from the game's core guessing mechanic.",
],
"Refined Requirement Pool": [
["P0", "Implement a graphical user interface (GUI) to replace the command-line interaction"],
[
"P0",
"Design a user interface that displays the game status, results, and feedback clearly with graphical elements",
],
["P1", "Incorporate interactive elements for selecting difficulty levels"],
["P1", "Visualize the history of the player's guesses and the number of attempts within the game session"],
["P2", "Create animations for correct or incorrect guesses to enhance user feedback"],
["P2", "Ensure the GUI is responsive and compatible with various screen sizes"],
["P2", "Store and show the history of the player's guesses during a game session"],
],
"UI Design draft": "The UI will feature a modern and minimalist design with a graphical number input field, a submit button with animations, and a dedicated area for visual feedback. It will include interactive elements to select the difficulty level and a visual display for the number of attempts and history of past guesses.",
"Anything UNCLEAR": "",
}
REFINED_DESIGN_JSON = {
"Refined Implementation Approach": "To accommodate the new graphical user interface (GUI) requirements, we will leverage the Tkinter library, which is included with Python and supports the creation of a user-friendly GUI. The game logic will remain in Python, with Tkinter handling the rendering of the interface. We will ensure that the GUI is responsive and provides immediate visual feedback. The main game loop will be event-driven, responding to user inputs such as button clicks and difficulty selection.",
"Refined File list": ["main.py", "game.py", "ui.py", "gui.py"],
"Refined Data structures and interfaces": "\nclassDiagram\n class Game {\n -int secret_number\n -int min_range\n -int max_range\n -list attempts\n +__init__(difficulty: str)\n +start_game()\n +check_guess(guess: int) str\n +get_attempts() int\n +get_history() list\n }\n class UI {\n +start()\n +display_message(message: str)\n +get_user_input(prompt: str) str\n +show_attempts(attempts: int)\n +show_history(history: list)\n +select_difficulty() str\n }\n class GUI {\n +__init__()\n +setup_window()\n +bind_events()\n +update_feedback(message: str)\n +update_attempts(attempts: int)\n +update_history(history: list)\n +show_difficulty_selector()\n +animate_guess_result(correct: bool)\n }\n class Main {\n +main()\n }\n Main --> UI\n UI --> Game\n UI --> GUI\n GUI --> Game\n",
"Refined Program call flow": '\nsequenceDiagram\n participant M as Main\n participant UI as UI\n participant G as Game\n participant GU as GUI\n M->>UI: start()\n UI->>GU: setup_window()\n GU->>GU: bind_events()\n GU->>UI: select_difficulty()\n UI-->>G: __init__(difficulty)\n G->>G: start_game()\n loop Game Loop\n GU->>GU: show_difficulty_selector()\n GU->>UI: get_user_input("Enter your guess:")\n UI-->>G: check_guess(guess)\n G->>GU: update_feedback(feedback)\n G->>GU: update_attempts(attempts)\n G->>GU: update_history(history)\n GU->>GU: animate_guess_result(correct)\n end\n G->>GU: update_feedback("Correct! Game over.")\n GU->>M: main() # Game session ends\n',
"Anything UNCLEAR": "",
}
REFINED_TASK_JSON = {
"Required Python packages": ["random==2.2.1", "Tkinter==8.6"],
"Required Other language third-party packages": ["No third-party dependencies required"],
"Refined Logic Analysis": [
[
"game.py",
"Contains Game class with methods __init__, start_game, check_guess, get_attempts, get_history and uses random library for generating secret_number",
],
[
"ui.py",
"Contains UI class with methods start, display_message, get_user_input, show_attempts, show_history, select_difficulty and interacts with Game class",
],
[
"gui.py",
"Contains GUI class with methods __init__, setup_window, bind_events, update_feedback, update_attempts, update_history, show_difficulty_selector, animate_guess_result and interacts with Game class for GUI rendering",
],
[
"main.py",
"Contains Main class with method main that initializes UI class and starts the event-driven game loop",
],
],
"Refined Task list": ["game.py", "ui.py", "gui.py", "main.py"],
"Full API spec": "",
"Refined Shared Knowledge": "`game.py` contains the core game logic and is used by `ui.py` to interact with the user. `main.py` serves as the entry point to start the game. `gui.py` is introduced to handle the graphical user interface using Tkinter, which will interact with both `game.py` and `ui.py` for a responsive and user-friendly experience.",
"Anything UNCLEAR": "",
}
CODE_PLAN_AND_CHANGE_SAMPLE = {
"Development Plan": [
"Develop the GUI using Tkinter to replace the command-line interface. Start by setting up the main window and event handling. Then, add widgets for displaying the game status, results, and feedback. Implement interactive elements for difficulty selection and visualize the guess history. Finally, create animations for guess feedback and ensure responsiveness across different screen sizes.",
"Modify the main.py to initialize the GUI and start the event-driven game loop. Ensure that the GUI is the primary interface for user interaction.",
],
"Incremental Change": [
"""```diff\nclass GUI:\n- pass\n+ def __init__(self):\n+ self.setup_window()\n+\n+ def setup_window(self):\n+ # Initialize the main window using Tkinter\n+ pass\n+\n+ def bind_events(self):\n+ # Bind button clicks and other events\n+ pass\n+\n+ def update_feedback(self, message: str):\n+ # Update the feedback label with the given message\n+ pass\n+\n+ def update_attempts(self, attempts: int):\n+ # Update the attempts label with the number of attempts\n+ pass\n+\n+ def update_history(self, history: list):\n+ # Update the history view with the list of past guesses\n+ pass\n+\n+ def show_difficulty_selector(self):\n+ # Show buttons or a dropdown for difficulty selection\n+ pass\n+\n+ def animate_guess_result(self, correct: bool):\n+ # Trigger an animation for correct or incorrect guesses\n+ pass\n```""",
"""```diff\nclass Main:\n def main(self):\n- user_interface = UI()\n- user_interface.start()\n+ graphical_user_interface = GUI()\n+ graphical_user_interface.setup_window()\n+ graphical_user_interface.bind_events()\n+ # Start the Tkinter main loop\n+ pass\n\n if __name__ == "__main__":\n main_instance = Main()\n main_instance.main()\n```\n\n3. Plan for ui.py: Refactor ui.py to work with the new GUI class. Remove command-line interactions and delegate display and input tasks to the GUI.\n```python\nclass UI:\n- def display_message(self, message: str):\n- print(message)\n+\n+ def display_message(self, message: str):\n+ # This method will now pass the message to the GUI to display\n+ pass\n\n- def get_user_input(self, prompt: str) -> str:\n- return input(prompt)\n+\n+ def get_user_input(self, prompt: str) -> str:\n+ # This method will now trigger the GUI to get user input\n+ pass\n\n- def show_attempts(self, attempts: int):\n- print(f"Number of attempts: {attempts}")\n+\n+ def show_attempts(self, attempts: int):\n+ # This method will now update the GUI with the number of attempts\n+ pass\n\n- def show_history(self, history: list):\n- print("Guess history:")\n- for guess in history:\n- print(guess)\n+\n+ def show_history(self, history: list):\n+ # This method will now update the GUI with the guess history\n+ pass\n```\n\n4. Plan for game.py: Ensure game.py remains mostly unchanged as it contains the core game logic. However, make minor adjustments if necessary to integrate with the new GUI.\n```python\nclass Game:\n # No changes required for now\n```\n""",
],
}
REFINED_CODE_INPUT_SAMPLE = """
-----Now, game.py to be rewritten
```## game.py
import random
class Game:
def __init__(self, difficulty: str = 'medium'):
self.min_range, self.max_range = self._set_difficulty(difficulty)
self.secret_number = random.randint(self.min_range, self.max_range)
self.attempts = []
def _set_difficulty(self, difficulty: str):
difficulties = {
'easy': (1, 10),
'medium': (1, 100),
'hard': (1, 1000)
}
return difficulties.get(difficulty, (1, 100))
def start_game(self):
self.secret_number = random.randint(self.min_range, self.max_range)
self.attempts = []
def check_guess(self, guess: int) -> str:
self.attempts.append(guess)
if guess < self.secret_number:
return "It's higher."
elif guess > self.secret_number:
return "It's lower."
else:
return "Correct! Game over."
def get_attempts(self) -> int:
return len(self.attempts)
def get_history(self) -> list:
return self.attempts```
"""
REFINED_CODE_SAMPLE = """
## game.py
import random
class Game:
def __init__(self, difficulty: str = 'medium'):
# Set the difficulty level with default value 'medium'
self.min_range, self.max_range = self._set_difficulty(difficulty)
# Initialize the secret number based on the difficulty
self.secret_number = random.randint(self.min_range, self.max_range)
# Initialize the list to keep track of attempts
self.attempts = []
def _set_difficulty(self, difficulty: str):
# Define the range of numbers for each difficulty level
difficulties = {
'easy': (1, 10),
'medium': (1, 100),
'hard': (1, 1000)
}
# Return the corresponding range for the selected difficulty, default to 'medium' if not found
return difficulties.get(difficulty, (1, 100))
def start_game(self):
# Reset the secret number and attempts list for a new game
self.secret_number = random.randint(self.min_range, self.max_range)
self.attempts.clear()
def check_guess(self, guess: int) -> str:
# Add the guess to the attempts list
self.attempts.append(guess)
# Provide feedback based on the guess
if guess < self.secret_number:
return "It's higher."
elif guess > self.secret_number:
return "It's lower."
else:
return "Correct! Game over."
def get_attempts(self) -> int:
# Return the number of attempts made
return len(self.attempts)
def get_history(self) -> list:
# Return the list of attempts made
return self.attempts
"""
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/data/demo_project/game.py | tests/data/demo_project/game.py | ## game.py
import random
from typing import List, Tuple
class Game:
def __init__(self):
self.grid: List[List[int]] = [[0 for _ in range(4)] for _ in range(4)]
self.score: int = 0
self.game_over: bool = False
def reset_game(self):
self.grid = [[0 for _ in range(4)] for _ in range(4)]
self.score = 0
self.game_over = False
self.add_new_tile()
self.add_new_tile()
def move(self, direction: str):
if direction == "up":
self._move_up()
elif direction == "down":
self._move_down()
elif direction == "left":
self._move_left()
elif direction == "right":
self._move_right()
def is_game_over(self) -> bool:
for i in range(4):
for j in range(4):
if self.grid[i][j] == 0:
return False
if j < 3 and self.grid[i][j] == self.grid[i][j + 1]:
return False
if i < 3 and self.grid[i][j] == self.grid[i + 1][j]:
return False
return True
def get_empty_cells(self) -> List[Tuple[int, int]]:
empty_cells = []
for i in range(4):
for j in range(4):
if self.grid[i][j] == 0:
empty_cells.append((i, j))
return empty_cells
def add_new_tile(self):
empty_cells = self.get_empty_cells()
if empty_cells:
x, y = random.choice(empty_cells)
self.grid[x][y] = 2 if random.random() < 0.9 else 4
def get_score(self) -> int:
return self.score
def _move_up(self):
for j in range(4):
for i in range(1, 4):
if self.grid[i][j] != 0:
for k in range(i, 0, -1):
if self.grid[k - 1][j] == 0:
self.grid[k - 1][j] = self.grid[k][j]
self.grid[k][j] = 0
def _move_down(self):
for j in range(4):
for i in range(2, -1, -1):
if self.grid[i][j] != 0:
for k in range(i, 3):
if self.grid[k + 1][j] == 0:
self.grid[k + 1][j] = self.grid[k][j]
self.grid[k][j] = 0
def _move_left(self):
for i in range(4):
for j in range(1, 4):
if self.grid[i][j] != 0:
for k in range(j, 0, -1):
if self.grid[i][k - 1] == 0:
self.grid[i][k - 1] = self.grid[i][k]
self.grid[i][k] = 0
def _move_right(self):
for i in range(4):
for j in range(2, -1, -1):
if self.grid[i][j] != 0:
for k in range(j, 3):
if self.grid[i][k + 1] == 0:
self.grid[i][k + 1] = self.grid[i][k]
self.grid[i][k] = 0
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/stream_output_via_api.py | examples/stream_output_via_api.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/3/27 9:44
@Author : leiwu30
@File : stream_output_via_api.py
@Description : Stream log information and communicate over the network via web api.
"""
import asyncio
import json
import socket
import threading
from contextvars import ContextVar
from flask import Flask, Response, jsonify, request, send_from_directory
from metagpt.const import TUTORIAL_PATH
from metagpt.logs import logger, set_llm_stream_logfunc
from metagpt.roles.tutorial_assistant import TutorialAssistant
from metagpt.utils.stream_pipe import StreamPipe
app = Flask(__name__)
def stream_pipe_log(content):
print(content, end="")
stream_pipe = stream_pipe_var.get(None)
if stream_pipe:
stream_pipe.set_message(content)
def write_tutorial(message):
async def main(idea, stream_pipe):
stream_pipe_var.set(stream_pipe)
role = TutorialAssistant()
await role.run(idea)
def thread_run(idea: str, stream_pipe: StreamPipe = None):
"""
Convert asynchronous function to thread function
"""
asyncio.run(main(idea, stream_pipe))
stream_pipe = StreamPipe()
thread = threading.Thread(
target=thread_run,
args=(
message["content"],
stream_pipe,
),
)
thread.start()
while thread.is_alive():
msg = stream_pipe.get_message()
yield stream_pipe.msg2stream(msg)
@app.route("/v1/chat/completions", methods=["POST"])
def completions():
"""
data: {
"model": "write_tutorial",
"stream": true,
"messages": [
{
"role": "user",
"content": "Write a tutorial about MySQL"
}
]
}
"""
data = json.loads(request.data)
logger.info(json.dumps(data, indent=4, ensure_ascii=False))
# Non-streaming interfaces are not supported yet
stream_type = True if data.get("stream") else False
if not stream_type:
return jsonify({"status": 400, "msg": "Non-streaming requests are not supported, please use `stream=True`."})
# Only accept the last user information
# openai['model'] ~ MetaGPT['agent']
last_message = data["messages"][-1]
model = data["model"]
# write_tutorial
if model == "write_tutorial":
return Response(write_tutorial(last_message), mimetype="text/plain")
else:
return jsonify({"status": 400, "msg": "No suitable agent found."})
@app.route("/download/<path:filename>")
def download_file(filename):
return send_from_directory(TUTORIAL_PATH, filename, as_attachment=True)
if __name__ == "__main__":
"""
curl https://$server_address:$server_port/v1/chat/completions -X POST -d '{
"model": "write_tutorial",
"stream": true,
"messages": [
{
"role": "user",
"content": "Write a tutorial about MySQL"
}
]
}'
"""
server_port = 7860
server_address = socket.gethostbyname(socket.gethostname())
set_llm_stream_logfunc(stream_pipe_log)
stream_pipe_var: ContextVar[StreamPipe] = ContextVar("stream_pipe")
app.run(port=server_port, host=server_address)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/write_tutorial.py | examples/write_tutorial.py | #!/usr/bin/env python3
# _*_ coding: utf-8 _*_
"""
@Time : 2023/9/4 21:40:57
@Author : Stitch-z
@File : tutorial_assistant.py
"""
import asyncio
from metagpt.roles.tutorial_assistant import TutorialAssistant
async def main():
topic = "Write a tutorial about MySQL"
role = TutorialAssistant(language="Chinese")
await role.run(topic)
if __name__ == "__main__":
asyncio.run(main())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/write_design.py | examples/write_design.py | import asyncio
from metagpt.environment.mgx.mgx_env import MGXEnv
from metagpt.logs import logger
from metagpt.roles.architect import Architect
from metagpt.roles.di.team_leader import TeamLeader
from metagpt.schema import Message
async def main():
msg = "Write a TRD for a snake game"
env = MGXEnv()
env.add_roles([TeamLeader(), Architect()])
env.publish_message(Message(content=msg, role="user"))
tl = env.get_role("Mike")
await tl.run()
role = env.get_role("Bob")
result = await role.run(msg)
logger.info(result)
if __name__ == "__main__":
asyncio.run(main())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/search_google.py | examples/search_google.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/7 18:32
@Author : alexanderwu
@File : search_google.py
"""
import asyncio
from metagpt.roles import Searcher
async def main():
await Searcher().run("What are some good sun protection products?")
if __name__ == "__main__":
asyncio.run(main())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/dalle_gpt4v_agent.py | examples/dalle_gpt4v_agent.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : use gpt4v to improve prompt and draw image with dall-e-3
"""set `model: "gpt-4-vision-preview"` in `config2.yaml` first"""
import asyncio
from PIL import Image
from metagpt.actions.action import Action
from metagpt.logs import logger
from metagpt.roles.role import Role
from metagpt.schema import Message
from metagpt.utils.common import encode_image
class GenAndImproveImageAction(Action):
save_image: bool = True
async def generate_image(self, prompt: str) -> Image:
imgs = await self.llm.gen_image(model="dall-e-3", prompt=prompt)
return imgs[0]
async def refine_prompt(self, old_prompt: str, image: Image) -> str:
msg = (
f"You are a creative painter, with the given generated image and old prompt: {old_prompt}, "
f"please refine the prompt and generate new one. Just output the new prompt."
)
b64_img = encode_image(image)
new_prompt = await self.llm.aask(msg=msg, images=[b64_img])
return new_prompt
async def evaluate_images(self, old_prompt: str, images: list[Image]) -> str:
msg = (
"With the prompt and two generated image, to judge if the second one is better than the first one. "
"If so, just output True else output False"
)
b64_imgs = [encode_image(img) for img in images]
res = await self.llm.aask(msg=msg, images=b64_imgs)
return res
async def run(self, messages: list[Message]) -> str:
prompt = messages[-1].content
old_img: Image = await self.generate_image(prompt)
new_prompt = await self.refine_prompt(old_prompt=prompt, image=old_img)
logger.info(f"original prompt: {prompt}")
logger.info(f"refined prompt: {new_prompt}")
new_img: Image = await self.generate_image(new_prompt)
if self.save_image:
old_img.save("./img_by-dall-e_old.png")
new_img.save("./img_by-dall-e_new.png")
res = await self.evaluate_images(old_prompt=prompt, images=[old_img, new_img])
opinion = f"The second generated image is better than the first one: {res}"
logger.info(f"evaluate opinion: {opinion}")
return opinion
class Painter(Role):
name: str = "MaLiang"
profile: str = "Painter"
goal: str = "to generate fine painting"
def __init__(self, **data):
super().__init__(**data)
self.set_actions([GenAndImproveImageAction])
async def main():
role = Painter()
await role.run(with_message="a girl with flowers")
if __name__ == "__main__":
asyncio.run(main())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/build_customized_agent.py | examples/build_customized_agent.py | """
Filename: MetaGPT/examples/build_customized_agent.py
Created Date: Tuesday, September 19th 2023, 6:52:25 pm
Author: garylin2099
"""
import asyncio
import re
import subprocess
import fire
from metagpt.actions import Action
from metagpt.logs import logger
from metagpt.roles.role import Role, RoleReactMode
from metagpt.schema import Message
class SimpleWriteCode(Action):
PROMPT_TEMPLATE: str = """
Write a python function that can {instruction} and provide two runnable test cases.
Return ```python your_code_here ``` with NO other texts,
your code:
"""
name: str = "SimpleWriteCode"
async def run(self, instruction: str):
prompt = self.PROMPT_TEMPLATE.format(instruction=instruction)
rsp = await self._aask(prompt)
code_text = SimpleWriteCode.parse_code(rsp)
return code_text
@staticmethod
def parse_code(rsp):
pattern = r"```python(.*)```"
match = re.search(pattern, rsp, re.DOTALL)
code_text = match.group(1) if match else rsp
return code_text
class SimpleRunCode(Action):
name: str = "SimpleRunCode"
async def run(self, code_text: str):
result = subprocess.run(["python3", "-c", code_text], capture_output=True, text=True)
code_result = result.stdout
logger.info(f"{code_result=}")
return code_result
class SimpleCoder(Role):
name: str = "Alice"
profile: str = "SimpleCoder"
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.set_actions([SimpleWriteCode])
async def _act(self) -> Message:
logger.info(f"{self._setting}: to do {self.rc.todo}({self.rc.todo.name})")
todo = self.rc.todo # todo will be SimpleWriteCode()
msg = self.get_memories(k=1)[0] # find the most recent messages
code_text = await todo.run(msg.content)
msg = Message(content=code_text, role=self.profile, cause_by=type(todo))
return msg
class RunnableCoder(Role):
name: str = "Alice"
profile: str = "RunnableCoder"
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.set_actions([SimpleWriteCode, SimpleRunCode])
self._set_react_mode(react_mode=RoleReactMode.BY_ORDER.value)
async def _act(self) -> Message:
logger.info(f"{self._setting}: to do {self.rc.todo}({self.rc.todo.name})")
# By choosing the Action by order under the hood
# todo will be first SimpleWriteCode() then SimpleRunCode()
todo = self.rc.todo
msg = self.get_memories(k=1)[0] # find the most k recent messages
result = await todo.run(msg.content)
msg = Message(content=result, role=self.profile, cause_by=type(todo))
self.rc.memory.add(msg)
return msg
def main(msg="write a function that calculates the product of a list and run it"):
# role = SimpleCoder()
role = RunnableCoder()
logger.info(msg)
result = asyncio.run(role.run(msg))
logger.info(result)
if __name__ == "__main__":
fire.Fire(main)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/mgx_write_project_framework.py | examples/mgx_write_project_framework.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/6/13
@Author : mashenquan
@File : write_project_framework.py
@Desc : The implementation of RFC243. https://deepwisdom.feishu.cn/wiki/QobGwPkImijoyukBUKHcrYetnBb
"""
import asyncio
import json
import uuid
from json import JSONDecodeError
from pathlib import Path
from typing import Dict, List
import typer
from pydantic import BaseModel
from metagpt.config2 import Config
from metagpt.const import DEFAULT_WORKSPACE_ROOT
from metagpt.context import Context
from metagpt.environment import Environment
from metagpt.environment.mgx.mgx_env import MGXEnv
from metagpt.logs import logger
from metagpt.roles import Architect
from metagpt.roles.di.team_leader import TeamLeader
from metagpt.schema import AIMessage, UserMessage
from metagpt.strategy.experience_retriever import TRDToolExpRetriever
from metagpt.utils.common import aread
app = typer.Typer(add_completion=False)
class EnvBuilder(BaseModel):
context: Context
user_requirements: List[str]
actors: Dict[str, str]
technical_constraint: str
output_dir: Path
def build(self) -> Environment:
env = MGXEnv(context=self.context)
team_leader = TeamLeader()
architect = Architect(experience_retriever=TRDToolExpRetriever())
# Prepare context
use_case_actors = "".join([f"- {v}: {k}\n" for k, v in self.actors.items()])
msg = """
The content of "Actor, System, External System" provides an explanation of actors and systems that appear in UML Use Case diagram.
## Actor, System, External System
{use_case_actors}
"""
architect.rc.memory.add(AIMessage(content=msg.format(use_case_actors=use_case_actors)))
# Prepare technical requirements
msg = """
"Additional Technical Requirements" specifies the additional technical requirements that the generated software framework code must meet.
## Additional Technical Requirements
{technical_requirements}
"""
architect.rc.memory.add(AIMessage(content=msg.format(technical_requirements=self.technical_constraint)))
env.add_roles([team_leader, architect])
return env
async def develop(
context: Context,
user_requirement_filename: str,
actors_filename: str,
constraint_filename: str,
output_dir: str,
):
output_dir = Path(output_dir) if output_dir else DEFAULT_WORKSPACE_ROOT / uuid.uuid4().hex
v = await aread(filename=user_requirement_filename)
try:
user_requirements = json.loads(v)
except JSONDecodeError:
user_requirements = [v]
v = await aread(filename=actors_filename)
actors = json.loads(v)
technical_constraint = await aread(filename=constraint_filename)
env_builder = EnvBuilder(
context=context,
user_requirements=user_requirements,
actors=actors,
technical_constraint=technical_constraint,
output_dir=output_dir,
)
env = env_builder.build()
msg = """
Given the user requirement of "User Requirements", write out the software framework.
## User Requirements
{user_requirements}
"""
env.publish_message(
UserMessage(content=msg.format(user_requirements="\n".join(user_requirements)), send_to="Bob"),
user_defined_recipient="Bob",
)
while not env.is_idle:
await env.run()
@app.command()
def startup(
user_requirement_filename: str = typer.Argument(..., help="The filename of the user requirements."),
actors_filename: str = typer.Argument(..., help="The filename of UML use case actors description."),
llm_config: str = typer.Option(default="", help="Low-cost LLM config"),
constraint_filename: str = typer.Option(default="", help="What technical dependency constraints are."),
output_dir: str = typer.Option(default="", help="Output directory."),
):
if llm_config and Path(llm_config).exists():
config = Config.from_yaml_file(Path(llm_config))
else:
logger.info("GPT 4 turbo is recommended")
config = Config.default()
ctx = Context(config=config)
asyncio.run(develop(ctx, user_requirement_filename, actors_filename, constraint_filename, output_dir))
if __name__ == "__main__":
app()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/use_off_the_shelf_agent.py | examples/use_off_the_shelf_agent.py | """
Filename: MetaGPT/examples/use_off_the_shelf_agent.py
Created Date: Tuesday, September 19th 2023, 6:52:25 pm
Author: garylin2099
"""
import asyncio
from metagpt.environment.mgx.mgx_env import MGXEnv
from metagpt.logs import logger
from metagpt.roles.di.team_leader import TeamLeader
from metagpt.roles.product_manager import ProductManager
from metagpt.schema import Message
async def main():
msg = "Write a PRD for a snake game"
env = MGXEnv()
env.add_roles([TeamLeader(), ProductManager()])
env.publish_message(Message(content=msg, role="user"))
tl = env.get_role("Mike")
await tl.run()
role = env.get_role("Alice")
result = await role.run(msg)
logger.info(result.content[:100])
if __name__ == "__main__":
asyncio.run(main())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/search_with_specific_engine.py | examples/search_with_specific_engine.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
import asyncio
from metagpt.config2 import Config
from metagpt.roles import Searcher
from metagpt.tools.search_engine import SearchEngine
async def main():
question = "What are the most interesting human facts?"
search = Config.default().search
kwargs = search.model_dump()
await Searcher(search_engine=SearchEngine(engine=search.api_type, **kwargs)).run(question)
if __name__ == "__main__":
asyncio.run(main())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/build_customized_multi_agents.py | examples/build_customized_multi_agents.py | """
Filename: MetaGPT/examples/build_customized_multi_agents.py
Created Date: Wednesday, November 15th 2023, 7:12:39 pm
Author: garylin2099
"""
import re
import fire
from metagpt.actions import Action, UserRequirement
from metagpt.logs import logger
from metagpt.roles import Role
from metagpt.schema import Message
from metagpt.team import Team
def parse_code(rsp):
pattern = r"```python(.*)```"
match = re.search(pattern, rsp, re.DOTALL)
code_text = match.group(1) if match else rsp
return code_text
class SimpleWriteCode(Action):
PROMPT_TEMPLATE: str = """
Write a python function that can {instruction}.
Return ```python your_code_here ``` with NO other texts,
your code:
"""
name: str = "SimpleWriteCode"
async def run(self, instruction: str):
prompt = self.PROMPT_TEMPLATE.format(instruction=instruction)
rsp = await self._aask(prompt)
code_text = parse_code(rsp)
return code_text
class SimpleCoder(Role):
name: str = "Alice"
profile: str = "SimpleCoder"
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._watch([UserRequirement])
self.set_actions([SimpleWriteCode])
class SimpleWriteTest(Action):
PROMPT_TEMPLATE: str = """
Context: {context}
Write {k} unit tests using pytest for the given function, assuming you have imported it.
Return ```python your_code_here ``` with NO other texts,
your code:
"""
name: str = "SimpleWriteTest"
async def run(self, context: str, k: int = 3):
prompt = self.PROMPT_TEMPLATE.format(context=context, k=k)
rsp = await self._aask(prompt)
code_text = parse_code(rsp)
return code_text
class SimpleTester(Role):
name: str = "Bob"
profile: str = "SimpleTester"
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.set_actions([SimpleWriteTest])
# self._watch([SimpleWriteCode])
self._watch([SimpleWriteCode, SimpleWriteReview]) # feel free to try this too
async def _act(self) -> Message:
logger.info(f"{self._setting}: to do {self.rc.todo}({self.rc.todo.name})")
todo = self.rc.todo
# context = self.get_memories(k=1)[0].content # use the most recent memory as context
context = self.get_memories() # use all memories as context
code_text = await todo.run(context, k=5) # specify arguments
msg = Message(content=code_text, role=self.profile, cause_by=type(todo))
return msg
class SimpleWriteReview(Action):
PROMPT_TEMPLATE: str = """
Context: {context}
Review the test cases and provide one critical comments:
"""
name: str = "SimpleWriteReview"
async def run(self, context: str):
prompt = self.PROMPT_TEMPLATE.format(context=context)
rsp = await self._aask(prompt)
return rsp
class SimpleReviewer(Role):
name: str = "Charlie"
profile: str = "SimpleReviewer"
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.set_actions([SimpleWriteReview])
self._watch([SimpleWriteTest])
async def main(
idea: str = "write a function that calculates the product of a list",
investment: float = 3.0,
n_round: int = 5,
add_human: bool = False,
):
logger.info(idea)
team = Team()
team.hire(
[
SimpleCoder(),
SimpleTester(),
SimpleReviewer(is_human=add_human),
]
)
team.invest(investment=investment)
team.run_project(idea)
await team.run(n_round=n_round)
if __name__ == "__main__":
fire.Fire(main)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/serialize_model.py | examples/serialize_model.py | from metagpt.environment.mgx.mgx_env import MGXEnv
from metagpt.logs import logger
def main():
"""Demonstrates serialization and deserialization using SerializationMixin.
This example creates an instance of MGXEnv, serializes it to a file,
and then deserializes it back to an instance.
If executed correctly, the following log messages will be output:
MGXEnv serialization successful. File saved at: /.../workspace/storage/MGXEnv.json
MGXEnv deserialization successful. Instance created from file: /.../workspace/storage/MGXEnv.json
The instance is MGXEnv()
"""
env = MGXEnv()
env.serialize()
env: MGXEnv = MGXEnv.deserialize()
logger.info(f"The instance is {repr(env)}")
if __name__ == "__main__":
main()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/hello_world.py | examples/hello_world.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/6 14:13
@Author : alexanderwu
@File : hello_world.py
"""
import asyncio
from metagpt.llm import LLM
from metagpt.logs import logger
async def ask_and_print(question: str, llm: LLM, system_prompt) -> str:
logger.info(f"Q: {question}")
rsp = await llm.aask(question, system_msgs=[system_prompt], stream=True)
if hasattr(llm, "reasoning_content") and llm.reasoning_content:
logger.info(f"A reasoning: {llm.reasoning_content}")
logger.info(f"A: {rsp}")
return rsp
async def lowlevel_api_example(llm: LLM):
logger.info("low level api example")
logger.info(await llm.aask_batch(["hi", "write python hello world."]))
hello_msg = [{"role": "user", "content": "count from 1 to 10. split by newline."}]
logger.info(await llm.acompletion(hello_msg))
logger.info(await llm.acompletion_text(hello_msg))
# streaming mode, much slower
await llm.acompletion_text(hello_msg, stream=True)
# check completion if exist to test llm complete functions
if hasattr(llm, "completion"):
logger.info(llm.completion(hello_msg))
async def main():
llm = LLM()
await ask_and_print("what's your name?", llm, "I'm a helpful AI assistant.")
await ask_and_print("who are you?", llm, "just answer 'I am a robot' if the question is 'who are you'")
await lowlevel_api_example(llm)
if __name__ == "__main__":
asyncio.run(main())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/cr.py | examples/cr.py | import fire
from metagpt.roles.di.engineer2 import Engineer2
from metagpt.tools.libs.cr import CodeReview
async def main(msg):
role = Engineer2(tools=["Plan", "Editor:write,read", "RoleZero", "ValidateAndRewriteCode", "CodeReview"])
cr = CodeReview()
role.tool_execution_map.update({"CodeReview.review": cr.review, "CodeReview.fix": cr.fix})
await role.run(msg)
if __name__ == "__main__":
fire.Fire(main)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/search_enhanced_qa.py | examples/search_enhanced_qa.py | """
This script demonstrates how to use the SearchEnhancedQA action to answer questions
by leveraging web search results. It showcases a simple example of querying about
the current weather in Beijing.
The SearchEnhancedQA action combines web search capabilities with natural language
processing to provide informative answers to user queries.
"""
import asyncio
from metagpt.actions.search_enhanced_qa import SearchEnhancedQA
async def main():
"""Runs a sample query through SearchEnhancedQA and prints the result."""
action = SearchEnhancedQA()
query = "What is the weather like in Beijing today?"
answer = await action.run(query)
print(f"The answer to '{query}' is:\n\n{answer}")
if __name__ == "__main__":
asyncio.run(main())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/debate_simple.py | examples/debate_simple.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/12/22
@Author : alexanderwu
@File : debate_simple.py
"""
import asyncio
from metagpt.actions import Action
from metagpt.config2 import Config
from metagpt.environment import Environment
from metagpt.roles import Role
from metagpt.team import Team
gpt35 = Config.default()
gpt35.llm.model = "gpt-3.5-turbo"
gpt4 = Config.default()
gpt4.llm.model = "gpt-4-turbo"
action1 = Action(config=gpt4, name="AlexSay", instruction="Express your opinion with emotion and don't repeat it")
action2 = Action(config=gpt35, name="BobSay", instruction="Express your opinion with emotion and don't repeat it")
alex = Role(name="Alex", profile="Democratic candidate", goal="Win the election", actions=[action1], watch=[action2])
bob = Role(name="Bob", profile="Republican candidate", goal="Win the election", actions=[action2], watch=[action1])
env = Environment(desc="US election live broadcast")
team = Team(investment=10.0, env=env, roles=[alex, bob])
asyncio.run(team.run(idea="Topic: climate change. Under 80 words per message.", send_to="Alex", n_round=5))
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/ping.py | examples/ping.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/4/22 14:28
@Author : alexanderwu
@File : ping.py
"""
import asyncio
from metagpt.llm import LLM
from metagpt.logs import logger
async def ask_and_print(question: str, llm: LLM, system_prompt) -> str:
logger.info(f"Q: {question}")
rsp = await llm.aask(question, system_msgs=[system_prompt])
logger.info(f"A: {rsp}")
logger.info("\n")
return rsp
async def main():
llm = LLM()
await ask_and_print("ping?", llm, "Just answer pong when ping.")
if __name__ == "__main__":
asyncio.run(main())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/agent_creator.py | examples/agent_creator.py | """
Filename: MetaGPT/examples/agent_creator.py
Created Date: Tuesday, September 12th 2023, 3:28:37 pm
Author: garylin2099
"""
import re
from metagpt.actions import Action
from metagpt.config2 import config
from metagpt.const import METAGPT_ROOT
from metagpt.logs import logger
from metagpt.roles import Role
from metagpt.schema import Message
EXAMPLE_CODE_FILE = METAGPT_ROOT / "examples/build_customized_agent.py"
MULTI_ACTION_AGENT_CODE_EXAMPLE = EXAMPLE_CODE_FILE.read_text()
class CreateAgent(Action):
PROMPT_TEMPLATE: str = """
### BACKGROUND
You are using an agent framework called metagpt to write agents capable of different actions,
the usage of metagpt can be illustrated by the following example:
### EXAMPLE STARTS AT THIS LINE
{example}
### EXAMPLE ENDS AT THIS LINE
### TASK
Now you should create an agent with appropriate actions based on the instruction, consider carefully about
the PROMPT_TEMPLATE of all actions and when to call self._aask()
### INSTRUCTION
{instruction}
### YOUR CODE
Return ```python your_code_here ``` with NO other texts, your code:
"""
async def run(self, example: str, instruction: str):
prompt = self.PROMPT_TEMPLATE.format(example=example, instruction=instruction)
# logger.info(prompt)
rsp = await self._aask(prompt)
code_text = CreateAgent.parse_code(rsp)
return code_text
@staticmethod
def parse_code(rsp):
pattern = r"```python(.*)```"
match = re.search(pattern, rsp, re.DOTALL)
code_text = match.group(1) if match else ""
config.workspace.path.mkdir(parents=True, exist_ok=True)
new_file = config.workspace.path / "agent_created_agent.py"
new_file.write_text(code_text)
return code_text
class AgentCreator(Role):
name: str = "Matrix"
profile: str = "AgentCreator"
agent_template: str = MULTI_ACTION_AGENT_CODE_EXAMPLE
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.set_actions([CreateAgent])
async def _act(self) -> Message:
logger.info(f"{self._setting}: to do {self.rc.todo}({self.rc.todo.name})")
todo = self.rc.todo
msg = self.rc.memory.get()[-1]
instruction = msg.content
code_text = await CreateAgent().run(example=self.agent_template, instruction=instruction)
msg = Message(content=code_text, role=self.profile, cause_by=todo)
return msg
if __name__ == "__main__":
import asyncio
async def main():
agent_template = MULTI_ACTION_AGENT_CODE_EXAMPLE
creator = AgentCreator(agent_template=agent_template)
msg = """
Write an agent called SimpleTester that will take any code snippet (str) and do the following:
1. write a testing code (str) for testing the given code snippet, save the testing code as a .py file in the current working directory;
2. run the testing code.
You can use pytest as the testing framework.
"""
await creator.run(msg)
asyncio.run(main())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/write_game_code.py | examples/write_game_code.py | import asyncio
import time
from metagpt.environment.mgx.mgx_env import MGXEnv
from metagpt.roles.di.engineer2 import Engineer2
from metagpt.roles.di.team_leader import TeamLeader
from metagpt.schema import Message
async def main(requirement="", user_defined_recipient="", enable_human_input=False, allow_idle_time=30):
env = MGXEnv()
env.add_roles([TeamLeader(), Engineer2()])
msg = Message(content=requirement)
env.attach_images(msg) # attach image content if applicable
if user_defined_recipient:
msg.send_to = {user_defined_recipient}
env.publish_message(msg, user_defined_recipient=user_defined_recipient)
else:
env.publish_message(msg)
allow_idle_time = allow_idle_time if enable_human_input else 1
start_time = time.time()
while time.time() - start_time < allow_idle_time:
if not env.is_idle:
await env.run()
start_time = time.time() # reset start time
if __name__ == "__main__":
requirement = "Write code for a 2048 game"
user_defined_recipient = ""
asyncio.run(
main(
requirement=requirement,
user_defined_recipient=user_defined_recipient,
enable_human_input=False,
allow_idle_time=60,
)
)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/write_novel.py | examples/write_novel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/2/1 12:01
@Author : alexanderwu
@File : write_novel.py
"""
import asyncio
from typing import List
from pydantic import BaseModel, Field
from metagpt.actions.action_node import ActionNode
from metagpt.llm import LLM
class Chapter(BaseModel):
name: str = Field(default="Chapter 1", description="The name of the chapter.")
content: str = Field(default="...", description="The content of the chapter. No more than 1000 words.")
class Chapters(BaseModel):
chapters: List[Chapter] = Field(
default=[
{"name": "Chapter 1", "content": "..."},
{"name": "Chapter 2", "content": "..."},
{"name": "Chapter 3", "content": "..."},
],
description="The chapters of the novel.",
)
class Novel(BaseModel):
name: str = Field(default="The Lord of the Rings", description="The name of the novel.")
user_group: str = Field(default="...", description="The user group of the novel.")
outlines: List[str] = Field(
default=["Chapter 1: ...", "Chapter 2: ...", "Chapter 3: ..."],
description="The outlines of the novel. No more than 10 chapters.",
)
background: str = Field(default="...", description="The background of the novel.")
character_names: List[str] = Field(default=["Frodo", "Gandalf", "Sauron"], description="The characters.")
conflict: str = Field(default="...", description="The conflict of the characters.")
plot: str = Field(default="...", description="The plot of the novel.")
ending: str = Field(default="...", description="The ending of the novel.")
async def generate_novel():
instruction = (
"Write a novel named 'Reborn in Skyrim'. "
"Fill the empty nodes with your own ideas. Be creative! Use your own words!"
"I will tip you $100,000 if you write a good novel."
)
novel_node = await ActionNode.from_pydantic(Novel).fill(req=instruction, llm=LLM())
chap_node = await ActionNode.from_pydantic(Chapters).fill(
req=f"### instruction\n{instruction}\n### novel\n{novel_node.content}", llm=LLM()
)
print(chap_node.instruct_content)
asyncio.run(generate_novel())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/invoice_ocr.py | examples/invoice_ocr.py | #!/usr/bin/env python3
# _*_ coding: utf-8 _*_
"""
@Time : 2023/9/21 21:40:57
@Author : Stitch-z
@File : invoice_ocr.py
"""
import asyncio
from pathlib import Path
from metagpt.roles.invoice_ocr_assistant import InvoiceOCRAssistant, InvoicePath
from metagpt.schema import Message
async def main():
relative_paths = [
Path("../tests/data/invoices/invoice-1.pdf"),
Path("../tests/data/invoices/invoice-2.png"),
Path("../tests/data/invoices/invoice-3.jpg"),
Path("../tests/data/invoices/invoice-4.zip"),
]
# The absolute path of the file
absolute_file_paths = [Path.cwd() / path for path in relative_paths]
for path in absolute_file_paths:
role = InvoiceOCRAssistant()
await role.run(Message(content="Invoicing date", instruct_content=InvoicePath(file_path=path)))
if __name__ == "__main__":
asyncio.run(main())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/research.py | examples/research.py | #!/usr/bin/env python
import asyncio
from metagpt.roles.researcher import RESEARCH_PATH, Researcher
async def main():
topic = "dataiku vs. datarobot"
role = Researcher(language="en-us")
await role.run(topic)
print(f"save report to {RESEARCH_PATH / f'{topic}.md'}.")
if __name__ == "__main__":
asyncio.run(main())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/debate.py | examples/debate.py | """
Filename: MetaGPT/examples/debate.py
Created Date: Tuesday, September 19th 2023, 6:52:25 pm
Author: garylin2099
@Modified By: mashenquan, 2023-11-1. In accordance with Chapter 2.1.3 of RFC 116, modify the data type of the `send_to`
value of the `Message` object; modify the argument type of `get_by_actions`.
"""
import asyncio
import platform
from typing import Any
import fire
from metagpt.actions import Action, UserRequirement
from metagpt.logs import logger
from metagpt.roles import Role
from metagpt.schema import Message
from metagpt.team import Team
class SpeakAloud(Action):
"""Action: Speak out aloud in a debate (quarrel)"""
PROMPT_TEMPLATE: str = """
## BACKGROUND
Suppose you are {name}, you are in a debate with {opponent_name}.
## DEBATE HISTORY
Previous rounds:
{context}
## YOUR TURN
Now it's your turn, you should closely respond to your opponent's latest argument, state your position, defend your arguments, and attack your opponent's arguments,
craft a strong and emotional response in 80 words, in {name}'s rhetoric and viewpoints, your will argue:
"""
name: str = "SpeakAloud"
async def run(self, context: str, name: str, opponent_name: str):
prompt = self.PROMPT_TEMPLATE.format(context=context, name=name, opponent_name=opponent_name)
# logger.info(prompt)
rsp = await self._aask(prompt)
return rsp
class Debator(Role):
name: str = ""
profile: str = ""
opponent_name: str = ""
def __init__(self, **data: Any):
super().__init__(**data)
self.set_actions([SpeakAloud])
self._watch([UserRequirement, SpeakAloud])
async def _observe(self) -> int:
await super()._observe()
# accept messages sent (from opponent) to self, disregard own messages from the last round
self.rc.news = [msg for msg in self.rc.news if msg.send_to == {self.name}]
return len(self.rc.news)
async def _act(self) -> Message:
logger.info(f"{self._setting}: to do {self.rc.todo}({self.rc.todo.name})")
todo = self.rc.todo # An instance of SpeakAloud
memories = self.get_memories()
context = "\n".join(f"{msg.sent_from}: {msg.content}" for msg in memories)
# print(context)
rsp = await todo.run(context=context, name=self.name, opponent_name=self.opponent_name)
msg = Message(
content=rsp,
role=self.profile,
cause_by=type(todo),
sent_from=self.name,
send_to=self.opponent_name,
)
self.rc.memory.add(msg)
return msg
async def debate(idea: str, investment: float = 3.0, n_round: int = 5):
"""Run a team of presidents and watch they quarrel. :)"""
Biden = Debator(name="Biden", profile="Democrat", opponent_name="Trump")
Trump = Debator(name="Trump", profile="Republican", opponent_name="Biden")
team = Team()
team.hire([Biden, Trump])
team.invest(investment)
team.run_project(idea, send_to="Biden") # send debate topic to Biden and let him speak first
await team.run(n_round=n_round)
def main(idea: str, investment: float = 3.0, n_round: int = 10):
"""
:param idea: Debate topic, such as "Topic: The U.S. should commit more in climate change fighting"
or "Trump: Climate change is a hoax"
:param investment: contribute a certain dollar amount to watch the debate
:param n_round: maximum rounds of the debate
:return:
"""
if platform.system() == "Windows":
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
asyncio.run(debate(idea, investment, n_round))
if __name__ == "__main__":
fire.Fire(main) # run as python debate.py --idea="TOPIC" --investment=3.0 --n_round=5
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/llm_vision.py | examples/llm_vision.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : example to run the ability of LLM vision
import asyncio
from pathlib import Path
from metagpt.llm import LLM
from metagpt.utils.common import encode_image
async def main():
llm = LLM()
# check if the configured llm supports llm-vision capacity. If not, it will throw a error
invoice_path = Path(__file__).parent.joinpath("..", "tests", "data", "invoices", "invoice-2.png")
img_base64 = encode_image(invoice_path)
res = await llm.aask(msg="return `True` if this image might be a invoice, or return `False`", images=[img_base64])
assert ("true" in res.lower()) or ("invoice" in res.lower())
if __name__ == "__main__":
asyncio.run(main())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/android_assistant/run_assistant.py | examples/android_assistant/run_assistant.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : the entry of android assistant including learning and acting stage
# See the usage README inside `metagpt/ext/android_assistant`
# README see `metagpt/ext/android_assistant/README.md`
import asyncio
from pathlib import Path
import typer
from metagpt.config2 import config
from metagpt.environment.android.android_env import AndroidEnv
from metagpt.ext.android_assistant.roles.android_assistant import AndroidAssistant
from metagpt.team import Team
app = typer.Typer(add_completion=False, pretty_exceptions_show_locals=False)
@app.command("", help="Run a Android Assistant")
def startup(
task_desc: str = typer.Argument(help="the task description you want the android assistant to learn or act"),
n_round: int = typer.Option(default=20, help="The max round to do an app operation task."),
stage: str = typer.Option(default="learn", help="stage: learn / act"),
mode: str = typer.Option(default="auto", help="mode: auto / manual , when state=learn"),
app_name: str = typer.Option(default="demo", help="the name of app you want to run"),
investment: float = typer.Option(default=5.0, help="Dollar amount to invest in the AI company."),
refine_doc: bool = typer.Option(
default=False, help="Refine existing operation docs based on the latest observation if True."
),
min_dist: int = typer.Option(
default=30, help="The minimum distance between elements to prevent overlapping during the labeling process."
),
android_screenshot_dir: str = typer.Option(
default="/sdcard/Pictures/Screenshots",
help="The path to store screenshots on android device. Make sure it exists.",
),
android_xml_dir: str = typer.Option(
default="/sdcard",
help="The path to store xml files for determining UI elements localtion. Make sure it exists.",
),
device_id: str = typer.Option(default="emulator-5554", help="The Android device_id"),
):
config.extra = {
"stage": stage,
"mode": mode,
"app_name": app_name,
"task_desc": task_desc,
"refine_doc": refine_doc,
"min_dist": min_dist,
"android_screenshot_dir": android_screenshot_dir,
"android_xml_dir": android_xml_dir,
"device_id": device_id,
}
team = Team(
env=AndroidEnv(
device_id=device_id,
xml_dir=Path(android_xml_dir),
screenshot_dir=Path(android_screenshot_dir),
)
)
team.hire([AndroidAssistant(output_root_dir=Path(__file__).parent)])
team.invest(investment)
team.run_project(idea=task_desc)
asyncio.run(team.run(n_round=n_round))
if __name__ == "__main__":
app()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/spo/optimize.py | examples/spo/optimize.py | import argparse
from metagpt.ext.spo.components.optimizer import PromptOptimizer
from metagpt.ext.spo.utils.llm_client import SPO_LLM
def parse_args():
parser = argparse.ArgumentParser(description="SPO PromptOptimizer CLI")
# LLM parameter
parser.add_argument("--opt-model", type=str, default="claude-3-5-sonnet-20240620", help="Model for optimization")
parser.add_argument("--opt-temp", type=float, default=0.7, help="Temperature for optimization")
parser.add_argument("--eval-model", type=str, default="gpt-4o-mini", help="Model for evaluation")
parser.add_argument("--eval-temp", type=float, default=0.3, help="Temperature for evaluation")
parser.add_argument("--exec-model", type=str, default="gpt-4o-mini", help="Model for execution")
parser.add_argument("--exec-temp", type=float, default=0, help="Temperature for execution")
# PromptOptimizer parameter
parser.add_argument("--workspace", type=str, default="workspace", help="Path for optimized output")
parser.add_argument("--initial-round", type=int, default=1, help="Initial round number")
parser.add_argument("--max-rounds", type=int, default=10, help="Maximum number of rounds")
parser.add_argument("--template", type=str, default="Poem.yaml", help="Template file name")
parser.add_argument("--name", type=str, default="Poem", help="Project name")
return parser.parse_args()
def main():
args = parse_args()
SPO_LLM.initialize(
optimize_kwargs={"model": args.opt_model, "temperature": args.opt_temp},
evaluate_kwargs={"model": args.eval_model, "temperature": args.eval_temp},
execute_kwargs={"model": args.exec_model, "temperature": args.exec_temp},
)
optimizer = PromptOptimizer(
optimized_path=args.workspace,
initial_round=args.initial_round,
max_rounds=args.max_rounds,
template=args.template,
name=args.name,
)
optimizer.optimize()
if __name__ == "__main__":
main()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/werewolf_game/start_game.py | examples/werewolf_game/start_game.py | import asyncio
import fire
from metagpt.ext.werewolf.roles import Guard, Moderator, Seer, Villager, Werewolf, Witch
from metagpt.ext.werewolf.roles.human_player import prepare_human_player
from metagpt.ext.werewolf.werewolf_game import WerewolfGame
from metagpt.logs import logger
async def start_game(
investment: float = 3.0,
n_round: int = 5,
shuffle: bool = True,
add_human: bool = False,
use_reflection: bool = True,
use_experience: bool = False,
use_memory_selection: bool = False,
new_experience_version: str = "",
):
game = WerewolfGame()
game_setup, players = game.env.init_game_setup(
role_uniq_objs=[Villager, Werewolf, Guard, Seer, Witch],
num_werewolf=2,
num_villager=2,
shuffle=shuffle,
add_human=add_human,
use_reflection=use_reflection,
use_experience=use_experience,
use_memory_selection=use_memory_selection,
new_experience_version=new_experience_version,
prepare_human_player=prepare_human_player,
)
logger.info(f"{game_setup}")
players = [Moderator()] + players
game.hire(players)
game.invest(investment)
game.run_project(game_setup)
await game.run(n_round=n_round)
def main(
investment: float = 20.0,
n_round: int = 100,
shuffle: bool = True,
add_human: bool = False,
use_reflection: bool = True,
use_experience: bool = False,
use_memory_selection: bool = False,
new_experience_version: str = "",
):
asyncio.run(
start_game(
investment,
n_round,
shuffle,
add_human,
use_reflection,
use_experience,
use_memory_selection,
new_experience_version,
)
)
if __name__ == "__main__":
fire.Fire(main)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/werewolf_game/evals/eval.py | examples/werewolf_game/evals/eval.py | """
Filename: MetaGPT/examples/werewolf_game/evals/eval.py
Created Date: Oct 18, 2023
Updated Date: Oct 24, 2023
Author: [Aria](https://github.com/ariafyy)
Info: eval the Voting Accuracy Rate of non_werewolves and Vote Difficulity
"""
import glob
import os
import re
from pathlib import Path
import numpy as np
import pandas as pd
from tqdm import tqdm
from utils import Utils
from metagpt.const import DEFAULT_WORKSPACE_ROOT, METAGPT_ROOT
from metagpt.environment.werewolf.const import RoleType
class Vote:
"""Vote Evaluation"""
def __init__(self):
self.OUT_PATH = DEFAULT_WORKSPACE_ROOT / "outputs"
os.makedirs(self.OUT_PATH, exist_ok=True)
self.SUB_FOLDER_LIST = ["01-10", "11-20", "21-30"]
def _get_log_fileslist(self, IN_PATH) -> list[str]:
files_list = []
for SUB_FOLDER in self.SUB_FOLDER_LIST:
files_list.extend(glob.glob(str(IN_PATH / SUB_FOLDER / "*.txt")))
return files_list
def extract_votes_from_logs(self, files_list: list):
for in_logfile in tqdm(files_list):
SUB_FOLDER = (Path(in_logfile).parent).stem
out_txtfile = self.OUT_PATH / "# {0}_{1}.txt".format(SUB_FOLDER, Path(in_logfile).stem)
Utils().pick_vote_log(in_logfile, out_txtfile)
votefiles_list = Utils().get_file_list(self.OUT_PATH)
return votefiles_list
@staticmethod
def parse_vote_text2chunks(text: str):
"""
parse each game vote log into text chunks
one chunk example:
['Player1', 'Player2', 'Player3', 'Player5', 'Player6']. Say ONLY: I vote to eliminate ...
Player1(Witch): 49 | I vote to eliminate Player5
Player2(Villager): 49 | I vote to eliminate Player5
Player3(Villager): 49 | I vote to eliminate Player5
Player5(Werewolf): 49 | I vote to eliminate Player6
Player6(Seer): 49 | I vote to eliminate Player5
"""
pattern = re.compile(r"""\[([^\]]+)\]. Say ONLY: I vote to eliminate ...""")
chunks = {}
chunk_id = 0
last_end = 0
for match in pattern.finditer(text):
start = match.start()
chunk = text[last_end:start]
chunks[f"vote_{chunk_id}"] = chunk.strip()
last_end = match.end()
chunk_id += 1
final_chunk = text[last_end:].strip()
if final_chunk:
chunks[f"vote_{chunk_id}"] = final_chunk
return chunks
def _vote_rate_players(self, text: str):
"""
# calculate the rate of goodteam vote werewolves
:example:
input:
['Player1', 'Player2', 'Player3', 'Player5', 'Player6']. Say ONLY: I vote to eliminate ...
Player1(Witch): 49 | I vote to eliminate Player5
Player2(Villager): 49 | I vote to eliminate Player5
Player3(Villager): 49 | I vote to eliminate Player5
Player5(Werewolf): 49 | I vote to eliminate Player6
Player6(Seer): 49 | I vote to eliminate Player5
output:
werewolves: ['Player5']
non_werewolves: ['Player1', 'Player2', 'Player3', 'Player6']
as you can see :Player2(Villager) and Player3(Villager) vote to eliminate Player5(Werewolf)
:return goodteam vote rateability: 100.00%
"""
pattern = re.compile(r"(\w+)\(([^\)]+)\): \d+ \| I vote to eliminate (\w+)")
# find all werewolves
werewolves = []
for match in pattern.finditer(text):
if match.group(2) == RoleType.WEREWOLF.value:
werewolves.append(match.group(1))
# find all non_werewolves
non_werewolves = []
for match in pattern.finditer(text):
if match.group(2) != RoleType.WEREWOLF.value:
non_werewolves.append(match.group(1))
num_non_werewolves = len(non_werewolves)
# count players other than werewolves made the correct votes
correct_votes = 0
for match in pattern.finditer(text):
if match.group(2) != RoleType.WEREWOLF.value and match.group(3) in werewolves:
correct_votes += 1
# cal the rateability of non_werewolves
rate = correct_votes / num_non_werewolves
good_vote_rate = round(rate, 2)
return {"good_vote_rate": good_vote_rate, "werewolves": werewolves, "non_werewolves": non_werewolves}
def get_goodteam_vote_rate(self, text: str) -> float:
goodteam_vote_rate = self._vote_rate_players(text)["good_vote_rate"]
return goodteam_vote_rate
def get_werewolves(self, text: str) -> list:
werewolves_list = self._vote_rate_players(text)["werewolves"]
return werewolves_list
def get_non_werewolves(self, text: str) -> list:
non_werewolves_list = self._vote_rate_players(text)["non_werewolves"]
return non_werewolves_list
def get_votewolf_difficulty(self, werewolves: list, non_werewolves: list) -> str:
num_living_wolfs = len(werewolves)
num_living_players = len(werewolves) + len(non_werewolves)
votewolf_difficulty = "_{0} / {1}".format(num_living_wolfs, num_living_players)
return votewolf_difficulty
def get_result_df(self, out_txtfile: str) -> pd.DataFrame:
"""
folder: sub folders for evals
file: evaluation file, each file represents one game
votes: the number of votes, eg. vote_1 represents the first vote of this game,
good_vote_rate:the rateability of a good person voting against a werewolf,
correct_votes / the total number of players other than werewolves
total_votes:the total number of votes cast
"""
with open(out_txtfile, "r") as out_file:
text = out_file.read()
chunks = self.parse_vote_text2chunks(text)
res = []
for k, v in chunks.items():
if v != "":
chunks_list = list(chunks.keys())
total_votes = len(chunks_list) - 1
werewolves = self.get_werewolves(v)
non_werewolves = self.get_non_werewolves(v)
good_vote_rate = self.get_goodteam_vote_rate(v)
votewolf_difficulty = self.get_votewolf_difficulty(werewolves, non_werewolves)
folder = Utils().filename_to_foldername(out_txtfile)
result = {
"folder": folder,
"file": Path(out_txtfile).stem + ".txt",
"vote_round": k,
"good_vote_rate": good_vote_rate,
"total_votes": total_votes,
"votewolf_difficulty": votewolf_difficulty,
}
res.append(result)
df = pd.DataFrame(res)
return df
def calc_avg_rate(self, IN_PATH) -> pd.DataFrame:
"""
get avg_rate for each game
avg_rate : the good_rate/total number of votes in the game
vote1_rate: First Round Voting Accuracy Rate
"""
infiles_list = self._get_log_fileslist(IN_PATH)
votefiles_list = self.extract_votes_from_logs(infiles_list)
df_list = [self._load_df_from_file(file) for file in votefiles_list]
combined_df = pd.concat(df_list, ignore_index=True)
# calculate the average good_vote_rate for each file
mean_rates = self._calculate_mean_rates(combined_df)
combined_df["avg_rate"] = combined_df["file"].map(mean_rates)
# calculate vote1 rate
vote1_rates = self._calc_vote1_rates(combined_df)
combined_df["vote1_rate"] = combined_df["folder"].map(vote1_rates.set_index("folder")["good_vote_rate"])
combined_df.loc[combined_df["vote_round"] != "vote_1", "vote1_rate"] = np.nan
combined_df["vote1_rate"] = combined_df["vote1_rate"].apply(self._format_rates)
combined_df["good_vote_rate"] = combined_df["good_vote_rate"].apply(self._format_rates)
combined_df["avg_rate"] = combined_df["avg_rate"].apply(self._format_rates)
combined_df.sort_values(["file"], ascending=True, inplace=True)
return combined_df
def _calc_vote1_rates(self, df):
df_vote1 = df[df["vote_round"] == "vote_1"]
vote1_rates = df_vote1.groupby("folder")["good_vote_rate"].mean().reset_index()
return vote1_rates
def _load_df_from_file(self, file):
return self.get_result_df(file)
def _calculate_mean_rates(self, df):
return df.groupby("file")["good_vote_rate"].mean()
def _format_rates(self, s):
return Utils().float_to_percent(s)
def get_eval_csv(self, IN_PATH, EVAL_RESULT):
"""
IN_PATH : parent folder of ["01-10", "11-20", "21-30"]
EVAL_RESULT : output csv file path
"""
combined_df = self.calc_avg_rate(IN_PATH)
combined_df.to_csv(EVAL_RESULT, index=False)
if __name__ == "__main__":
IN_PATH = METAGPT_ROOT / "examples/werewolf_game/evals"
EVAL_RESULT = DEFAULT_WORKSPACE_ROOT / "outputs" / "goodteam_vote_rate.csv"
Vote().get_eval_csv(IN_PATH, EVAL_RESULT)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/werewolf_game/evals/utils.py | examples/werewolf_game/evals/utils.py | """
Filename: MetaGPT/examples/werewolf_game/evals/utils.py
Created Date: Oct 11, 2023
Revised Date: Oct 20, 2023
Author: [Aria](https://github.com/ariafyy)
"""
import glob
import os
import re
from pathlib import Path
from metagpt.const import METAGPT_ROOT
class Utils:
"""Utils: utils of logs"""
@staticmethod
def polish_log(in_logfile, out_txtfile):
"""polish logs for evaluation"""
pattern_text = r"(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3}) \| (\w+) +\| ([\w\.]+:\w+:\d+) - (.*\S)"
pattern_player = r"(Player(\d{1}): \w+)"
pattern_start = False
json_start = False
with open(in_logfile, "r") as f, open(out_txtfile, "w") as out:
for line in f.readlines():
matches = re.match(pattern_text, line)
if matches:
message = matches.group(4).strip()
pattern_start = True
json_start = False
if (
"Moderator(Moderator) ready to InstructSpeak" not in message
and "Moderator(Moderator) ready to ParseSpeak" not in message
and "Total running cost:" not in message
):
out.write("- " + message + "\n")
else:
out.write("\n")
elif pattern_start and not matches:
if "gpt-4 may update over time" in line:
line = ""
out.write(line)
elif line.strip().startswith("{"):
out.write(line.strip())
json_start = True
elif json_start and not line.strip().endswith("}"):
out.write(line.strip())
elif json_start and line.strip().endswith("}"):
out.write(line.strip())
json_start = False
elif (
line.startswith("(User):") or line.startswith("********** STEP:") or re.search(pattern_player, line)
):
out.write(line)
else:
out.write("\n")
@staticmethod
def pick_vote_log(in_logfile, out_txtfile):
"""
pick the vote log from the log file.
ready to AnnounceGameResult serves as the 'HINT_TEXT ' which indicates the end of the game.
based on bservation and reflection, then discuss is not in vote session.
"""
pattern_vote = r"(Player\d+)\(([A-Za-z]+)\): (\d+) \| (I vote to eliminate Player\d+)"
ignore_text = """reflection"""
HINT_TEXT = r"ready to AnnounceGameResult"
pattern_moderator = r"\[([^\]]+)\]\. Say ONLY: I vote to eliminate ..."
in_valid_block = False
with open(in_logfile, "r") as f:
lines = f.read()
split_lines = lines.split(HINT_TEXT)
if len(split_lines) < 2:
print(f"Key text :{HINT_TEXT} not found in {in_logfile}")
return
relevant_lines = split_lines[1].split("\n")
with open(out_txtfile, "w") as out:
for line in relevant_lines:
if re.search(pattern_moderator, line):
in_valid_block = True
out.write(line.lstrip() + "\n")
elif in_valid_block and re.search(pattern_vote, line):
out.write(line + "\n")
elif ignore_text in line:
in_valid_block = False
@staticmethod
def get_file_list(path: str) -> list:
file_pattern = os.path.join(path, "*.txt")
files_list = glob.glob(file_pattern)
return files_list
@staticmethod
def filename_to_foldername(out_txtfile: str):
"""
convert filename into its parent folder name
input:"....../# 01-10_10132100.txt"
output:# 01-10
"""
s = Path(out_txtfile).stem
pattern_folder = r"([^_]*)_"
match = re.match(pattern_folder, s)
if match:
folder = match.group(1)
return folder
@staticmethod
def float_to_percent(decimal: float) -> str:
"""
input: 1.00
output: 100.00%
"""
percent = decimal * 100
return f"{percent:.2f}%"
if __name__ == "__main__":
in_logfile = METAGPT_ROOT / "logs/log.txt"
out_txtfile = "input your wish path"
# Utils().polish_log(in_logfile, out_txtfile)
Utils().pick_vote_log(in_logfile, out_txtfile)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/ui_with_chainlit/init_setup.py | examples/ui_with_chainlit/init_setup.py | import asyncio
import chainlit as cl
from metagpt.environment import Environment
from metagpt.logs import logger, set_llm_stream_logfunc
from metagpt.roles import Role
from metagpt.utils.common import any_to_name
def log_llm_stream_chainlit(msg):
# Stream the message token into Chainlit UI.
cl.run_sync(chainlit_message.stream_token(msg))
set_llm_stream_logfunc(func=log_llm_stream_chainlit)
class ChainlitEnv(Environment):
"""Chainlit Environment for UI Integration"""
async def run(self, k=1):
"""处理一次所有信息的运行
Process all Role runs at once
"""
for _ in range(k):
futures = []
for role in self.roles.values():
# Call role.run with chainlit configuration
future = self._chainlit_role_run(role=role)
futures.append(future)
await asyncio.gather(*futures)
logger.debug(f"is idle: {self.is_idle}")
async def _chainlit_role_run(self, role: Role) -> None:
"""To run the role with chainlit config
Args:
role (Role): metagpt.role.Role
"""
global chainlit_message
chainlit_message = cl.Message(content="")
message = await role.run()
# If message is from role._act() publish to UI.
if message is not None and message.content != "No actions taken yet":
# Convert a message from action node in json format
chainlit_message.content = await self._convert_message_to_markdownjson(message=chainlit_message.content)
# message content from which role and its action...
chainlit_message.content += f"---\n\nAction: `{any_to_name(message.cause_by)}` done by `{role._setting}`."
await chainlit_message.send()
# for clean view in UI
async def _convert_message_to_markdownjson(self, message: str) -> str:
"""If the message is from MetaGPT Action Node output, then
convert it into markdown json for clear view in UI.
Args:
message (str): message by role._act
Returns:
str: message in mardown from
"""
if message.startswith("[CONTENT]"):
return f"```json\n{message}\n```\n"
return message
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/ui_with_chainlit/__init__.py | examples/ui_with_chainlit/__init__.py | python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false | |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/ui_with_chainlit/app.py | examples/ui_with_chainlit/app.py | from pathlib import Path
import chainlit as cl
from init_setup import ChainlitEnv
from metagpt.roles import (
Architect,
Engineer,
ProductManager,
ProjectManager,
QaEngineer,
)
from metagpt.team import Team
# https://docs.chainlit.io/concepts/starters
@cl.set_chat_profiles
async def chat_profile() -> list[cl.ChatProfile]:
"""Generates a chat profile containing starter messages which can be triggered to run MetaGPT
Returns:
list[chainlit.ChatProfile]: List of Chat Profile
"""
return [
cl.ChatProfile(
name="MetaGPT",
icon="/public/MetaGPT-new-log.jpg",
markdown_description="It takes a **one line requirement** as input and outputs **user stories / competitive analysis / requirements / data structures / APIs / documents, etc.**, But `everything in UI`.",
starters=[
cl.Starter(
label="Create a 2048 Game",
message="Create a 2048 game",
icon="/public/2048.jpg",
),
cl.Starter(
label="Write a cli Blackjack Game",
message="Write a cli Blackjack Game",
icon="/public/blackjack.jpg",
),
],
)
]
# https://docs.chainlit.io/concepts/message
@cl.on_message
async def startup(message: cl.Message) -> None:
"""On Message in UI, Create a MetaGPT software company
Args:
message (chainlit.Message): message by chainlist
"""
idea = message.content
company = Team(env=ChainlitEnv())
# Similar to software_company.py
company.hire(
[
ProductManager(),
Architect(),
ProjectManager(),
Engineer(n_borg=5, use_code_review=True),
QaEngineer(),
]
)
company.invest(investment=3.0)
company.run_project(idea=idea)
await company.run(n_round=5)
workdir = Path(company.env.context.config.project_path)
files = [file.name for file in workdir.iterdir() if file.is_file()]
files = "\n".join([f"{workdir}/{file}" for file in files if not file.startswith(".git")])
await cl.Message(
content=f"""
Codes can be found here:
{files}
---
Total cost: `{company.cost_manager.total_cost}`
"""
).send()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/rag/rag_search.py | examples/rag/rag_search.py | """Agent with RAG search."""
import asyncio
from examples.rag.rag_pipeline import DOC_PATH, QUESTION
from metagpt.logs import logger
from metagpt.rag.engines import SimpleEngine
from metagpt.roles import Sales
async def search():
"""Agent with RAG search."""
store = SimpleEngine.from_docs(input_files=[DOC_PATH])
role = Sales(profile="Sales", store=store)
result = await role.run(QUESTION)
logger.info(result)
if __name__ == "__main__":
asyncio.run(search())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/rag/rag_bm.py | examples/rag/rag_bm.py | # -*- coding: utf-8 -*-
"""RAG benchmark pipeline"""
import asyncio
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import NodeWithScore
from metagpt.const import DATA_PATH, EXAMPLE_BENCHMARK_PATH, EXAMPLE_DATA_PATH
from metagpt.logs import logger
from metagpt.rag.benchmark import RAGBenchmark
from metagpt.rag.engines import SimpleEngine
from metagpt.rag.factories import get_rag_embedding, get_rag_llm
from metagpt.rag.schema import (
BM25RetrieverConfig,
CohereRerankConfig,
ColbertRerankConfig,
FAISSIndexConfig,
FAISSRetrieverConfig,
)
from metagpt.utils.common import write_json_file
DOC_PATH = EXAMPLE_DATA_PATH / "rag_bm/summary_writer.txt"
QUESTION = "2023年7月20日,应急管理部、财政部联合下发《因灾倒塌、损坏住房恢复重建救助工作规范》的通知,规范倒损住房恢复重建救助相关工作。"
TRAVEL_DOC_PATH = EXAMPLE_DATA_PATH / "rag_bm/documents.txt"
TRAVEL_QUESTION = "国家卫生健康委在2023年7月28日开展的“启明行动”是为了防控哪个群体的哪种健康问题,并请列出活动发布的指导性文件名称。"
DATASET_PATH = EXAMPLE_DATA_PATH / "rag_bm/test.json"
SAVE_PATH = EXAMPLE_DATA_PATH / "rag_bm/result.json"
GROUND_TRUTH_TRANVEL = "2023-07-28 10:14:27作者:白剑峰来源:人民日报 ,正文:为在全社会形成重视儿童眼健康的良好氛围,持续推进综合防控儿童青少年近视工作落实,国家卫生健康委决定在全国持续开展“启明行动”——防控儿童青少年近视健康促进活动,并发布了《防控儿童青少年近视核心知识十条》。本次活动的主题为:重视儿童眼保健,守护孩子明眸“视”界。强调预防为主,推动关口前移,倡导和推动家庭及全社会共同行动起来,营造爱眼护眼的视觉友好环境,共同呵护好孩子的眼睛,让他们拥有一个光明的未来。国家卫生健康委要求,开展社会宣传和健康教育。充分利用网络、广播电视、报纸杂志、海报墙报、培训讲座等多种形式,广泛开展宣传倡导,向社会公众传播开展儿童眼保健、保护儿童视力健康的重要意义,以《防控儿童青少年近视核心知识十条》为重点普及预防近视科学知识。创新健康教育方式和载体,开发制作群众喜闻乐见的健康教育科普作品,利用互联网媒体扩大传播效果,提高健康教育的针对性、精准性和实效性。指导相关医疗机构将儿童眼保健和近视防控等科学知识纳入孕妇学校、家长课堂内容。开展儿童眼保健及视力检查咨询指导。医疗机构要以儿童家长和养育人为重点,结合眼保健和眼科临床服务,开展个性化咨询指导。要针对儿童常见眼病和近视防控等重点问题,通过面对面咨询指导,引导儿童家长树立近视防控意识,改变不良生活方式,加强户外活动,养成爱眼护眼健康行为习惯。提高儿童眼保健专科服务能力。各地要积极推进儿童眼保健专科建设,扎实组织好妇幼健康职业技能竞赛“儿童眼保健”项目,推动各层级开展比武练兵,提升业务能力。"
GROUND_TRUTH_ANSWER = "“启明行动”是为了防控儿童青少年的近视问题,并发布了《防控儿童青少年近视核心知识十条》。"
LLM_TIP = "If you not sure, just answer I don't know."
LLM_ERROR = "Retrieve failed due to LLM wasn't follow instruction"
EMPTY_ERROR = "Empty Response"
class RAGExample:
"""Show how to use RAG for evaluation."""
def __init__(self):
self.benchmark = RAGBenchmark()
self.embedding = get_rag_embedding()
self.llm = get_rag_llm()
async def rag_evaluate_pipeline(self, dataset_name: list[str] = ["all"]):
dataset_config = self.benchmark.load_dataset(dataset_name)
for dataset in dataset_config.datasets:
if "all" in dataset_name or dataset.name in dataset_name:
output_dir = DATA_PATH / f"{dataset.name}"
if output_dir.exists():
logger.info("Loading Existed index!")
logger.info(f"Index Path:{output_dir}")
self.engine = SimpleEngine.from_index(
index_config=FAISSIndexConfig(persist_path=output_dir),
ranker_configs=[ColbertRerankConfig()],
retriever_configs=[FAISSRetrieverConfig(), BM25RetrieverConfig()],
)
else:
logger.info("Loading index from documents!")
self.engine = SimpleEngine.from_docs(
input_files=dataset.document_files,
retriever_configs=[FAISSRetrieverConfig()],
ranker_configs=[CohereRerankConfig()],
transformations=[SentenceSplitter(chunk_size=1024, chunk_overlap=0)],
)
results = []
for gt_info in dataset.gt_info:
result = await self.rag_evaluate_single(
question=gt_info["question"],
reference=gt_info["gt_reference"],
ground_truth=gt_info["gt_answer"],
)
results.append(result)
logger.info(f"=====The {dataset.name} Benchmark dataset assessment is complete!=====")
self._print_bm_result(results)
write_json_file((EXAMPLE_BENCHMARK_PATH / dataset.name / "bm_result.json").as_posix(), results, "utf-8")
async def rag_evaluate_single(self, question, reference, ground_truth, print_title=True):
"""This example run rag pipeline, use faiss&bm25 retriever and llm ranker, will print something like:
Retrieve Result:
0. Productivi..., 10.0
1. I wrote cu..., 7.0
2. I highly r..., 5.0
Query Result:
Passion, adaptability, open-mindedness, creativity, discipline, and empathy are key qualities to be a good writer.
RAG BenchMark result:
{
'metrics':
{
'bleu-avg': 0.48318624982047,
'bleu-1': 0.5609756097560976,
'bleu-2': 0.5,
'bleu-3': 0.46153846153846156,
'bleu-4': 0.42105263157894735,
'rouge-L': 0.6865671641791045,
'semantic similarity': 0.9487444961487591,
'length': 74
},
'log': {
'generated_text':
'国家卫生健康委在2023年7月28日开展的“启明行动”是为了防控儿童青少年的近视问题。活动发布的指导性文件名称为《防控儿童青少年近视核心知识十条》。',
'ground_truth_text':
'“启明行动”是为了防控儿童青少年的近视问题,并发布了《防控儿童青少年近视核心知识十条》。'
}
}
"""
if print_title:
self._print_title("RAG Pipeline")
try:
nodes = await self.engine.aretrieve(question)
self._print_result(nodes, state="Retrieve")
answer = await self.engine.aquery(question)
self._print_result(answer, state="Query")
except Exception as e:
logger.error(e)
return self.benchmark.set_metrics(
generated_text=LLM_ERROR, ground_truth_text=ground_truth, question=question
)
result = await self.evaluate_result(answer.response, ground_truth, nodes, reference, question)
logger.info("==========RAG BenchMark result demo as follows==========")
logger.info(result)
return result
async def rag_faissdb(self):
"""This example show how to use FAISS. how to save and load index. will print something like:
Query Result:
Bob likes traveling.
"""
self._print_title("RAG FAISS")
# save index
output_dir = DATA_PATH / "rag_faiss"
SimpleEngine.from_docs(
input_files=[TRAVEL_DOC_PATH],
retriever_configs=[FAISSRetrieverConfig(dimensions=512, persist_path=output_dir)],
)
# load index
engine = SimpleEngine.from_index(
index_config=FAISSIndexConfig(persist_path=output_dir),
)
# query
nodes = engine.retrieve(QUESTION)
self._print_result(nodes, state="Retrieve")
answer = engine.query(TRAVEL_QUESTION)
self._print_result(answer, state="Query")
async def evaluate_result(
self,
response: str = None,
reference: str = None,
nodes: list[NodeWithScore] = None,
reference_doc: list[str] = None,
question: str = None,
):
result = await self.benchmark.compute_metric(response, reference, nodes, reference_doc, question)
return result
@staticmethod
def _print_title(title):
logger.info(f"{'#'*30} {title} {'#'*30}")
@staticmethod
def _print_result(result, state="Retrieve"):
"""print retrieve or query result"""
logger.info(f"{state} Result:")
if state == "Retrieve":
for i, node in enumerate(result):
logger.info(f"{i}. {node.text[:10]}..., {node.score}")
logger.info("======Retrieve Finished======")
return
logger.info(f"{result}\n")
@staticmethod
def _print_bm_result(result):
import pandas as pd
metrics = [
item["metrics"]
for item in result
if item["log"]["generated_text"] != LLM_ERROR and item["log"]["generated_text"] != EMPTY_ERROR
]
data = pd.DataFrame(metrics)
logger.info(f"\n {data.mean()}")
llm_errors = [item for item in result if item["log"]["generated_text"] == LLM_ERROR]
retrieve_errors = [item for item in result if item["log"]["generated_text"] == EMPTY_ERROR]
logger.info(
f"Percentage of retrieval failures due to incorrect LLM instruction following:"
f" {100.0 * len(llm_errors) / len(result)}%"
)
logger.info(
f"Percentage of retrieval failures due to retriever not recalling any documents is:"
f" {100.0 * len(retrieve_errors) / len(result)}%"
)
async def _retrieve_and_print(self, question):
nodes = await self.engine.aretrieve(question)
self._print_result(nodes, state="Retrieve")
return nodes
async def main():
"""RAG pipeline"""
e = RAGExample()
await e.rag_evaluate_pipeline()
if __name__ == "__main__":
asyncio.run(main())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/rag/rag_pipeline.py | examples/rag/rag_pipeline.py | """RAG pipeline"""
import asyncio
from pydantic import BaseModel
from metagpt.const import DATA_PATH, EXAMPLE_DATA_PATH
from metagpt.logs import logger
from metagpt.rag.engines import SimpleEngine
from metagpt.rag.schema import (
ChromaIndexConfig,
ChromaRetrieverConfig,
ElasticsearchIndexConfig,
ElasticsearchRetrieverConfig,
ElasticsearchStoreConfig,
FAISSRetrieverConfig,
LLMRankerConfig,
)
from metagpt.utils.exceptions import handle_exception
LLM_TIP = "If you not sure, just answer I don't know."
DOC_PATH = EXAMPLE_DATA_PATH / "rag/writer.txt"
QUESTION = f"What are key qualities to be a good writer? {LLM_TIP}"
TRAVEL_DOC_PATH = EXAMPLE_DATA_PATH / "rag/travel.txt"
TRAVEL_QUESTION = f"What does Bob like? {LLM_TIP}"
class Player(BaseModel):
"""To demonstrate rag add objs."""
name: str = ""
goal: str = "Win The 100-meter Sprint."
tool: str = "Red Bull Energy Drink."
def rag_key(self) -> str:
"""For search"""
return self.goal
class RAGExample:
"""Show how to use RAG."""
def __init__(self, engine: SimpleEngine = None, use_llm_ranker: bool = True):
self._engine = engine
self._use_llm_ranker = use_llm_ranker
@property
def engine(self):
if not self._engine:
ranker_configs = [LLMRankerConfig()] if self._use_llm_ranker else None
self._engine = SimpleEngine.from_docs(
input_files=[DOC_PATH],
retriever_configs=[FAISSRetrieverConfig()],
ranker_configs=ranker_configs,
)
return self._engine
@engine.setter
def engine(self, value: SimpleEngine):
self._engine = value
@handle_exception
async def run_pipeline(self, question=QUESTION, print_title=True):
"""This example run rag pipeline, use faiss retriever and llm ranker, will print something like:
Retrieve Result:
0. Productivi..., 10.0
1. I wrote cu..., 7.0
2. I highly r..., 5.0
Query Result:
Passion, adaptability, open-mindedness, creativity, discipline, and empathy are key qualities to be a good writer.
"""
if print_title:
self._print_title("Run Pipeline")
nodes = await self.engine.aretrieve(question)
self._print_retrieve_result(nodes)
answer = await self.engine.aquery(question)
self._print_query_result(answer)
@handle_exception
async def add_docs(self):
"""This example show how to add docs.
Before add docs llm anwser I don't know.
After add docs llm give the correct answer, will print something like:
[Before add docs]
Retrieve Result:
Query Result:
Empty Response
[After add docs]
Retrieve Result:
0. Bob like..., 10.0
Query Result:
Bob likes traveling.
"""
self._print_title("Add Docs")
travel_question = f"{TRAVEL_QUESTION}"
travel_filepath = TRAVEL_DOC_PATH
logger.info("[Before add docs]")
await self.run_pipeline(question=travel_question, print_title=False)
logger.info("[After add docs]")
self.engine.add_docs([travel_filepath])
await self.run_pipeline(question=travel_question, print_title=False)
@handle_exception
async def add_objects(self, print_title=True):
"""This example show how to add objects.
Before add docs, engine retrieve nothing.
After add objects, engine give the correct answer, will print something like:
[Before add objs]
Retrieve Result:
[After add objs]
Retrieve Result:
0. 100m Sprin..., 10.0
[Object Detail]
{'name': 'Mike', 'goal': 'Win The 100-meter Sprint', 'tool': 'Red Bull Energy Drink'}
"""
if print_title:
self._print_title("Add Objects")
player = Player(name="Mike")
question = f"{player.rag_key()}"
logger.info("[Before add objs]")
await self._retrieve_and_print(question)
logger.info("[After add objs]")
self.engine.add_objs([player])
try:
nodes = await self._retrieve_and_print(question)
logger.info("[Object Detail]")
player: Player = nodes[0].metadata["obj"]
logger.info(player.name)
except Exception as e:
logger.error(f"nodes is empty, llm don't answer correctly, exception: {e}")
@handle_exception
async def init_objects(self):
"""This example show how to from objs, will print something like:
Same as add_objects.
"""
self._print_title("Init Objects")
pre_engine = self.engine
self.engine = SimpleEngine.from_objs(retriever_configs=[FAISSRetrieverConfig()])
await self.add_objects(print_title=False)
self.engine = pre_engine
@handle_exception
async def init_and_query_chromadb(self):
"""This example show how to use chromadb. how to save and load index. will print something like:
Query Result:
Bob likes traveling.
"""
self._print_title("Init And Query ChromaDB")
# 1. save index
output_dir = DATA_PATH / "rag"
SimpleEngine.from_docs(
input_files=[TRAVEL_DOC_PATH],
retriever_configs=[ChromaRetrieverConfig(persist_path=output_dir)],
)
# 2. load index
engine = SimpleEngine.from_index(index_config=ChromaIndexConfig(persist_path=output_dir))
# 3. query
answer = await engine.aquery(TRAVEL_QUESTION)
self._print_query_result(answer)
@handle_exception
async def init_and_query_es(self):
"""This example show how to use es. how to save and load index. will print something like:
Query Result:
Bob likes traveling.
"""
self._print_title("Init And Query Elasticsearch")
# 1. create es index and save docs
store_config = ElasticsearchStoreConfig(index_name="travel", es_url="http://127.0.0.1:9200")
engine = SimpleEngine.from_docs(
input_files=[TRAVEL_DOC_PATH],
retriever_configs=[ElasticsearchRetrieverConfig(store_config=store_config)],
)
# 2. load index
engine = SimpleEngine.from_index(index_config=ElasticsearchIndexConfig(store_config=store_config))
# 3. query
answer = await engine.aquery(TRAVEL_QUESTION)
self._print_query_result(answer)
@staticmethod
def _print_title(title):
logger.info(f"{'#'*30} {title} {'#'*30}")
@staticmethod
def _print_retrieve_result(result):
"""Print retrieve result."""
logger.info("Retrieve Result:")
for i, node in enumerate(result):
logger.info(f"{i}. {node.text[:10]}..., {node.score}")
logger.info("")
@staticmethod
def _print_query_result(result):
"""Print query result."""
logger.info("Query Result:")
logger.info(f"{result}\n")
async def _retrieve_and_print(self, question):
nodes = await self.engine.aretrieve(question)
self._print_retrieve_result(nodes)
return nodes
async def main():
"""RAG pipeline.
Note:
1. If `use_llm_ranker` is True, then it will use LLM Reranker to get better result, but it is not always guaranteed that the output will be parseable for reranking,
prefer `gpt-4-turbo`, otherwise might encounter `IndexError: list index out of range` or `ValueError: invalid literal for int() with base 10`.
"""
e = RAGExample(use_llm_ranker=False)
await e.run_pipeline()
await e.add_docs()
await e.add_objects()
await e.init_objects()
await e.init_and_query_chromadb()
await e.init_and_query_es()
if __name__ == "__main__":
asyncio.run(main())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/rag/omniparse.py | examples/rag/omniparse.py | import asyncio
from metagpt.config2 import config
from metagpt.const import EXAMPLE_DATA_PATH
from metagpt.logs import logger
from metagpt.rag.parsers import OmniParse
from metagpt.rag.schema import OmniParseOptions, OmniParseType, ParseResultType
from metagpt.utils.omniparse_client import OmniParseClient
TEST_DOCX = EXAMPLE_DATA_PATH / "omniparse/test01.docx"
TEST_PDF = EXAMPLE_DATA_PATH / "omniparse/test02.pdf"
TEST_VIDEO = EXAMPLE_DATA_PATH / "omniparse/test03.mp4"
TEST_AUDIO = EXAMPLE_DATA_PATH / "omniparse/test04.mp3"
async def omniparse_client_example():
client = OmniParseClient(base_url=config.omniparse.base_url)
# docx
with open(TEST_DOCX, "rb") as f:
file_input = f.read()
document_parse_ret = await client.parse_document(file_input=file_input, bytes_filename="test_01.docx")
logger.info(document_parse_ret)
# pdf
pdf_parse_ret = await client.parse_pdf(file_input=TEST_PDF)
logger.info(pdf_parse_ret)
# video
video_parse_ret = await client.parse_video(file_input=TEST_VIDEO)
logger.info(video_parse_ret)
# audio
audio_parse_ret = await client.parse_audio(file_input=TEST_AUDIO)
logger.info(audio_parse_ret)
async def omniparse_example():
parser = OmniParse(
api_key=config.omniparse.api_key,
base_url=config.omniparse.base_url,
parse_options=OmniParseOptions(
parse_type=OmniParseType.PDF,
result_type=ParseResultType.MD,
max_timeout=120,
num_workers=3,
),
)
ret = parser.load_data(file_path=TEST_PDF)
logger.info(ret)
file_paths = [TEST_DOCX, TEST_PDF]
parser.parse_type = OmniParseType.DOCUMENT
ret = await parser.aload_data(file_path=file_paths)
logger.info(ret)
async def main():
await omniparse_client_example()
await omniparse_example()
if __name__ == "__main__":
asyncio.run(main())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/aflow/optimize.py | examples/aflow/optimize.py | # -*- coding: utf-8 -*-
# @Date : 8/23/2024 20:00 PM
# @Author : didi
# @Desc : Entrance of AFlow.
import argparse
from typing import Dict, List
from metagpt.configs.models_config import ModelsConfig
from metagpt.ext.aflow.data.download_data import download
from metagpt.ext.aflow.scripts.optimizer import Optimizer
class ExperimentConfig:
def __init__(self, dataset: str, question_type: str, operators: List[str]):
self.dataset = dataset
self.question_type = question_type
self.operators = operators
EXPERIMENT_CONFIGS: Dict[str, ExperimentConfig] = {
"DROP": ExperimentConfig(
dataset="DROP",
question_type="qa",
operators=["Custom", "AnswerGenerate", "ScEnsemble"],
),
"HotpotQA": ExperimentConfig(
dataset="HotpotQA",
question_type="qa",
operators=["Custom", "AnswerGenerate", "ScEnsemble"],
),
"MATH": ExperimentConfig(
dataset="MATH",
question_type="math",
operators=["Custom", "ScEnsemble", "Programmer"],
),
"GSM8K": ExperimentConfig(
dataset="GSM8K",
question_type="math",
operators=["Custom", "ScEnsemble", "Programmer"],
),
"MBPP": ExperimentConfig(
dataset="MBPP",
question_type="code",
operators=["Custom", "CustomCodeGenerate", "ScEnsemble", "Test"],
),
"HumanEval": ExperimentConfig(
dataset="HumanEval",
question_type="code",
operators=["Custom", "CustomCodeGenerate", "ScEnsemble", "Test"],
),
}
def parse_args():
parser = argparse.ArgumentParser(description="AFlow Optimizer")
parser.add_argument(
"--dataset",
type=str,
choices=list(EXPERIMENT_CONFIGS.keys()),
required=True,
help="Dataset type",
)
parser.add_argument("--sample", type=int, default=4, help="Sample count")
parser.add_argument(
"--optimized_path",
type=str,
default="metagpt/ext/aflow/scripts/optimized",
help="Optimized result save path",
)
parser.add_argument("--initial_round", type=int, default=1, help="Initial round")
parser.add_argument("--max_rounds", type=int, default=20, help="Max iteration rounds")
parser.add_argument("--check_convergence", type=bool, default=True, help="Whether to enable early stop")
parser.add_argument("--validation_rounds", type=int, default=5, help="Validation rounds")
parser.add_argument(
"--if_first_optimize",
type=lambda x: x.lower() == "true",
default=True,
help="Whether to download dataset for the first time",
)
parser.add_argument(
"--opt_model_name",
type=str,
default="claude-3-5-sonnet-20240620",
help="Specifies the name of the model used for optimization tasks.",
)
parser.add_argument(
"--exec_model_name",
type=str,
default="gpt-4o-mini",
help="Specifies the name of the model used for execution tasks.",
)
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
config = EXPERIMENT_CONFIGS[args.dataset]
models_config = ModelsConfig.default()
opt_llm_config = models_config.get(args.opt_model_name)
if opt_llm_config is None:
raise ValueError(
f"The optimization model '{args.opt_model_name}' was not found in the 'models' section of the configuration file. "
"Please add it to the configuration file or specify a valid model using the --opt_model_name flag. "
)
exec_llm_config = models_config.get(args.exec_model_name)
if exec_llm_config is None:
raise ValueError(
f"The execution model '{args.exec_model_name}' was not found in the 'models' section of the configuration file. "
"Please add it to the configuration file or specify a valid model using the --exec_model_name flag. "
)
download(["datasets", "initial_rounds"], if_first_download=args.if_first_optimize)
optimizer = Optimizer(
dataset=config.dataset,
question_type=config.question_type,
opt_llm_config=opt_llm_config,
exec_llm_config=exec_llm_config,
check_convergence=args.check_convergence,
operators=config.operators,
optimized_path=args.optimized_path,
sample=args.sample,
initial_round=args.initial_round,
max_rounds=args.max_rounds,
validation_rounds=args.validation_rounds,
)
# Optimize workflow via setting the optimizer's mode to 'Graph'
optimizer.optimize("Graph")
# Test workflow via setting the optimizer's mode to 'Test'
# optimizer.optimize("Test")
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/di/automated_planning_of_tasks.py | examples/di/automated_planning_of_tasks.py | import fire
from metagpt.logs import logger
from metagpt.roles.di.team_leader import TeamLeader
async def main():
# Create an instance of TeamLeader
tl = TeamLeader()
# Update the plan with the goal to create a 2048 game
# This will auto generate tasks needed to accomplish the goal
await tl.planner.update_plan(goal="create a 2048 game.")
# Iterate through all tasks in the plan
# Log each task's ID, instruction and completion status
for task in tl.planner.plan.tasks:
logger.info(f"- {task.task_id}: {task.instruction} (Completed: {task.is_finished})")
if __name__ == "__main__":
fire.Fire(main)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/di/run_flask.py | examples/di/run_flask.py | import asyncio
from metagpt.roles.di.data_interpreter import DataInterpreter
USE_GOT_REPO_REQ = """
Write a service using Flask, create a conda environment and run it, and call the service's interface for validation.
Notice: Don't write all codes in one response, each time, just write code for one step.
"""
# If you have created a conda environment, you can say:
# I have created the conda environment '{env_name}', please use this environment to execute.
async def main():
di = DataInterpreter(tools=["Terminal", "Editor"])
await di.run(USE_GOT_REPO_REQ)
if __name__ == "__main__":
asyncio.run(main())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/di/email_summary.py | examples/di/email_summary.py | # -*- encoding: utf-8 -*-
"""
@Date : 2024/02/07
@Author : Tuo Zhou
@File : email_summary.py
"""
import os
from metagpt.roles.di.data_interpreter import DataInterpreter
async def main():
email_account = "your_email_account"
# your password will stay only on your device and not go to LLM api
os.environ["email_password"] = "your_email_password"
### Prompt for automatic email reply, uncomment to try this too ###
# prompt = f"""I will give you your Outlook email account ({email_account}) and password (email_password item in the environment variable). You need to find the latest email in my inbox with the sender's suffix @gmail.com and reply "Thank you! I have received your email~"""""
### Prompt for automatic email summary ###
prompt = f"""I will give you your Outlook email account ({email_account}) and password (email_password item in the environment variable).
Firstly, Please help me fetch the latest 5 senders and full letter contents.
Then, summarize each of the 5 emails into one sentence (you can do this by yourself, no need to import other models to do this) and output them in a markdown format."""
di = DataInterpreter()
await di.run(prompt)
if __name__ == "__main__":
import asyncio
asyncio.run(main())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/di/machine_learning.py | examples/di/machine_learning.py | import fire
from metagpt.roles.di.data_interpreter import DataInterpreter
WINE_REQ = "Run data analysis on sklearn Wine recognition dataset, include a plot, and train a model to predict wine class (20% as validation), and show validation accuracy."
DATA_DIR = "path/to/your/data"
# sales_forecast data from https://www.kaggle.com/datasets/aslanahmedov/walmart-sales-forecast/data
SALES_FORECAST_REQ = f"""Train a model to predict sales for each department in every store (split the last 40 weeks records as validation dataset, the others is train dataset), include plot total sales trends, print metric and plot scatter plots of
groud truth and predictions on validation data. Dataset is {DATA_DIR}/train.csv, the metric is weighted mean absolute error (WMAE) for test data. Notice: *print* key variables to get more information for next task step.
"""
REQUIREMENTS = {"wine": WINE_REQ, "sales_forecast": SALES_FORECAST_REQ}
async def main(use_case: str = "wine"):
mi = DataInterpreter()
requirement = REQUIREMENTS[use_case]
await mi.run(requirement)
if __name__ == "__main__":
fire.Fire(main)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/di/custom_tool.py | examples/di/custom_tool.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/3/22 10:54
@Author : alexanderwu
@File : custom_tool.py
"""
from metagpt.roles.di.data_interpreter import DataInterpreter
from metagpt.tools.tool_registry import register_tool
@register_tool()
def magic_function(arg1: str, arg2: int) -> dict:
"""
The magic function that does something.
Args:
arg1 (str): ...
arg2 (int): ...
Returns:
dict: ...
"""
return {"arg1": arg1 * 3, "arg2": arg2 * 5}
async def main():
di = DataInterpreter(tools=["magic_function"])
await di.run("Just call the magic function with arg1 'A' and arg2 2. Tell me the result.")
if __name__ == "__main__":
import asyncio
asyncio.run(main())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/di/ocr_receipt.py | examples/di/ocr_receipt.py | from metagpt.const import EXAMPLE_DATA_PATH
from metagpt.roles.di.data_interpreter import DataInterpreter
async def main():
# Notice: pip install metagpt[ocr] before using this example
image_path = EXAMPLE_DATA_PATH / "di/receipt_shopping.jpg"
language = "English"
requirement = f"""This is a {language} receipt image.
Your goal is to perform OCR on images using PaddleOCR, output text content from the OCR results and discard
coordinates and confidence levels, then recognize the total amount from ocr text content, and finally save as csv table.
Image path: {image_path}.
NOTE: The environments for Paddle and PaddleOCR are all ready and has been fully installed."""
di = DataInterpreter(react_mode="react")
await di.run(requirement)
if __name__ == "__main__":
import asyncio
asyncio.run(main())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/di/data_visualization.py | examples/di/data_visualization.py | import asyncio
from metagpt.logs import logger
from metagpt.roles.di.data_interpreter import DataInterpreter
from metagpt.utils.recovery_util import save_history
async def main(requirement: str = ""):
di = DataInterpreter()
rsp = await di.run(requirement)
logger.info(rsp)
save_history(role=di)
if __name__ == "__main__":
requirement = "Run data analysis on sklearn Iris dataset, include a plot"
asyncio.run(main(requirement))
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/di/software_company.py | examples/di/software_company.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import fire
from metagpt.roles.di.data_interpreter import DataInterpreter
async def main():
prompt = """
This is a software requirement:
```text
write a snake game
```
---
1. Writes a PRD based on software requirements.
2. Writes a design to the project repository, based on the PRD of the project.
3. Writes a project plan to the project repository, based on the design of the project.
4. Writes codes to the project repository, based on the project plan of the project.
5. Run QA test on the project repository.
6. Stage and commit changes for the project repository using Git.
Note: All required dependencies and environments have been fully installed and configured.
"""
di = DataInterpreter(
tools=[
"WritePRD",
"WriteDesign",
"WritePlan",
"WriteCode",
"RunCode",
"DebugError",
# "git_archive",
]
)
await di.run(prompt)
if __name__ == "__main__":
fire.Fire(main)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/di/atomization_capacity_plan.py | examples/di/atomization_capacity_plan.py | import fire
from metagpt.logs import logger
from metagpt.roles.di.team_leader import TeamLeader
async def main():
tl = TeamLeader()
logger.info("\n=== Adding Initial Tasks ===")
tl.planner.plan.append_task(
task_id="T1", dependent_task_ids=[], instruction="Create Product Requirements Document (PRD)", assignee="Alice"
)
tl.planner.plan.append_task(
task_id="T2", dependent_task_ids=["T1"], instruction="Design System Architecture", assignee="Bob"
)
# 1. Add Development Tasks
logger.info("\n=== Adding Development Tasks ===")
tl.planner.plan.append_task(
task_id="T3", dependent_task_ids=["T2"], instruction="Implement Core Function Modules", assignee="Alex"
)
tl.planner.plan.append_task(
task_id="T4", dependent_task_ids=["T2"], instruction="Implement User Interface", assignee="Alex"
)
# 2. Complete Some Tasks
logger.info("\n=== Execute and Complete Tasks ===")
logger.info(f"Current Task: {tl.planner.plan.current_task.instruction}")
tl.planner.plan.finish_current_task() # Complete T1
logger.info(f"Current Task: {tl.planner.plan.current_task.instruction}")
tl.planner.plan.finish_current_task() # Complete T2
# 3. Replace Tasks
logger.info("\n=== Replace Task ===")
tl.planner.plan.replace_task(
task_id="T3",
new_dependent_task_ids=["T2"],
new_instruction="Implement Core Function Modules (Add New Features)",
new_assignee="Senior_Developer",
)
# 4. Add Testing Tasks
logger.info("\n=== Add Testing Tasks ===")
tl.planner.plan.append_task(
task_id="T5", dependent_task_ids=["T3", "T4"], instruction="Execute Integration Tests", assignee="Edward"
)
# 5. Reset Task Demonstration
logger.info("\n=== Reset Task ===")
logger.info("Reset Task T3 (This will also reset T5 which depends on it)")
tl.planner.plan.reset_task("T3")
# Display Final Status
logger.info("\n=== Final Status ===")
logger.info(f"Completed Tasks: {len([t for t in tl.planner.plan.tasks if t.is_finished])}")
logger.info(f"Current Task: {tl.planner.plan.current_task.instruction}")
logger.info("All Tasks:")
for task in tl.planner.plan.tasks:
logger.info(f"- {task.task_id}: {task.instruction} (Completed: {task.is_finished})")
if __name__ == "__main__":
fire.Fire(main)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/di/imitate_webpage.py | examples/di/imitate_webpage.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/01/15
@Author : mannaandpoem
@File : imitate_webpage.py
"""
from metagpt.roles.di.data_interpreter import DataInterpreter
async def main():
web_url = "https://pytorch.org/"
prompt = f"""This is a URL of webpage: '{web_url}' .
Firstly, open the page and take a screenshot of the page.
Secondly, convert the image to a webpage including HTML, CSS and JS in one go.
Note: All required dependencies and environments have been fully installed and configured."""
di = DataInterpreter(tools=["GPTvGenerator", "Browser"])
await di.run(prompt)
if __name__ == "__main__":
import asyncio
asyncio.run(main())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/di/requirements_prompt.py | examples/di/requirements_prompt.py | # InfiAgent-DABench requirements
DABENCH = "You are required to {question} from a CSV file named {file_name}. **Constraints**: Ensure that {constraints}, which must be strictly followed throughout the task. The output format should be {format}. This task is categorized as {level}."
# ML-Benchmark requirements
IRIS_REQ = "Run data analysis on sklearn Iris dataset, include a plot"
WINES_RECOGNITION_REQ = "Run data analysis on sklearn Wine recognition dataset, include a plot, and train a model to predict wine class with 20% as test set, and show prediction accuracy"
BREAST_CANCER_WISCONSIN_REQ = "Run data analysis on sklearn Wisconsin Breast Cancer dataset, include a plot, train a model to predict targets (20% as validation), and show validation accuracy"
TITANIC_REQ = "This is a titanic passenger survival dataset, your goal is to predict passenger survival outcome. The target column is Survived. Perform data analysis, data preprocessing, feature engineering, and modeling to predict the target. Report accuracy on the eval data. Train data path: '{data_dir}/di_dataset/ml_benchmark/04_titanic/split_train.csv', eval data path: '{data_dir}/di_dataset/ml_benchmark/04_titanic/split_eval.csv'."
HOUSE_PRICES_ADVANCED_REGRESSION_TECHNIQUES_REQ = "This is a house price dataset, your goal is to predict the sale price of a property based on its features. The target column is SalePrice. Perform data analysis, data preprocessing, feature engineering, and modeling to predict the target. Report RMSE between the logarithm of the predicted value and the logarithm of the observed sales price on the eval data. Train data path: '{data_dir}/di_dataset/ml_benchmark/05_house-prices-advanced-regression-techniques/split_train.csv', eval data path: '{data_dir}/di_dataset/ml_benchmark/05_house-prices-advanced-regression-techniques/split_eval.csv'."
SANTANDER_CUSTOMER_TRANSACTION_PREDICTION_REQ = "This is a customers financial dataset. Your goal is to predict which customers will make a specific transaction in the future. The target column is target. Perform data analysis, data preprocessing, feature engineering, and modeling to predict the target. Report AUC on the eval data. Train data path: '{data_dir}/di_dataset/ml_benchmark/06_santander-customer-transaction-prediction/split_train.csv', eval data path: '{data_dir}/di_dataset/ml_benchmark/06_santander-customer-transaction-prediction/split_eval.csv' ."
ICR_IDENTITY_AGE_RELATED_CONDITIONS_REQ = "This is a medical dataset with over fifty anonymized health characteristics linked to three age-related conditions. Your goal is to predict whether a subject has or has not been diagnosed with one of these conditions. The target column is Class. Perform data analysis, data preprocessing, feature engineering, and modeling to predict the target. Report F1 Score on the eval data. Train data path: '{data_dir}/di_dataset/ml_benchmark/07_icr-identify-age-related-conditions/split_train.csv', eval data path: '{data_dir}/di_dataset/ml_benchmark/07_icr-identify-age-related-conditions/split_eval.csv' ."
SANTANDER_VALUE_PREDICTION_CHALLENGE_REQ = "This is a customers financial dataset. Your goal is to predict the value of transactions for each potential customer. The target column is target. Perform data analysis, data preprocessing, feature engineering, and modeling to predict the target. Report RMSLE on the eval data. Train data path: '{data_dir}/di_dataset/ml_benchmark/08_santander-value-prediction-challenge/split_train.csv', eval data path: '{data_dir}/di_dataset/ml_benchmark/08_santander-value-prediction-challenge/split_eval.csv' ."
# Open-Ended Tasks requirements
OCR_REQ_01 = "This is an English invoice image. Your goal is to perform OCR on the image, extract the total amount from ocr result and save as table, using PaddleOCR. The PaddleOCR environment has been fully installed, try to use Paddleocr as much as possible. Image path: '{data_dir}/di_dataset/open_ended_tasks/01_ocr.png"
OCR_REQ_02 = "This is a Chinese invoice image. Your goal is to perform OCR on the image and only output the recognized text word results, nothing else is needed, then extract the total amount and receipt ID starting with 'No' from ocr text words results and save as table, using PaddleOCR. The PaddleOCR environment has been fully installed, try to use Paddleocr as much as possible. Image path: '{data_dir}/di_dataset/open_ended_tasks/02_ocr.jpg"
OCR_REQ_03 = "This is an invoice image for OCR. Your goal is to perform OCR on the image, extract the total amount and save it into an Excel table format, using PaddleOCR with lang='en' The PaddleOCR environment has been fully installed, try to use Paddleocr as much as possible. Image path: '{data_dir}/di_dataset/open_ended_tasks/03_ocr.jpg"
WEB_SEARCH_AND_CRAWLING_REQ_04 = "Get data from `paperlist` table in https://papercopic.com/statistics/iclr-statistics/iclr-2024-statistics/ , and save it to a csv file. paper title must include `multiagent` or `large language model`. **notice: print key variables**"
WEB_SEARCH_AND_CRAWLING_REQ_05 = "Obtain the CPI data from https://www.stats.gov.cn/sj/sjjd/202307/t20230718_1941322.html, please follow this plan step by step: 1. Detect the encoding type and HTML structure of the target webpage. 2. Crawl the webpage, de-duplicate the body content, convert it to a clear paragraph suitable for reading as plain text, and save it to target.txt. 3. Design multiple regular expressions to match key sentences in target.txt, use try-except statements to combine the various regular expression matches, note that the webpage text is in Chinese. 4. Finally, use a Chinese summary to summarize the key sentences to answer the user's request. **Note: If it is a code block, print out the key variable results of the code block; if it is webpage text, print the first 200 characters.**"
WEB_SEARCH_AND_CRAWLING_REQ_06 = "Get products data from website https://scrapeme.live/shop/ and save it as a csv file. Notice: Firstly parse the web page encoding and the text HTML structure; The first page product name, price, product URL, and image URL must be saved in the csv;"
WEB_SEARCH_AND_CRAWLING_REQ_07 = "从36kr创投平台https://pitchhub.36kr.com/financing-flash所有初创企业融资的信息, **注意: 这是⼀个中⽂⽹站**; 下⾯是⼀个⼤致流程, 你会根据每⼀步的运⾏结果对当前计划中的任务做出适当调整: 1. 爬取并本地保存html结构; 2. 直接打印第7个**快讯**关键词后2000个字符的html内容, 作为**快讯的html内容示例**; 3. 反思**快讯的html内容示例**中的规律, 设计正则匹配表达式来获取**快讯**的标题、链接、时间; 4. 筛选最近3天的初创企业融资**快讯**, 以list[dict]形式打印前5个。5. 将全部结果存在本地csv中"
EMAIL_REPLY_REQ_08 = """You are an agent that automatically reads and replies to emails. I will give you your Outlook email account and password. You need to check the content of the latest email and return it to me. If the email address suffix of this email is @xxx.xxx, please automatically reply with "I've received your email and will reply as soon as possible. Thank you!" Email account: xxx@xxx.xxx Email Password: xxxx"""
WEB_PAGE_IMITATION_REQ_09 = "This is a URL of webpage: https://medium.com/ . Firstly, utilize Selenium and WebDriver for rendering. Secondly, convert image to a webpage including HTML, CSS and JS in one go. Finally, save webpage in a text file. All required dependencies and environments have been fully installed and configured."
WEB_PAGE_IMITATION_REQ_10 = "This is a URL of webpage: https://pytorch.org/ . Firstly, utilize Selenium and WebDriver for rendering. Secondly, convert image to a webpage including HTML, CSS and JS in one go. Finally, save webpage in a file. NOTE: All required dependencies and environments have been fully installed and configured."
WEB_PAGE_IMITATION_REQ_11 = "This is a URL of webpage: https://www.kaggle.com/ . Firstly, utilize Selenium and WebDriver to render the webpage, ensuring the browser window is maximized for an optimal viewing experience. Secondly, convert image to a webpage including HTML, CSS and JS in one go. Finally, save webpage in a file. NOTE: All required dependencies and environments have been fully installed and configured."
WEB_PAGE_IMITATION_REQ_12 = "This is a URL of webpage: https://chat.openai.com/auth/login . Firstly, utilize Selenium and WebDriver to render the webpage, ensuring the browser window is maximized for an optimal viewing experience. Secondly, convert image to a webpage including HTML, CSS and JS in one go. Finally, save webpage in a file. NOTE: All required dependencies and environments have been fully installed and configured."
WEB_PAGE_IMITATION_REQ_13 = "This is a URL of webpage: https://deepmind.google/technologies/gemini/#introduction . Firstly, utilize Selenium and WebDriver to render the webpage, ensuring the browser window is maximized for an optimal viewing experience. Secondly, convert image to a webpage including HTML, CSS and JS in one go. Finally, save webpage in a file. NOTE: All required dependencies and environments have been fully installed and configured."
IMAGE_BACKGROUND_REMOVAL_REQ_14 = "This is an image, you need to use python toolkit rembg remove the background of the image. image path:'{data_dir}/di_dataset/open_ended_tasks/14_image_background_removal.jpg'; save path:'{data_dir}/di_dataset/open_ended_tasks/14_image_background_removal_result.jpg'"
TEXT2IMG_REQ_15 = """I want to generate an image of a beautiful girl using the stable diffusion text2image tool, sd_url = 'http://your.sd.service.ip:port'"""
IMAGE2CODE_GENERATION_REQ_16 = "This is a image. First, convert the image to webpage code including HTML, CSS and JS in one go, and finally save webpage code in a file.The image path: '{data_dir}/di_dataset/open_ended_tasks/16_image_2_code_generation.png'. NOTE: All required dependencies and environments have been fully installed and configured."
IMAGE2CODE_GENERATION_REQ_17 = "This is a image. First, convert the image to webpage code including HTML, CSS and JS in one go, and finally save webpage code in a file.The image path: '{data_dir}/di_dataset/open_ended_tasks/17_image_2_code_generation.png'. NOTE: All required dependencies and environments have been fully installed and configured."
GENERATE_GAMES_REQ_18 = "Create a Snake game. Players need to control the movement of the snake to eat food and grow its body, while avoiding the snake's head touching their own body or game boundaries. Games need to have basic game logic, user interface. During the production process, please consider factors such as playability, beautiful interface, and convenient operation of the game. Note: pyxel environment already satisfied"
GENERATE_GAMES_REQ_19 = "You are a professional game developer, please use pyxel software to create a simple jumping game. The game needs to include a character that can move left and right on the screen. When the player presses the spacebar, the character should jump. Please ensure that the game is easy to operate, with clear graphics, and complies with the functional limitations of pyxel software. Note: pyxel environment already satisfied"
GENERATE_GAMES_REQ_20 = "Create a Snake game. Players need to control the movement of the snake to eat food and grow its body, while avoiding the snake's head touching their own body or game boundaries. Games need to have basic game logic, user interface. During the production process, please consider factors such as playability, beautiful interface, and convenient operation of the game. Note: pyxel environment already satisfied"
ML_BENCHMARK_REQUIREMENTS = {
"01_iris": IRIS_REQ,
"02_wines_recognition": WINES_RECOGNITION_REQ,
"03_breast_cancer": BREAST_CANCER_WISCONSIN_REQ,
"04_titanic": TITANIC_REQ,
"05_house_prices": HOUSE_PRICES_ADVANCED_REGRESSION_TECHNIQUES_REQ,
"06_santander_customer": SANTANDER_CUSTOMER_TRANSACTION_PREDICTION_REQ,
"07_icr_identify": ICR_IDENTITY_AGE_RELATED_CONDITIONS_REQ,
"08_santander_value": SANTANDER_VALUE_PREDICTION_CHALLENGE_REQ,
}
OPEN_ENDED_TASKS_REQUIREMENTS = {
"01_ocr": OCR_REQ_01,
"02_ocr": OCR_REQ_02,
"03_ocr": OCR_REQ_03,
"04_web_search_and_crawling": WEB_SEARCH_AND_CRAWLING_REQ_04,
"05_web_search_and_crawling": WEB_SEARCH_AND_CRAWLING_REQ_05,
"06_web_search_and_crawling": WEB_SEARCH_AND_CRAWLING_REQ_06,
"07_web_search_and_crawling": WEB_SEARCH_AND_CRAWLING_REQ_07,
"08_email_reply": EMAIL_REPLY_REQ_08,
"09_web_page_imitation": WEB_PAGE_IMITATION_REQ_09,
"10_web_page_imitation": WEB_PAGE_IMITATION_REQ_10,
"11_web_page_imitation": WEB_PAGE_IMITATION_REQ_11,
"12_web_page_imitation": WEB_PAGE_IMITATION_REQ_12,
"13_web_page_imitation": WEB_PAGE_IMITATION_REQ_13,
"14_image_background_removal": IMAGE_BACKGROUND_REMOVAL_REQ_14,
"15_text2img": TEXT2IMG_REQ_15,
"16_image_2_code_generation": IMAGE2CODE_GENERATION_REQ_16,
"17_image_2_code_generation": IMAGE2CODE_GENERATION_REQ_17,
"18_generate_games": GENERATE_GAMES_REQ_18,
"19_generate_games": GENERATE_GAMES_REQ_19,
"20_generate_games": GENERATE_GAMES_REQ_20,
}
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/di/run_open_ended_tasks.py | examples/di/run_open_ended_tasks.py | import os
import fire
from examples.di.requirements_prompt import OPEN_ENDED_TASKS_REQUIREMENTS
from metagpt.const import DATA_PATH
from metagpt.roles.di.data_interpreter import DataInterpreter
from metagpt.tools.tool_recommend import TypeMatchToolRecommender
# Ensure Open-Ended Tasks dataset has been downloaded before using this example.
async def main(task_name, data_dir=DATA_PATH, use_reflection=True):
if data_dir != DATA_PATH and not os.path.exists(os.path.join(data_dir, "di_dataset/open_ended_tasks")):
raise FileNotFoundError(f"Open-ended task dataset not found in {data_dir}.")
requirement = OPEN_ENDED_TASKS_REQUIREMENTS[task_name].format(data_dir=data_dir)
di = DataInterpreter(use_reflection=use_reflection, tool_recommender=TypeMatchToolRecommender(tools=["<all>"]))
await di.run(requirement)
if __name__ == "__main__":
fire.Fire(main)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/di/data_analyst_write_code.py | examples/di/data_analyst_write_code.py | import fire
from metagpt.logs import logger
from metagpt.roles.di.data_analyst import DataAnalyst
async def main():
# Create an instance of DataAnalyst role
analyst = DataAnalyst()
# Set the main goal for the planner - constructing a 2D array
analyst.planner.plan.goal = "construct a two-dimensional array"
# Add a specific task to the planner with detailed parameters:
# - task_id: Unique identifier for the task
# - dependent_task_ids: List of tasks that need to be completed before this one (empty in this case)
# - instruction: Description of what needs to be done
# - assignee: Who will execute the task (David)
# - task_type: Category of the task (DATA_ANALYSIS)
analyst.planner.plan.append_task(
task_id="1",
dependent_task_ids=[],
instruction="construct a two-dimensional array",
assignee="David",
task_type="DATA_ANALYSIS",
)
# Execute the code generation and execution for creating a 2D array
# The write_and_exec_code method will:
# 1. Generate the necessary code for creating a 2D array
# 2. Execute the generated code
# 3. Return the result
result = await analyst.write_and_exec_code("construct a two-dimensional array")
# Log the result of the code execution
logger.info(result)
if __name__ == "__main__":
fire.Fire(main)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/di/use_github_repo.py | examples/di/use_github_repo.py | import asyncio
from metagpt.roles.di.data_interpreter import DataInterpreter
USE_GOT_REPO_REQ = """
This is a link to the GOT github repo: https://github.com/spcl/graph-of-thoughts.git.
Clone it, read the README to understand the usage, install it, and finally run the quick start example.
**Note the config for LLM is at `config/config_got.json`, it's outside the repo path, before using it, you need to copy it into graph-of-thoughts.
** Don't write all codes in one response, each time, just write code for one step.
"""
async def main():
di = DataInterpreter(tools=["Terminal"])
await di.run(USE_GOT_REPO_REQ)
if __name__ == "__main__":
asyncio.run(main())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/di/rm_image_background.py | examples/di/rm_image_background.py | import asyncio
from metagpt.const import DEFAULT_WORKSPACE_ROOT, EXAMPLE_DATA_PATH
from metagpt.roles.di.data_interpreter import DataInterpreter
async def main(requirement: str = ""):
di = DataInterpreter()
await di.run(requirement)
if __name__ == "__main__":
image_path = EXAMPLE_DATA_PATH / "di/dog.jpg"
save_path = DEFAULT_WORKSPACE_ROOT / "image_rm_bg.png"
requirement = f"This is a image, you need to use python toolkit rembg to remove the background of the image and save the result. image path:{image_path}; save path:{save_path}."
asyncio.run(main(requirement))
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/di/interacting_with_human.py | examples/di/interacting_with_human.py | import fire
from metagpt.environment.mgx.mgx_env import MGXEnv
from metagpt.logs import logger
from metagpt.roles.di.team_leader import TeamLeader
from metagpt.schema import Message
async def main():
# Initialize the MetaGPT environment
env = MGXEnv()
# Add a TeamLeader role to the environment
env.add_roles([TeamLeader()])
# Get input from human user about what they want to do
human_rsp = await env.ask_human("What do you want to do?")
# Log the human response for tracking
logger.info(human_rsp)
# Create and publish a message with the human response in the environment
env.publish_message(Message(content=human_rsp, role="user"))
# Get the TeamLeader role instance named 'Mike'
tl = env.get_role("Mike")
# Execute the TeamLeader's tasks
await tl.run()
# Log information about each task in the TeamLeader's plan
for task in tl.planner.plan.tasks:
logger.info(f"- {task.task_id}: {task.instruction} (Completed: {task.is_finished})")
# Send an empty response back to the human and log it
resp = await env.reply_to_human("")
logger.info(resp)
if __name__ == "__main__":
fire.Fire(main)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/di/sd_tool_usage.py | examples/di/sd_tool_usage.py | # -*- coding: utf-8 -*-
# @Date : 1/11/2024 7:06 PM
# @Author : stellahong (stellahong@fuzhi.ai)
# @Desc :
import asyncio
from metagpt.roles.di.data_interpreter import DataInterpreter
async def main(requirement: str = ""):
di = DataInterpreter(tools=["SDEngine"])
await di.run(requirement)
if __name__ == "__main__":
sd_url = "http://your.sd.service.ip:port"
requirement = (
f"I want to generate an image of a beautiful girl using the stable diffusion text2image tool, sd_url={sd_url}"
)
asyncio.run(main(requirement))
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/di/fix_github_issue.py | examples/di/fix_github_issue.py | """This example is from a real issue from MetaGPT: https://github.com/geekan/MetaGPT/issues/1067 with corresponding bugfix as https://github.com/geekan/MetaGPT/pull/1069
We demonstrate that DataInterpreter has the capability to fix such issues.
Prerequisite: You need to manually add the bug back to your local file metagpt/utils/repair_llm_raw_output.py to test DataInterpreter's debugging ability. For detail, please check the issue and PR link above.
"""
import asyncio
from metagpt.roles.di.data_interpreter import DataInterpreter
REQ = """
# Requirement
Below is a github issue, solve it. Use Editor to search for the function, understand it, and modify the relevant code.
Write a new test file test.py with Editor and use Terminal to python the test file to ensure you have fixed the issue.
When writing test.py, you should import the function from the file you modified and test it with the given input.
Notice: Don't write all codes in one response, each time, just write code for one step.
# Issue
>> s = "-1"
>> print(extract_state_value_from_output(s))
>> 1
The extract_state_value_from_output function will process -1 into 1,
resulted in an infinite loop for the react mode.
"""
async def main():
di = DataInterpreter(tools=["Terminal", "Editor"], react_mode="react")
await di.run(REQ)
if __name__ == "__main__":
asyncio.run(main())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/di/run_ml_benchmark.py | examples/di/run_ml_benchmark.py | import os
import fire
from examples.di.requirements_prompt import ML_BENCHMARK_REQUIREMENTS
from metagpt.const import DATA_PATH
from metagpt.roles.di.data_interpreter import DataInterpreter
from metagpt.tools.tool_recommend import TypeMatchToolRecommender
# Ensure ML-Benchmark dataset has been downloaded before using these example.
async def main(task_name, data_dir=DATA_PATH, use_reflection=True):
if data_dir != DATA_PATH and not os.path.exists(os.path.join(data_dir, "di_dataset/ml_benchmark")):
raise FileNotFoundError(f"ML-Benchmark dataset not found in {data_dir}.")
requirement = ML_BENCHMARK_REQUIREMENTS[task_name].format(data_dir=data_dir)
di = DataInterpreter(use_reflection=use_reflection, tool_recommender=TypeMatchToolRecommender(tools=["<all>"]))
await di.run(requirement)
if __name__ == "__main__":
fire.Fire(main)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/di/solve_math_problems.py | examples/di/solve_math_problems.py | import asyncio
from metagpt.roles.di.data_interpreter import DataInterpreter
async def main(requirement: str = ""):
di = DataInterpreter()
await di.run(requirement)
if __name__ == "__main__":
requirement = "Solve this math problem: The greatest common divisor of positive integers m and n is 6. The least common multiple of m and n is 126. What is the least possible value of m + n?"
# answer: 60 (m = 18, n = 42)
asyncio.run(main(requirement))
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.