repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/utils/test_code_parser.py | tests/metagpt/utils/test_code_parser.py | #!/usr/bin/env python
# coding: utf-8
"""
@Time : 2023/7/10 17:14
@Author : chengmaoyu
@File : test_code_parser.py
"""
import pytest
from metagpt.utils.common import CodeParser
t_text = '''
## Required Python third-party packages
```python
"""
flask==1.1.2
pygame==2.0.1
"""
```
## Required Other language third-party packages
```python
"""
No third-party packages required for other languages.
"""
```
## Full API spec
```python
"""
openapi: 3.0.0
info:
title: Web Snake Game API
version: 1.0.0
paths:
/game:
get:
summary: Get the current game state
responses:
'200':
description: A JSON object of the game state
post:
summary: Send a command to the game
requestBody:
required: true
content:
application/json:
schema:
type: object
properties:
command:
type: string
responses:
'200':
description: A JSON object of the updated game state
"""
```
## Logic Analysis
```python
[
("app.py", "Main entry point for the Flask application. Handles HTTP requests and responses."),
("game.py", "Contains the Game and Snake classes. Handles the game logic."),
("static/js/script.js", "Handles user interactions and updates the game UI."),
("static/css/styles.css", "Defines the styles for the game UI."),
("templates/index.html", "The main page of the web application. Displays the game UI.")
]
```
## Task list
```python
[
"game.py",
"app.py",
"static/css/styles.css",
"static/js/script.js",
"templates/index.html"
]
```
## Shared Knowledge
```python
"""
'game.py' contains the Game and Snake classes which are responsible for the game logic. The Game class uses an instance of the Snake class.
'app.py' is the main entry point for the Flask application. It creates an instance of the Game class and handles HTTP requests and responses.
'static/js/script.js' is responsible for handling user interactions and updating the game UI based on the game state returned by 'app.py'.
'static/css/styles.css' defines the styles for the game UI.
'templates/index.html' is the main page of the web application. It displays the game UI and loads 'static/js/script.js' and 'static/css/styles.css'.
"""
```
## Anything UNCLEAR
We need clarification on how the high score should be stored. Should it persist across sessions (stored in a database or a file) or should it reset every time the game is restarted? Also, should the game speed increase as the snake grows, or should it remain constant throughout the game?
'''
class TestCodeParser:
@pytest.fixture
def parser(self):
return CodeParser()
@pytest.fixture
def text(self):
return t_text
def test_parse_blocks(self, parser, text):
result = parser.parse_blocks(text)
print(result)
assert "game.py" in result["Task list"]
def test_parse_block(self, parser, text):
result = parser.parse_block("Task list", text)
print(result)
assert "game.py" in result
def test_parse_code(self, parser, text):
result = parser.parse_code(block="Task list", text=text, lang="python")
print(result)
assert "game.py" in result
def test_parse_str(self, parser, text):
result = parser.parse_str("Anything UNCLEAR", text, "python")
print(result)
assert "We need clarification on how the high score " in result
def test_parse_file_list(self, parser, text):
result = parser.parse_file_list("Task list", text)
print(result)
assert "game.py" in result
if __name__ == "__main__":
t = TestCodeParser()
t.test_parse_file_list(CodeParser(), t_text)
# TestCodeParser.test_parse_file_list()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/utils/test_redis.py | tests/metagpt/utils/test_redis.py | #!/usr/bin/env python3
# _*_ coding: utf-8 _*_
"""
@Time : 2023/12/27
@Author : mashenquan
@File : test_redis.py
"""
from unittest.mock import AsyncMock
import pytest
from metagpt.utils.redis import Redis
@pytest.mark.asyncio
async def test_redis(mocker):
async def async_mock_from_url(*args, **kwargs):
mock_client = AsyncMock()
mock_client.set.return_value = None
mock_client.get.return_value = b"test"
return mock_client
mocker.patch("aioredis.from_url", return_value=async_mock_from_url())
mock_config = mocker.Mock()
mock_config.to_url.return_value = "http://mock.com"
mock_config.username = "mockusername"
mock_config.password = "mockpwd"
mock_config.db = "0"
conn = Redis(mock_config)
await conn.set("test", "test", timeout_sec=0)
assert await conn.get("test") == b"test"
await conn.close()
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/utils/test_serialize.py | tests/metagpt/utils/test_serialize.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Desc : the unittest of serialize
"""
from typing import List
from metagpt.actions import WritePRD
from metagpt.actions.action_node import ActionNode
from metagpt.schema import Message
from metagpt.utils.serialize import (
actionoutout_schema_to_mapping,
deserialize_message,
serialize_message,
)
def test_actionoutout_schema_to_mapping():
schema = {"title": "test", "type": "object", "properties": {"field": {"title": "field", "type": "string"}}}
mapping = actionoutout_schema_to_mapping(schema)
assert mapping["field"] == (str, ...)
schema = {
"title": "test",
"type": "object",
"properties": {"field": {"title": "field", "type": "array", "items": {"type": "string"}}},
}
mapping = actionoutout_schema_to_mapping(schema)
assert mapping["field"] == (list[str], ...)
schema = {
"title": "test",
"type": "object",
"properties": {
"field": {
"title": "field",
"type": "array",
"items": {
"type": "array",
"minItems": 2,
"maxItems": 2,
"items": [{"type": "string"}, {"type": "string"}],
},
}
},
}
mapping = actionoutout_schema_to_mapping(schema)
assert mapping["field"] == (list[list[str]], ...)
assert True, True
def test_serialize_and_deserialize_message():
out_mapping = {"field1": (str, ...), "field2": (List[str], ...)}
out_data = {"field1": "field1 value", "field2": ["field2 value1", "field2 value2"]}
ic_obj = ActionNode.create_model_class("prd", out_mapping)
message = Message(
content="prd demand", instruct_content=ic_obj(**out_data), role="user", cause_by=WritePRD
) # WritePRD as test action
message_ser = serialize_message(message)
new_message = deserialize_message(message_ser)
assert new_message.content == message.content
assert new_message.cause_by == message.cause_by
assert new_message.instruct_content.field1 == out_data["field1"]
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/utils/test_file.py | tests/metagpt/utils/test_file.py | #!/usr/bin/env python3
# _*_ coding: utf-8 _*_
"""
@Time : 2023/9/4 15:40:40
@Author : Stitch-z
@File : test_file.py
"""
from pathlib import Path
import pytest
from metagpt.utils.file import File
@pytest.mark.asyncio
@pytest.mark.parametrize(
("root_path", "filename", "content"),
[
(
Path(__file__).parent / "../../../workspace/unittest/data/tutorial_docx/2023-09-07_17-05-20",
"test.md",
"Hello World!",
)
],
)
async def test_write_and_read_file(root_path: Path, filename: str, content: bytes):
full_file_name = await File.write(root_path=root_path, filename=filename, content=content.encode("utf-8"))
assert isinstance(full_file_name, Path)
assert root_path / filename == full_file_name
file_data = await File.read(full_file_name)
assert file_data.decode("utf-8") == content
@pytest.mark.asyncio
async def test_read_chunk():
val = await File.read(file_path=__file__, chunk_size=10)
assert val
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/utils/test_s3.py | tests/metagpt/utils/test_s3.py | #!/usr/bin/env python3
# _*_ coding: utf-8 _*_
"""
@Time : 2023/12/27
@Author : mashenquan
@File : test_s3.py
"""
import uuid
from pathlib import Path
import aioboto3
import pytest
from metagpt.config2 import Config
from metagpt.configs.s3_config import S3Config
from metagpt.utils.common import aread
from metagpt.utils.s3 import S3
@pytest.mark.asyncio
async def test_s3(mocker):
# Set up the mock response
data = await aread(__file__, "utf-8")
reader_mock = mocker.AsyncMock()
reader_mock.read.side_effect = [data.encode("utf-8"), b"", data.encode("utf-8")]
type(reader_mock).url = mocker.PropertyMock(return_value="https://mock")
mock_client = mocker.AsyncMock()
mock_client.put_object.return_value = None
mock_client.get_object.return_value = {"Body": reader_mock}
mock_client.__aenter__.return_value = mock_client
mock_client.__aexit__.return_value = None
mocker.patch.object(aioboto3.Session, "client", return_value=mock_client)
mock_config = mocker.Mock()
mock_config.s3 = S3Config(
access_key="mock_access_key",
secret_key="mock_secret_key",
endpoint="http://mock.endpoint",
bucket="mock_bucket",
)
mocker.patch.object(Config, "default", return_value=mock_config)
# Prerequisites
s3 = Config.default().s3
assert s3
conn = S3(s3)
object_name = "unittest.bak"
await conn.upload_file(bucket=s3.bucket, local_path=__file__, object_name=object_name)
pathname = (Path(__file__).parent / "../../../workspace/unittest" / uuid.uuid4().hex).with_suffix(".bak")
pathname.unlink(missing_ok=True)
await conn.download_file(bucket=s3.bucket, object_name=object_name, local_path=str(pathname))
assert pathname.exists()
url = await conn.get_object_url(bucket=s3.bucket, object_name=object_name)
assert url
bin_data = await conn.get_object(bucket=s3.bucket, object_name=object_name)
assert bin_data
data = await aread(filename=__file__)
res = await conn.cache(data, ".bak", "script")
assert "http" in res
# Mock session env
s3.access_key = "ABC"
type(reader_mock).url = mocker.PropertyMock(return_value="")
try:
conn = S3(s3)
res = await conn.cache("ABC", ".bak", "script")
assert not res
except Exception:
pass
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/utils/test_read_docx.py | tests/metagpt/utils/test_read_docx.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/4/29 16:02
@Author : alexanderwu
@File : test_read_docx.py
"""
import pytest
from metagpt.const import METAGPT_ROOT
from metagpt.utils.read_document import read_docx
@pytest.mark.skip # https://copyprogramming.com/howto/python-docx-error-opening-file-bad-magic-number-for-file-header-eoferror
class TestReadDocx:
def test_read_docx(self):
docx_sample = METAGPT_ROOT / "tests/data/docx_for_test.docx"
docx = read_docx(docx_sample)
assert len(docx) == 6
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/utils/test_git_repository.py | tests/metagpt/utils/test_git_repository.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/11/20
@Author : mashenquan
@File : test_git_repository.py
@Desc: Unit tests for git_repository.py
"""
import shutil
from pathlib import Path
import pytest
from metagpt.utils.common import awrite
from metagpt.utils.git_repository import GitRepository
async def mock_file(filename, content=""):
await awrite(filename=filename, data=content)
async def mock_repo(local_path) -> (GitRepository, Path):
if local_path.exists():
shutil.rmtree(local_path)
assert not local_path.exists()
repo = GitRepository(local_path=local_path, auto_init=True)
assert local_path.exists()
assert local_path == repo.workdir
assert not repo.changed_files
await mock_file(local_path / "a.txt")
await mock_file(local_path / "b.txt")
subdir = local_path / "subdir"
subdir.mkdir(parents=True, exist_ok=True)
await mock_file(subdir / "c.txt")
return repo, subdir
@pytest.mark.asyncio
async def test_git():
local_path = Path(__file__).parent / "git"
repo, subdir = await mock_repo(local_path)
assert len(repo.changed_files) == 3
repo.add_change(repo.changed_files)
repo.commit("commit1")
assert not repo.changed_files
await mock_file(local_path / "a.txt", "tests")
await mock_file(subdir / "d.txt")
rmfile = local_path / "b.txt"
rmfile.unlink()
assert repo.status
assert len(repo.changed_files) == 3
repo.add_change(repo.changed_files)
repo.commit("commit2")
assert not repo.changed_files
assert repo.status
exist_dir = repo.workdir / "git4"
exist_dir.mkdir(parents=True, exist_ok=True)
repo.rename_root("git4")
assert repo.workdir.name == "git4"
repo.delete_repository()
assert not local_path.exists()
@pytest.mark.asyncio
async def test_git1():
local_path = Path(__file__).parent / "git1"
await mock_repo(local_path)
repo1 = GitRepository(local_path=local_path, auto_init=False)
assert repo1.changed_files
file_repo = repo1.new_file_repository("__pycache__")
await file_repo.save("a.pyc", content="")
all_files = repo1.get_files(relative_path=".", filter_ignored=False)
assert "__pycache__/a.pyc" in all_files
all_files = repo1.get_files(relative_path=".", filter_ignored=True)
assert "__pycache__/a.pyc" not in all_files
res = repo1.filter_gitignore(filenames=["snake_game/snake_game/__pycache__", "snake_game/snake_game/game.py"])
assert res == ["snake_game/snake_game/game.py"]
repo1.delete_repository()
assert not local_path.exists()
@pytest.mark.asyncio
async def test_dependency_file():
local_path = Path(__file__).parent / "git2"
repo, subdir = await mock_repo(local_path)
dependancy_file = await repo.get_dependency()
assert not dependancy_file.exists
await dependancy_file.update(filename="a/b.txt", dependencies={"c/d.txt", "e/f.txt"})
assert dependancy_file.exists
repo.delete_repository()
assert not dependancy_file.exists
@pytest.mark.asyncio
async def test_git_open():
local_path = Path(__file__).parent / "git3"
local_path.mkdir(exist_ok=True, parents=True)
assert not GitRepository.is_git_dir(local_path)
repo = GitRepository()
repo.open(local_path, auto_init=False)
assert not repo.is_valid
assert not repo.status
assert not repo.workdir
shutil.rmtree(path=str(local_path), ignore_errors=True)
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/utils/test_custom_decoder.py | tests/metagpt/utils/test_custom_decoder.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/9/8 11:38
@Author : femto Zheng
@File : test_custom_decoder.py
"""
import pytest
from metagpt.utils.custom_decoder import CustomDecoder
def test_parse_single_quote():
# Create a custom JSON decoder
decoder = CustomDecoder(strict=False)
# Your provided input with single-quoted strings and line breaks
input_data = """{'a"
b':'"title": "Reach and engagement of campaigns",
"x-axis": "Low Reach --> High Reach",
"y-axis": "Low Engagement --> High Engagement",
"quadrant-1": "We should expand",
"quadrant-2": "Need to promote",
"quadrant-3": "Re-evaluate",
"quadrant-4": "May be improved",
"Campaign: A": [0.3, 0.6],
"Campaign B": [0.45, 0.23],
"Campaign C": [0.57, 0.69],
"Campaign D": [0.78, 0.34],
"Campaign E": [0.40, 0.34],
"Campaign F": [0.35, 0.78],
"Our Target Product": [0.5, 0.6]
'
}
"""
# Parse the JSON using the custom decoder
parsed_data = decoder.decode(input_data)
assert 'a"\n b' in parsed_data
input_data = """{
'a': "
b
"
}
"""
with pytest.raises(Exception):
parsed_data = decoder.decode(input_data)
input_data = """{
'a': '
b
'
}
"""
with pytest.raises(Exception):
parsed_data = decoder.decode(input_data)
def test_parse_double_quote():
decoder = CustomDecoder(strict=False)
input_data = """{
"a": "
b
"
}
"""
parsed_data = decoder.decode(input_data)
assert parsed_data["a"] == "\n b\n"
input_data = """{
"a": '
b
'
}
"""
parsed_data = decoder.decode(input_data)
assert parsed_data["a"] == "\n b\n"
def test_parse_triple_double_quote():
# Create a custom JSON decoder
decoder = CustomDecoder(strict=False)
# Your provided input with single-quoted strings and line breaks
input_data = '{"""a""":"b"}'
# Parse the JSON using the custom decoder
parsed_data = decoder.decode(input_data)
assert "a" in parsed_data
input_data = '{"""a""":"""b"""}'
# Parse the JSON using the custom decoder
parsed_data = decoder.decode(input_data)
assert parsed_data["a"] == "b"
input_data = "{\"\"\"a\"\"\": '''b'''}"
parsed_data = decoder.decode(input_data)
assert parsed_data["a"] == "b"
def test_parse_triple_single_quote():
# Create a custom JSON decoder
decoder = CustomDecoder(strict=False)
# Your provided input with single-quoted strings and line breaks
input_data = "{'''a''':'b'}"
# Parse the JSON using the custom decoder
parsed_data = decoder.decode(input_data)
assert "a" in parsed_data
input_data = "{'''a''':'''b'''}"
# Parse the JSON using the custom decoder
parsed_data = decoder.decode(input_data)
assert parsed_data["a"] == "b"
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/utils/test_token_counter.py | tests/metagpt/utils/test_token_counter.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/24 17:54
@Author : alexanderwu
@File : test_token_counter.py
"""
import pytest
from metagpt.utils.token_counter import count_message_tokens, count_output_tokens
def test_count_message_tokens():
messages = [
{"role": "user", "content": "Hello"},
{"role": "assistant", "content": "Hi there!"},
]
assert count_message_tokens(messages) == 15
def test_count_message_tokens_with_name():
messages = [
{"role": "user", "content": "Hello", "name": "John"},
{"role": "assistant", "content": "Hi there!"},
]
assert count_message_tokens(messages) == 17
def test_count_message_tokens_empty_input():
"""Empty input should return 3 tokens"""
assert count_message_tokens([]) == 3
def test_count_message_tokens_invalid_model():
"""Invalid model should raise a KeyError"""
messages = [
{"role": "user", "content": "Hello"},
{"role": "assistant", "content": "Hi there!"},
]
with pytest.raises(NotImplementedError):
count_message_tokens(messages, model="invalid_model")
def test_count_message_tokens_gpt_4():
messages = [
{"role": "user", "content": "Hello"},
{"role": "assistant", "content": "Hi there!"},
]
assert count_message_tokens(messages, model="gpt-4-0314") == 15
def test_count_string_tokens():
"""Test that the string tokens are counted correctly."""
string = "Hello, world!"
assert count_output_tokens(string, model="gpt-3.5-turbo-0301") == 4
def test_count_string_tokens_empty_input():
"""Test that the string tokens are counted correctly."""
assert count_output_tokens("", model="gpt-3.5-turbo-0301") == 0
def test_count_string_tokens_gpt_4():
"""Test that the string tokens are counted correctly."""
string = "Hello, world!"
assert count_output_tokens(string, model="gpt-4-0314") == 4
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/utils/test_common.py | tests/metagpt/utils/test_common.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/4/29 16:19
@Author : alexanderwu
@File : test_common.py
@Modified by: mashenquan, 2023/11/21. Add unit tests.
"""
import importlib
import os
import platform
import uuid
from pathlib import Path
from typing import Any, Set
import pytest
from pydantic import BaseModel
from metagpt.actions import RunCode
from metagpt.const import get_metagpt_root
from metagpt.roles.tutorial_assistant import TutorialAssistant
from metagpt.schema import Message
from metagpt.utils.common import (
NoMoneyException,
OutputParser,
any_to_str,
any_to_str_set,
aread,
awrite,
check_cmd_exists,
concat_namespace,
extract_and_encode_images,
extract_image_paths,
import_class_inst,
parse_recipient,
print_members,
read_file_block,
read_json_file,
require_python_version,
split_namespace,
)
class TestGetProjectRoot:
def change_etc_dir(self):
# current_directory = Path.cwd()
abs_root = "/etc"
os.chdir(abs_root)
def test_get_project_root(self):
project_root = get_metagpt_root()
src_path = project_root / "metagpt"
assert src_path.exists()
def test_get_root_exception(self):
self.change_etc_dir()
project_root = get_metagpt_root()
assert project_root
def test_any_to_str(self):
class Input(BaseModel):
x: Any = None
want: str
inputs = [
Input(x=TutorialAssistant, want="metagpt.roles.tutorial_assistant.TutorialAssistant"),
Input(x=TutorialAssistant(), want="metagpt.roles.tutorial_assistant.TutorialAssistant"),
Input(x=RunCode, want="metagpt.actions.run_code.RunCode"),
Input(x=RunCode(), want="metagpt.actions.run_code.RunCode"),
Input(x=Message, want="metagpt.schema.Message"),
Input(x=Message(content=""), want="metagpt.schema.Message"),
Input(x="A", want="A"),
]
for i in inputs:
v = any_to_str(i.x)
assert v == i.want
def test_any_to_str_set(self):
class Input(BaseModel):
x: Any = None
want: Set
inputs = [
Input(
x=[TutorialAssistant, RunCode(), "a"],
want={"metagpt.roles.tutorial_assistant.TutorialAssistant", "metagpt.actions.run_code.RunCode", "a"},
),
Input(
x={TutorialAssistant, "a"},
want={"metagpt.roles.tutorial_assistant.TutorialAssistant", "a"},
),
Input(
x=(TutorialAssistant, RunCode(), "a"),
want={"metagpt.roles.tutorial_assistant.TutorialAssistant", "metagpt.actions.run_code.RunCode", "a"},
),
Input(
x={"a": TutorialAssistant, "b": RunCode(), "c": "a"},
want={"a", "metagpt.roles.tutorial_assistant.TutorialAssistant", "metagpt.actions.run_code.RunCode"},
),
]
for i in inputs:
v = any_to_str_set(i.x)
assert v == i.want
def test_check_cmd_exists(self):
class Input(BaseModel):
command: str
platform: str
inputs = [
{"command": "cat", "platform": "linux"},
{"command": "ls", "platform": "linux"},
{"command": "mspaint", "platform": "windows"},
]
plat = "windows" if platform.system().lower() == "windows" else "linux"
for i in inputs:
seed = Input(**i)
result = check_cmd_exists(seed.command)
if plat == seed.platform:
assert result == 0
else:
assert result != 0
@pytest.mark.parametrize(("filename", "want"), [("1.md", "File list"), ("2.md", "Language"), ("3.md", "# TODOs")])
@pytest.mark.asyncio
async def test_parse_data_exception(self, filename, want):
pathname = Path(__file__).parent.parent.parent / "data/output_parser" / filename
assert pathname.exists()
data = await aread(filename=pathname)
result = OutputParser.parse_data(data=data)
assert want in result
@pytest.mark.parametrize(
("ver", "want", "err"), [((1, 2, 3, 4), False, True), ((2, 3, 9), True, False), ((3, 10, 18), False, False)]
)
def test_require_python_version(self, ver, want, err):
try:
res = require_python_version(ver)
assert res == want
except ValueError:
assert err
def test_no_money_exception(self):
val = NoMoneyException(3.10)
assert "Amount required:" in str(val)
@pytest.mark.parametrize("module_path", ["tests.metagpt.utils.test_common"])
def test_print_members(self, module_path):
module = importlib.import_module(module_path)
with pytest.raises(Exception) as info:
print_members(module)
assert info is None
@pytest.mark.parametrize(
("words", "want"), [("", ""), ("## Send To: Engineer", "Engineer"), ("Send To: \nNone", "None")]
)
def test_parse_recipient(self, words, want):
res = parse_recipient(words)
assert want == res
def test_concat_namespace(self):
assert concat_namespace("a", "b", "c") == "a:b:c"
assert concat_namespace("a", "b", "c", "e") == "a:b:c:e"
assert concat_namespace("a", "b", "c", "e", "f") == "a:b:c:e:f"
@pytest.mark.parametrize(
("val", "want"),
[
(
"tests/metagpt/test_role.py:test_react:Input:subscription",
["tests/metagpt/test_role.py", "test_react", "Input", "subscription"],
),
(
"tests/metagpt/test_role.py:test_react:Input:goal",
["tests/metagpt/test_role.py", "test_react", "Input", "goal"],
),
],
)
def test_split_namespace(self, val, want):
res = split_namespace(val, maxsplit=-1)
assert res == want
def test_read_json_file(self):
assert read_json_file(str(Path(__file__).parent / "../../data/ut_writer/yft_swaggerApi.json"), encoding="utf-8")
with pytest.raises(FileNotFoundError):
read_json_file("not_exists_file", encoding="utf-8")
with pytest.raises(ValueError):
read_json_file(__file__, encoding="utf-8")
def test_import_class_inst(self):
rc = import_class_inst("RunCode", "metagpt.actions.run_code", name="X")
assert rc.name == "X"
@pytest.mark.asyncio
async def test_read_file_block(self):
assert await read_file_block(filename=__file__, lineno=6, end_lineno=6) == "@File : test_common.py\n"
@pytest.mark.asyncio
async def test_read_write(self):
pathname = Path(__file__).parent / f"../../../workspace/unittest/{uuid.uuid4().hex}" / "test.tmp"
await awrite(pathname, "ABC")
data = await aread(pathname)
assert data == "ABC"
pathname.unlink(missing_ok=True)
@pytest.mark.asyncio
async def test_read_write_error_charset(self):
pathname = Path(__file__).parent / f"../../../workspace/unittest/{uuid.uuid4().hex}" / "test.txt"
content = "中国abc123\u27f6"
await awrite(filename=pathname, data=content)
data = await aread(filename=pathname)
assert data == content
content = "GB18030 是中国国家标准局发布的新一代中文字符集标准,是 GBK 的升级版,支持更广泛的字符范围。"
await awrite(filename=pathname, data=content, encoding="gb2312")
data = await aread(filename=pathname, encoding="utf-8")
assert data == content
def test_extract_image_paths():
content = """
Here are some image paths /home/user/images/photo1.jpg /home/user/images/photo2.png
# /absolute/path/to/image.gif"""
assert extract_image_paths(content) == [
"/home/user/images/photo1.jpg",
"/home/user/images/photo2.png",
"/absolute/path/to/image.gif",
]
content = "no image path"
assert not extract_image_paths(content)
def test_extract_and_encode_images():
assert not extract_and_encode_images("a non-existing.jpg")
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/utils/test_dependency_file.py | tests/metagpt/utils/test_dependency_file.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/11/22
@Author : mashenquan
@File : test_dependency_file.py
@Desc: Unit tests for dependency_file.py
"""
from __future__ import annotations
from pathlib import Path
from typing import Optional, Set, Union
import pytest
from pydantic import BaseModel
from metagpt.utils.dependency_file import DependencyFile
@pytest.mark.asyncio
async def test_dependency_file():
class Input(BaseModel):
x: Union[Path, str]
deps: Optional[Set[Union[Path, str]]] = None
key: Optional[Union[Path, str]] = None
want: Set[str]
inputs = [
Input(x="a/b.txt", deps={"c/e.txt", Path(__file__).parent / "d.txt"}, want={"c/e.txt", "d.txt"}),
Input(
x=Path(__file__).parent / "x/b.txt",
deps={"s/e.txt", Path(__file__).parent / "d.txt"},
key="x/b.txt",
want={"s/e.txt", "d.txt"},
),
Input(x="f.txt", deps=None, want=set()),
Input(x="a/b.txt", deps=None, want=set()),
]
file = DependencyFile(workdir=Path(__file__).parent)
for i in inputs:
await file.update(filename=i.x, dependencies=i.deps)
assert await file.get(filename=i.key or i.x) == i.want
file2 = DependencyFile(workdir=Path(__file__).parent)
file2.delete_file()
assert not file.exists
await file2.update(filename="a/b.txt", dependencies={"c/e.txt", Path(__file__).parent / "d.txt"}, persist=False)
assert not file.exists
await file2.save()
assert file2.exists
file1 = DependencyFile(workdir=Path(__file__).parent)
assert file1.exists
assert await file1.get("a/b.txt", persist=False) == set()
assert await file1.get("a/b.txt") == {"c/e.txt", "d.txt"}
await file1.load()
assert await file1.get("a/b.txt") == {"c/e.txt", "d.txt"}
file1.delete_file()
assert not file.exists
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/utils/test_repo_to_markdown.py | tests/metagpt/utils/test_repo_to_markdown.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import uuid
from pathlib import Path
import pytest
from metagpt.utils.repo_to_markdown import repo_to_markdown
@pytest.mark.parametrize(
["repo_path", "output"],
[
(
Path(__file__).parent.parent.parent,
Path(__file__).parent / f"../../../workspace/unittest/{uuid.uuid4().hex}.md",
),
],
)
@pytest.mark.asyncio
async def test_repo_to_markdown(repo_path: Path, output: Path):
markdown = await repo_to_markdown(repo_path=repo_path, output=output)
assert output.exists()
assert markdown
output.unlink(missing_ok=True)
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/utils/test_repair_llm_raw_output.py | tests/metagpt/utils/test_repair_llm_raw_output.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : unittest of repair_llm_raw_output
from metagpt.config2 import config
"""
CONFIG.repair_llm_output should be True before retry_parse_json_text imported.
so we move `from ... impot ...` into each `test_xx` to avoid `Module level import not at top of file` format warning.
"""
config.repair_llm_output = True
def test_repair_case_sensitivity():
from metagpt.utils.repair_llm_raw_output import repair_llm_raw_output
raw_output = """{
"Original requirements": "Write a 2048 game",
"search Information": "",
"competitive Quadrant charT": "quadrantChart
Campaign A: [0.3, 0.6]",
"requirement analysis": "The 2048 game should be simple to play"
}"""
target_output = """{
"Original Requirements": "Write a 2048 game",
"Search Information": "",
"Competitive Quadrant Chart": "quadrantChart
Campaign A: [0.3, 0.6]",
"Requirement Analysis": "The 2048 game should be simple to play"
}"""
req_keys = ["Original Requirements", "Search Information", "Competitive Quadrant Chart", "Requirement Analysis"]
output = repair_llm_raw_output(output=raw_output, req_keys=req_keys)
assert output == target_output
def test_repair_special_character_missing():
    """A closing [CONTENT] tag that lost its "/" is repaired to [/CONTENT]."""
    from metagpt.utils.repair_llm_raw_output import repair_llm_raw_output

    # Simple case: the trailing tag becomes the required closing tag.
    raw_output = """[CONTENT]
"Anything UNCLEAR": "No unclear requirements or information."
[CONTENT]"""
    target_output = """[CONTENT]
"Anything UNCLEAR": "No unclear requirements or information."
[/CONTENT]"""
    req_keys = ["[/CONTENT]"]
    output = repair_llm_raw_output(output=raw_output, req_keys=req_keys)
    assert output == target_output
    # Leading "[CONTENT] tag" noise survives; only the last tag is turned into [/CONTENT].
    raw_output = """[CONTENT] tag
[CONTENT]
{
"Anything UNCLEAR": "No unclear requirements or information."
}
[CONTENT]"""
    target_output = """[CONTENT] tag
[CONTENT]
{
"Anything UNCLEAR": "No unclear requirements or information."
}
[/CONTENT]"""
    output = repair_llm_raw_output(output=raw_output, req_keys=req_keys)
    assert output == target_output
    # Single-line variant.
    raw_output = '[CONTENT] {"a": "b"} [CONTENT]'
    target_output = '[CONTENT] {"a": "b"} [/CONTENT]'
    output = repair_llm_raw_output(output=raw_output, req_keys=["[/CONTENT]"])
    assert output == target_output
def test_required_key_pair_missing():
    """A missing closing [/CONTENT] is appended when only the opening tag exists."""
    from metagpt.utils.repair_llm_raw_output import repair_llm_raw_output

    # Single line: the closing tag is appended on a new line.
    raw_output = '[CONTENT] {"a": "b"}'
    target_output = '[CONTENT] {"a": "b"}\n[/CONTENT]'
    output = repair_llm_raw_output(output=raw_output, req_keys=["[/CONTENT]"])
    assert output == target_output
    # Multi-line body (even with a stray "]") gets the closing tag appended.
    raw_output = """[CONTENT]
{
"key": "value"
]"""
    target_output = """[CONTENT]
{
"key": "value"
]
[/CONTENT]"""
    output = repair_llm_raw_output(output=raw_output, req_keys=["[/CONTENT]"])
    assert output == target_output
    # Leading "[CONTENT] tag" noise and trailing junk are stripped; the pair is completed.
    raw_output = """[CONTENT] tag
[CONTENT]
{
"key": "value"
}
xxx
"""
    target_output = """[CONTENT]
{
"key": "value"
}
[/CONTENT]"""
    output = repair_llm_raw_output(output=raw_output, req_keys=["[/CONTENT]"])
    assert output == target_output
def test_repair_json_format():
    """JSON repair strips unbalanced brackets and end-of-line comments."""
    from metagpt.utils.repair_llm_raw_output import RepairType, repair_llm_raw_output

    # A stray trailing "]" after a complete object is dropped.
    raw_output = "{ xxx }]"
    target_output = "{ xxx }"
    output = repair_llm_raw_output(output=raw_output, req_keys=[None], repair_type=RepairType.JSON)
    assert output == target_output
    # A stray leading "[" before a complete object is dropped.
    raw_output = "[{ xxx }"
    target_output = "{ xxx }"
    output = repair_llm_raw_output(output=raw_output, req_keys=[None], repair_type=RepairType.JSON)
    assert output == target_output
    # A "]" used to close a "{" is corrected to "}".
    raw_output = "{ xxx ]"
    target_output = "{ xxx }"
    output = repair_llm_raw_output(output=raw_output, req_keys=[None], repair_type=RepairType.JSON)
    assert output == target_output
    # "#" end-of-line comments are removed; surrounding blank lines are trimmed.
    raw_output = """
{
"Language": "en_us", # define language
"Programming Language": "Python"
}
"""
    target_output = """{
"Language": "en_us",
"Programming Language": "Python"
}"""
    output = repair_llm_raw_output(output=raw_output, req_keys=[None], repair_type=RepairType.JSON)
    assert output == target_output
    # Both "//" and "#" comment styles are removed.
    raw_output = """
{
"Language": "en_us", // define language
"Programming Language": "Python" # define code language
}
"""
    target_output = """{
"Language": "en_us",
"Programming Language": "Python"
}"""
    output = repair_llm_raw_output(output=raw_output, req_keys=[None], repair_type=RepairType.JSON)
    assert output == target_output
    # Comment markers INSIDE quoted values must be preserved, per the expected output.
    raw_output = """
{
"Language": "#en_us#", // define language
"Programming Language": "//Python # Code // Language//" # define code language
}
"""
    target_output = """{
"Language": "#en_us#",
"Programming Language": "//Python # Code // Language//"
}"""
    output = repair_llm_raw_output(output=raw_output, req_keys=[None], repair_type=RepairType.JSON)
    assert output == target_output
def test_repair_invalid_json():
    """repair_invalid_json fixes one reported delimiter problem per call."""
    from metagpt.utils.repair_llm_raw_output import repair_invalid_json

    # The stray "}" on the line before the reported position is dropped.
    raw_output = """{
"key": "value"
},
}"""
    target_output = """{
"key": "value"
,
}"""
    output = repair_invalid_json(raw_output, "Expecting ',' delimiter: line 3 column 1")
    assert output == target_output
    # Double-quoted value spanning lines: two passes turn the stray "}," into "",".
    raw_output = """{
"key": "
value
},
}"""
    target_output = """{
"key": "
value
",
}"""
    output = repair_invalid_json(raw_output, "Expecting ',' delimiter: line 4 column 1")
    output = repair_invalid_json(output, "Expecting ',' delimiter: line 4 column 1")
    assert output == target_output
    # Same shape with a single-quoted value; three passes are needed here.
    raw_output = """{
"key": '
value
},
}"""
    target_output = """{
"key": '
value
',
}"""
    output = repair_invalid_json(raw_output, "Expecting ',' delimiter: line 4 column 1")
    output = repair_invalid_json(output, "Expecting ',' delimiter: line 4 column 1")
    output = repair_invalid_json(output, "Expecting ',' delimiter: line 4 column 1")
    assert output == target_output
    # Unescaped inner double quotes are escaped at the reported character position.
    raw_output = '{"key": "url "http" \\"https\\" "}'
    target_output = '{"key": "url \\"http\\" \\"https\\" "}'
    output = repair_invalid_json(raw_output, "Expecting ',' delimiter: line 1 column 15 (char 14)")
    assert output == target_output
def test_retry_parse_json_text():
    """retry_parse_json_text repairs near-JSON text and returns the parsed dict."""
    from metagpt.utils.repair_llm_raw_output import retry_parse_json_text

    # A "]" accidentally closing the object mid-way is repaired before parsing.
    invalid_json_text = """{
"Original Requirements": "Create a 2048 game",
"Competitive Quadrant Chart": "quadrantChart\n\ttitle Reach and engagement of campaigns\n\t\tx-axis"
],
"Requirement Analysis": "The requirements are clear and well-defined"
}"""
    target_json = {
        "Original Requirements": "Create a 2048 game",
        "Competitive Quadrant Chart": "quadrantChart\n\ttitle Reach and engagement of campaigns\n\t\tx-axis",
        "Requirement Analysis": "The requirements are clear and well-defined",
    }
    output = retry_parse_json_text(output=invalid_json_text)
    assert output == target_json
    # A premature "}," in the middle of the object is repaired as well.
    invalid_json_text = """{
"Original Requirements": "Create a 2048 game",
"Competitive Quadrant Chart": "quadrantChart\n\ttitle Reach and engagement of campaigns\n\t\tx-axis"
},
"Requirement Analysis": "The requirements are clear and well-defined"
}"""
    target_json = {
        "Original Requirements": "Create a 2048 game",
        "Competitive Quadrant Chart": "quadrantChart\n\ttitle Reach and engagement of campaigns\n\t\tx-axis",
        "Requirement Analysis": "The requirements are clear and well-defined",
    }
    output = retry_parse_json_text(output=invalid_json_text)
    assert output == target_json
    # A triple-quoted block value is converted into a plain JSON string.
    # NOTE(review): the expected value must mirror the raw block's internal
    # indentation exactly — confirm against the original file; the surrounding
    # whitespace appears to have been mangled in transit.
    invalid_json_text = '''{
"Data structures and interfaces": """
    class UI:
    - game_engine: GameEngine
    + __init__(engine: GameEngine) -> None
    + display_board() -> None
    + display_score() -> None
    + prompt_move() -> str
    + reset_game() -> None
    """
"Anything UNCLEAR": "no"
}'''
    target_json = {
        "Data structures and interfaces": "\n    class UI:\n    - game_engine: GameEngine\n    + __init__(engine: GameEngine) -> None\n    + display_board() -> None\n    + display_score() -> None\n    + prompt_move() -> str\n    + reset_game() -> None\n    ",
        "Anything UNCLEAR": "no",
    }
    output = retry_parse_json_text(output=invalid_json_text)
    assert output == target_json
def test_extract_content_from_output():
    """Extract the JSON payload between [CONTENT] ... [/CONTENT] markers.

    Cases covered:
        xxx [CONTENT] xxxx [/CONTENT]
        xxx [CONTENT] xxx [CONTENT] xxxx [/CONTENT]
        xxx [CONTENT] xxxx [/CONTENT] xxx [CONTENT][/CONTENT] xxx [CONTENT][/CONTENT]  # target pair is the last one
    """
    from metagpt.utils.repair_llm_raw_output import extract_content_from_output

    # Case 1: chatty preamble + one tag pair + chatty postscript.
    output = (
        'Sure! Here is the properly formatted JSON output based on the given context:\n\n[CONTENT]\n{\n"'
        'Required Python third-party packages": [\n"pygame==2.0.4",\n"pytest"\n],\n"Required Other language '
        'third-party packages": [\n"No third-party packages are required."\n],\n"Full API spec": "\nopenapi: '
        "3.0.0\n\ndescription: A JSON object representing the game state.\n\npaths:\n game:\n get:\n "
        "summary: Get the current game state.\n responses:\n 200:\n description: Game state."
        "\n\n moves:\n post:\n summary: Make a move.\n requestBody:\n description: Move to be "
        "made.\n content:\n applicationjson:\n schema:\n type: object\n "
        " properties:\n x:\n type: integer\n y:\n "
        " type: integer\n tile:\n type: object\n "
        "properties:\n value:\n type: integer\n x:\n "
        " type: integer\n y:\n type: integer\n\n "
        "undo-move:\n post:\n summary: Undo the last move.\n responses:\n 200:\n "
        " description: Undone move.\n\n end-game:\n post:\n summary: End the game.\n responses:\n "
        " 200:\n description: Game ended.\n\n start-game:\n post:\n summary: Start a new "
        "game.\n responses:\n 200:\n description: Game started.\n\n game-over:\n get:\n "
        " summary: Check if the game is over.\n responses:\n 200:\n description: Game "
        "over.\n 404:\n description: Game not over.\n\n score:\n get:\n summary: Get the "
        "current score.\n responses:\n 200:\n description: Score.\n\n tile:\n get:\n "
        "summary: Get a specific tile.\n parameters:\n tile_id:\n type: integer\n "
        "description: ID of the tile to get.\n responses:\n 200:\n description: Tile.\n\n "
        "tiles:\n get:\n summary: Get all tiles.\n responses:\n 200:\n description: "
        "Tiles.\n\n level:\n get:\n summary: Get the current level.\n responses:\n 200:\n "
        " description: Level.\n\n level-up:\n post:\n summary: Level up.\n responses:\n "
        "200:\n description: Level up successful.\n\n level-down:\n post:\n summary: Level "
        "down.\n responses:\n 200:\n description: Level down successful.\n\n restart:\n "
        "post:\n summary: Restart the game.\n responses:\n 200:\n description: Game "
        "restarted.\n\n help:\n get:\n summary: Get help.\n responses:\n 200:\n "
        "description: Help.\n\n version:\n get:\n summary: Get the version of the game.\n "
        'responses:\n 200:\n description: Version.\n\n}\n\n"Logic Analysis": [\n"game.py",'
        '\n"Contains the game logic."\n],\n"Task list": [\n"game.py",\n"Contains the game logic and should be '
        'done first."\n],\n"Shared Knowledge": "\n\'game.py\' contains the game logic.\n",\n"Anything '
        'UNCLEAR": "How to start the game."\n]\n\n[/CONTENT] Great! Your JSON output is properly formatted '
        "and correctly includes all the required sections. Here's a breakdown of what each section "
        "contains:\n\nRequired Python third-party packages:\n\n* pygame==2.0.4\n* pytest\n\nRequired Other "
        "language third-party packages:\n\n* No third-party packages are required.\n\nFull API spec:\n\n* "
        "openapi: 3.0.0\n* description: A JSON object representing the game state.\n* paths:\n + game: "
        "Get the current game state.\n + moves: Make a move.\n + undo-move: Undo the last move.\n + "
        "end-game: End the game.\n + start-game: Start a new game.\n + game-over: Check if the game is "
        "over.\n + score: Get the current score.\n + tile: Get a specific tile.\n + tiles: Get all tiles.\n "
        "+ level: Get the current level.\n + level-up: Level up.\n + level-down: Level down.\n + restart: "
        "Restart the game.\n + help: Get help.\n + version: Get the version of the game.\n\nLogic "
        "Analysis:\n\n* game.py contains the game logic.\n\nTask list:\n\n* game.py contains the game logic "
        "and should be done first.\n\nShared Knowledge:\n\n* 'game.py' contains the game logic.\n\nAnything "
        "UNCLEAR:\n\n* How to start the game.\n\nGreat job! This JSON output should provide a clear and "
        "comprehensive overview of the project's requirements and dependencies."
    )
    output = extract_content_from_output(output)
    # Only the payload between the tags remains; surrounding chatter is gone.
    assert output.startswith('{\n"Required Python third-party packages') and output.endswith(
        'UNCLEAR": "How to start the game."\n]'
    )
    # Case 2: preamble + one tag pair + polite sign-off.
    output = (
        "Sure, I would be happy to help! Here is the information you provided, formatted as a JSON object "
        'inside the [CONTENT] tag:\n\n[CONTENT]\n{\n"Original Requirements": "Create a 2048 game",\n"Search '
        'Information": "Search results for 2048 game",\n"Requirements": [\n"Create a game with the same rules '
        'as the original 2048 game",\n"Implement a user interface that is easy to use and understand",\n"Add a '
        'scoreboard to track the player progress",\n"Allow the player to undo and redo moves",\n"Implement a '
        'game over screen to display the final score"\n],\n"Product Goals": [\n"Create a fun and engaging game '
        'experience for the player",\n"Design a user interface that is visually appealing and easy to use",\n"'
        'Optimize the game for performance and responsiveness"\n],\n"User Stories": [\n"As a player, I want to '
        'be able to move tiles around the board to combine numbers",\n"As a player, I want to be able to undo '
        'and redo moves to correct mistakes",\n"As a player, I want to see the final score and game over screen'
        ' when I win"\n],\n"Competitive Analysis": [\n"Competitor A: 2048 game with a simple user interface and'
        ' basic graphics",\n"Competitor B: 2048 game with a more complex user interface and better graphics",'
        '\n"Competitor C: 2048 game with a unique twist on the rules and a more challenging gameplay experience"'
        '\n],\n"Competitive Quadrant Chart": "quadrantChart\\n\ttitle Reach and engagement of campaigns\\n\t\t'
        "x-axis Low Reach --> High Reach\\n\t\ty-axis Low Engagement --> High Engagement\\n\tquadrant-1 We "
        "should expand\\n\tquadrant-2 Need to promote\\n\tquadrant-3 Re-evaluate\\n\tquadrant-4 May be "
        "improved\\n\tCampaign A: [0.3, 0.6]\\n\tCampaign B: [0.45, 0.23]\\n\tCampaign C: [0.57, 0.69]\\n\t"
        'Campaign D: [0.78, 0.34]\\n\tCampaign E: [0.40, 0.34]\\n\tCampaign F: [0.35, 0.78]"\n],\n"Requirement '
        'Analysis": "The requirements are clear and well-defined, but there may be some ambiguity around the '
        'specific implementation details",\n"Requirement Pool": [\n["P0", "Implement a game with the same '
        'rules as the original 2048 game"],\n["P1", "Add a scoreboard to track the player progress"],\n["P2", '
        '"Allow the player to undo and redo moves"]\n],\n"UI Design draft": "The UI should be simple and easy '
        "to use, with a clean and visually appealing design. The game board should be the main focus of the "
        'UI, with clear and concise buttons for the player to interact with.",\n"Anything UNCLEAR": ""\n}\n'
        "[/CONTENT]\n\nI hope this helps! Let me know if you have any further questions or if there anything "
        "else I can do to assist you."
    )
    output = extract_content_from_output(output)
    assert output.startswith('{\n"Original Requirements"') and output.endswith('"Anything UNCLEAR": ""\n}')
    # Case 3: two identical tag pairs — the LAST pair is the one extracted.
    output = """ Sure, I'd be happy to help! Here's the JSON output for the given context:\n\n[CONTENT]\n{
"Implementation approach": "We will use the open-source framework PyGame to create a 2D game engine, which will
provide us with a robust and efficient way to handle game logic and rendering. PyGame is widely used in the game
development community and has a large number of resources and tutorials available online.",\n"Python package name":
"pygame_2048",\n"File list": ["main.py", "game.py", "constants.py", "ui.py"],\n"Data structures and interface
definitions": '\nclassDiagram\n class Game{\n +int score\n +list<tile> tiles\n +function
move_tile(tile, int dx, int dy)\n +function undo_move()\n +function get_highest_score()\n }\n
class Tile{\n +int value\n +int x\n +int y\n }\n ...\n Game "1" -- "1" Food: has\n',
\n"Program call flow": '\nsequenceDiagram\n participant M as Main\n participant G as Game\n ...\n G->>M:
end game\n',\n"Anything UNCLEAR": "The requirement is clear to me."\n}\n[/CONTENT] Here's the JSON output for the
given context, wrapped inside the [CONTENT][/CONTENT] format:\n\n[CONTENT]\n{\n"Implementation approach": "We will
use the open-source framework PyGame to create a 2D game engine, which will provide us with a robust and efficient
way to handle game logic and rendering. PyGame is widely used in the game development community and has a large
number of resources and tutorials available online.",\n"Python package name": "pygame_2048",\n"File list":
["main.py", "game.py", "constants.py", "ui.py"],\n"Data structures and interface definitions": '\nclassDiagram\n
class Game{\n +int score\n +list<tile> tiles\n +function move_tile(tile, int dx, int dy)\n
+function undo_move()\n +function get_highest_score()\n }\n class Tile{\n +int value\n +int x\n
+int y\n }\n ...\n Game "1" -- "1" Food: has\n',\n"Program call flow": '\nsequenceDiagram\n participant
M as Main\n participant G as Game\n ...\n G->>M: end game\n',\n"Anything UNCLEAR": "The requirement is
clear to me."\n}\n[/CONTENT] Great! Your JSON output is well-formatted and provides all the necessary
information for a developer to understand the design and implementation of the 2048 game.
"""
    output = extract_content_from_output(output)
    assert output.startswith('{\n"Implementation approach"') and output.endswith(
        '"Anything UNCLEAR": "The requirement is clear to me."\n}'
    )
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/utils/test_human_interaction.py | tests/metagpt/utils/test_human_interaction.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : unittest of human_interaction
from pydantic import BaseModel
from metagpt.utils.human_interaction import HumanInteraction
class InstructContent(BaseModel):
    """Minimal instruct-content model used as the interaction target in these tests."""

    # One scalar field and one list field so both input styles are exercised.
    test_field1: str = ""
    test_field2: list[str] = []


# Field name -> (required type, default marker) mapping consumed by HumanInteraction.
data_mapping = {"test_field1": (str, ...), "test_field2": (list[str], ...)}
# Shared helper instance reused by the tests below.
human_interaction = HumanInteraction()
def test_input_num(mocker):
    """Typing "quit" yields no interaction result; a valid index is echoed back."""
    # The user quits immediately, so the interaction returns an empty mapping.
    mocker.patch("builtins.input", lambda _: "quit")
    result = human_interaction.interact_with_instruct_content(InstructContent(), data_mapping)
    assert len(result) == 0
    # "1" is a valid choice when two options are offered.
    mocker.patch("builtins.input", lambda _: "1")
    assert human_interaction.input_num_until_valid(2) == 1
def test_check_input_type():
    """check_input_type accepts matching strings/list literals and rejects mismatches."""
    cases = [
        ("test string", str, True),
        ('["test string"]', list[str], True),
        ('{"key", "value"}', list[str], False),  # a set literal is not a list[str]
    ]
    for input_str, req_type, expected_ok in cases:
        valid, _ = human_interaction.check_input_type(input_str=input_str, req_type=req_type)
        assert bool(valid) is expected_ok
global_index = 0


def mock_input(*args, **kwargs):
    """Scripted stand-in for ``builtins.input``.

    Successive calls walk through a fixed reply list; the third call simulates
    the user closing stdin by raising ``EOFError``.
    """
    replies = ["1", '["test"]', "ignore", "quit"]
    global global_index
    global_index += 1
    if global_index == 3:
        # Simulate an aborted input stream on the third prompt.
        raise EOFError()
    return replies[global_index - 1]
def test_human_interact_valid_content(mocker):
    """The scripted input drives one valid field through review and revise modes."""
    global global_index
    mocker.patch("builtins.input", mock_input)
    # Review mode keeps the raw user string.
    reviewed = HumanInteraction().interact_with_instruct_content(InstructContent(), data_mapping, "review")
    assert len(reviewed) == 1
    assert reviewed["test_field2"] == '["test"]'
    # Replay the same scripted replies from the start for revise mode.
    global_index = 0
    revised = HumanInteraction().interact_with_instruct_content(InstructContent(), data_mapping, "revise")
    assert len(revised) == 1
    # Revise mode parses the string into the declared list[str] value.
    assert revised["test_field2"] == ["test"]
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/utils/test_di_graph_repository.py | tests/metagpt/utils/test_di_graph_repository.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/12/19
@Author : mashenquan
@File : test_di_graph_repository.py
@Desc : Unit tests for di_graph_repository.py
"""
from pathlib import Path
import pytest
from pydantic import BaseModel
from metagpt.const import DEFAULT_WORKSPACE_ROOT
from metagpt.repo_parser import RepoParser
from metagpt.utils.di_graph_repository import DiGraphRepository
from metagpt.utils.graph_repository import GraphRepository
@pytest.mark.asyncio
async def test_di_graph_repository():
    """Insert SPO triples, serialize the graph, then persist and remove its file."""

    class Triple(BaseModel):
        # Subject / predicate / object of one graph edge.
        s: str
        p: str
        o: str

    rows = [
        {"s": "main.py:Game:draw", "p": "method:hasDescription", "o": "Draw image"},
        {"s": "main.py:Game:draw", "p": "method:hasDescription", "o": "Show image"},
    ]
    repo = DiGraphRepository(name="test", root=Path(__file__).parent)
    for row in rows:
        triple = Triple(**row)
        await repo.insert(subject=triple.s, predicate=triple.p, object_=triple.o)
    # A populated graph must serialize to a non-empty JSON document.
    assert repo.json()
    await repo.save()
    assert repo.pathname.exists()
    # Clean up the file written by save().
    repo.pathname.unlink()
@pytest.mark.asyncio
async def test_js_parser():
    """Parse the sample code directory and load its symbols into a graph DB."""

    class Input(BaseModel):
        path: str  # directory to parse

    graph = DiGraphRepository(name="test", root=Path(__file__).parent)
    for raw in [{"path": str(Path(__file__).parent / "../../data/code")}]:
        source_dir = Input(**raw).path
        for file_info in RepoParser(base_directory=source_dir).generate_symbols():
            await GraphRepository.update_graph_db_with_file_info(graph_db=graph, file_info=file_info)
    # The resulting graph must serialize to non-empty JSON.
    assert graph.json()
@pytest.mark.asyncio
@pytest.mark.skip
async def test_codes():
    """Manual check: parse a generated workspace and load it into a graph DB.

    NOTE(review): presumably skipped because it needs a real `snake_game`
    workspace under DEFAULT_WORKSPACE_ROOT — confirm before unskipping.
    """
    path = DEFAULT_WORKSPACE_ROOT / "snake_game"
    repo_parser = RepoParser(base_directory=path)
    graph = DiGraphRepository(name="test", root=path)
    symbols = repo_parser.generate_symbols()
    for file_info in symbols:
        for code_block in file_info.page_info:
            try:
                val = code_block.model_dump_json()
                assert val
            except TypeError as e:
                # Any non-serializable code block fails the test explicitly.
                assert not e
        await GraphRepository.update_graph_db_with_file_info(graph_db=graph, file_info=file_info)
    data = graph.json()
    assert data
    print(data)
@pytest.mark.asyncio
async def test_graph_select():
    """Load a pre-built graph DB fixture and check that select() returns rows."""
    fixture = Path(__file__).parent / "../../data/graph_db/networkx.sequence_view.json"
    repo = await DiGraphRepository.load_from(fixture)
    assert await repo.select()
# Allow running this test module directly; delegates to pytest.
if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/utils/test_file_repository.py | tests/metagpt/utils/test_file_repository.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/11/20
@Author : mashenquan
@File : test_file_repository.py
@Desc: Unit tests for file_repository.py
"""
import shutil
from pathlib import Path
import pytest
from metagpt.utils.git_repository import ChangeType, GitRepository
from tests.metagpt.utils.test_git_repository import mock_file
@pytest.mark.asyncio
async def test_file_repo():
    """End-to-end FileRepository flow: save/get, dependencies, change tracking, delete."""
    local_path = Path(__file__).parent / "file_repo_git"
    # Start from a clean git workspace.
    if local_path.exists():
        shutil.rmtree(local_path)
    git_repo = GitRepository(local_path=local_path, auto_init=True)
    assert not git_repo.changed_files
    await mock_file(local_path / "g.txt", "")
    file_repo_path = "file_repo1"
    full_path = local_path / file_repo_path
    assert not full_path.exists()
    # Creating the file repository materializes its sub-directory.
    file_repo = git_repo.new_file_repository(file_repo_path)
    assert file_repo.workdir == full_path
    assert file_repo.workdir.exists()
    # b.txt declares dependencies both by absolute path and repo-relative path.
    await file_repo.save("a.txt", "AAA")
    await file_repo.save("b.txt", "BBB", [str(full_path / "a.txt"), f"{file_repo_path}/c.txt"])
    doc = await file_repo.get("a.txt")
    assert "AAA" == doc.content
    doc = await file_repo.get("b.txt")
    assert "BBB" == doc.content
    # Dependencies come back normalized to repo-relative names.
    assert {f"{file_repo_path}/a.txt", f"{file_repo_path}/c.txt"} == await file_repo.get_dependency("b.txt")
    # Both saved files are untracked; only the existing dependency counts as changed.
    assert {"a.txt": ChangeType.UNTRACTED, "b.txt": ChangeType.UNTRACTED} == file_repo.changed_files
    assert {f"{file_repo_path}/a.txt"} == await file_repo.get_changed_dependency("b.txt")
    await file_repo.save("d/e.txt", "EEE")
    assert ["d/e.txt"] == file_repo.get_change_dir_files("d")
    assert set(file_repo.all_files) == {"a.txt", "b.txt", "d/e.txt"}
    # Deleting must be idempotent.
    await file_repo.delete("d/e.txt")
    await file_repo.delete("d/e.txt")  # delete twice
    assert set(file_repo.all_files) == {"a.txt", "b.txt"}
    await file_repo.delete("b.txt")
    assert set(file_repo.all_files) == {"a.txt"}
    # Remove the scratch git repository.
    git_repo.delete_repository()
# Allow running this test module directly; delegates to pytest.
if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/utils/test_tree.py | tests/metagpt/utils/test_tree.py | from pathlib import Path
from typing import List
import pytest
from metagpt.utils.tree import _print_tree, tree
@pytest.mark.parametrize(
    ("root", "rules"),
    [
        (str(Path(__file__).parent / "../.."), None),
        (str(Path(__file__).parent / "../.."), str(Path(__file__).parent / "../../../.gitignore")),
    ],
)
def test_tree(root: str, rules: str):
    """tree() renders a non-empty listing, with or without a .gitignore filter."""
    listing = tree(root=root, gitignore=rules)
    assert listing
@pytest.mark.parametrize(
    ("root", "rules"),
    [
        (str(Path(__file__).parent / "../.."), None),
        (str(Path(__file__).parent / "../.."), str(Path(__file__).parent / "../../../.gitignore")),
    ],
)
def test_tree_command(root: str, rules: str):
    """Same as test_tree, but driven through the external `tree` command path."""
    listing = tree(root=root, gitignore=rules, run_command=True)
    assert listing
@pytest.mark.parametrize(
    ("tree", "want"),
    [
        # Flat: two children under one root.
        ({"a": {"b": {}, "c": {}}}, ["a", "+-- b", "+-- c"]),
        # One nested child.
        ({"a": {"b": {}, "c": {"d": {}}}}, ["a", "+-- b", "+-- c", " +-- d"]),
        # Two levels of nesting with siblings.
        (
            {"a": {"b": {"e": {"f": {}, "g": {}}}, "c": {"d": {}}}},
            ["a", "+-- b", "| +-- e", "| +-- f", "| +-- g", "+-- c", " +-- d"],
        ),
        # Deep tree with a trailing sibling at the top level.
        (
            {"h": {"a": {"b": {"e": {"f": {}, "g": {}}}, "c": {"d": {}}}, "i": {}}},
            [
                "h",
                "+-- a",
                "| +-- b",
                "| | +-- e",
                "| | +-- f",
                "| | +-- g",
                "| +-- c",
                "| +-- d",
                "+-- i",
            ],
        ),
    ],
)
def test__print_tree(tree: dict, want: List[str]):
    """_print_tree renders a nested dict as ASCII lines with +-- / | connectors."""
    # NOTE(review): the exact connector spacing in `want` may have been mangled
    # in transit — confirm against the _print_tree implementation.
    v = _print_tree(tree)
    assert v == want
# Allow running this test module directly; delegates to pytest.
if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/utils/test_output_parser.py | tests/metagpt/utils/test_output_parser.py | #!/usr/bin/env python
# coding: utf-8
"""
@Time : 2023/7/11 10:25
@Author : chengmaoyu
@File : test_output_parser.py
"""
from typing import List, Tuple, Union
import pytest
from metagpt.utils.common import OutputParser
def test_parse_blocks():
    """"##" headings split the text into a {title: body} mapping."""
    text = "##block1\nThis is block 1.\n##block2\nThis is block 2."
    assert OutputParser.parse_blocks(text) == {"block1": "This is block 1.", "block2": "This is block 2."}
def test_parse_code():
    """A fenced ```python``` block is extracted; asking for another language raises."""
    text = "```python\nprint('Hello, world!')```"
    assert OutputParser.parse_code(text, "python") == "print('Hello, world!')"
    # The fence is tagged "python", so requesting "java" must fail.
    with pytest.raises(Exception):
        OutputParser.parse_code(text, "java")
def test_parse_python_code():
    """parse_python_code tolerates missing/partial fences and surrounding prose."""
    expected_result = "print('Hello, world!')"
    # Complete fence, missing closing fence, no fence, and trailing fence only.
    assert OutputParser.parse_python_code("```python\nprint('Hello, world!')```") == expected_result
    assert OutputParser.parse_python_code("```python\nprint('Hello, world!')") == expected_result
    assert OutputParser.parse_python_code("print('Hello, world!')") == expected_result
    # Fix: this assertion was duplicated verbatim in the original; one copy removed.
    assert OutputParser.parse_python_code("print('Hello, world!')```") == expected_result
    # Backticks inside the code itself must survive extraction.
    expected_result = "print('```Hello, world!```')"
    assert OutputParser.parse_python_code("```python\nprint('```Hello, world!```')```") == expected_result
    assert OutputParser.parse_python_code("The code is: ```python\nprint('```Hello, world!```')```") == expected_result
    assert OutputParser.parse_python_code("xxx.\n```python\nprint('```Hello, world!```')```\nxxx") == expected_result
    # Invalid Python must be rejected.
    with pytest.raises(ValueError):
        OutputParser.parse_python_code("xxx =")
def test_parse_str():
    """The quoted value on the right-hand side of an assignment is extracted."""
    assert OutputParser.parse_str("name = 'Alice'") == "Alice"
def test_parse_file_list():
    """A `files=[...]` assignment is parsed into a plain list of names."""
    text = "files=['file1', 'file2', 'file3']"
    assert OutputParser.parse_file_list(text) == ["file1", "file2", "file3"]
    # NOTE(review): the malformed-input case is intentionally left disabled upstream.
    # with pytest.raises(Exception):
    #     OutputParser.parse_file_list("wrong_input")
def test_parse_data():
    """parse_data combines block splitting, code extraction, and list parsing."""
    text = "##block1\n```python\nprint('Hello, world!')\n```\n##block2\nfiles=['file1', 'file2', 'file3']"
    want = {"block1": "print('Hello, world!')\n", "block2": ["file1", "file2", "file3"]}
    assert OutputParser.parse_data(text) == want
@pytest.mark.parametrize(
    ("text", "data_type", "parsed_data", "expected_exception"),
    [
        # Nested list with mixed element types, surrounded by noise.
        (
            """xxx [1, 2, ["a", "b", [3, 4]], {"x": 5, "y": [6, 7]}] xxx""",
            list,
            [1, 2, ["a", "b", [3, 4]], {"x": 5, "y": [6, 7]}],
            None,
        ),
        # Trailing whitespace/noise after the literal.
        (
            """xxx ["1", "2", "3"] xxx \n xxx \t xx""",
            list,
            ["1", "2", "3"],
            None,
        ),
        # A bare dict with nested structures.
        (
            """{"title": "a", "directory": {"sub_dir1": ["title1, title2"]}, "sub_dir2": [1, 2]}""",
            dict,
            {"title": "a", "directory": {"sub_dir1": ["title1, title2"]}, "sub_dir2": [1, 2]},
            None,
        ),
        # Whitespace inside the literal itself.
        (
            """xxx {"title": "x", \n \t "directory": ["x", \n "y"]} xxx \n xxx \t xx""",
            dict,
            {"title": "x", "directory": ["x", "y"]},
            None,
        ),
        # No literal at all -> empty result, no exception expected.
        (
            """xxx xx""",
            list,
            [],
            [],
        ),
        # Unbalanced literal -> any Exception is acceptable.
        (
            """xxx [1, 2, []xx""",
            list,
            None,
            Exception,
        ),
    ],
)
def test_extract_struct(
    text: str, data_type: type, parsed_data: Union[list, dict], expected_exception
):
    """extract_struct pulls the first list/dict literal out of noisy text.

    Note: the annotation was `Union[type(list), type(dict)]`, which evaluates
    to just `type`; it is written as `type` directly now.
    """

    def case():
        resp = OutputParser.extract_struct(text, data_type)
        assert resp == parsed_data

    # A falsy `expected_exception` ([] or None) means the call must succeed.
    if expected_exception:
        with pytest.raises(expected_exception):
            case()
    else:
        case()
def test_parse_with_markdown_mapping():
    """A [CONTENT]-wrapped markdown document parses into the typed OUTPUT_MAPPING."""
    # Expected section name -> (type, required) mapping for the PRD-style document.
    OUTPUT_MAPPING = {
        "Original Requirements": (str, ...),
        "Product Goals": (List[str], ...),
        "User Stories": (List[str], ...),
        "Competitive Analysis": (List[str], ...),
        "Competitive Quadrant Chart": (str, ...),
        "Requirement Analysis": (str, ...),
        "Requirement Pool": (List[Tuple[str, str]], ...),
        "Anything UNCLEAR": (str, ...),
    }
    t_text_with_content_tag = """[CONTENT]## Original Requirements:
The user wants to create a web-based version of the game "Fly Bird".
## Product Goals:
- Create a web-based version of the game "Fly Bird" that is engaging and addictive.
- Provide a seamless and intuitive user experience.
- Optimize the game for different devices and screen sizes.
## User Stories:
- As a user, I want to be able to control the bird's flight by clicking or tapping on the screen.
- As a user, I want to see my score and the highest score achieved in the game.
- As a user, I want the game to be challenging but not frustratingly difficult.
- As a user, I want to be able to pause and resume the game at any time.
- As a user, I want to be able to share my score on social media.
## Competitive Analysis:
- Flappy Bird: A popular mobile game where the player controls a bird's flight through a series of obstacles.
- Angry Birds: A physics-based puzzle game where the player launches birds to destroy structures and defeat pigs.
- Snake Game: A classic game where the player controls a snake to eat food and grow longer without hitting the walls or its own body.
- Temple Run: An endless running game where the player controls a character to avoid obstacles and collect coins.
- Subway Surfers: An endless running game where the player controls a character to avoid obstacles and collect coins while being chased by a guard.
- Doodle Jump: A vertical platform game where the player controls a character to jump on platforms and avoid falling.
- Fruit Ninja: A fruit-slicing game where the player uses their finger to slice flying fruits.
## Competitive Quadrant Chart:
```mermaid
quadrantChart
title Reach and engagement of games
x-axis Low Reach --> High Reach
y-axis Low Engagement --> High Engagement
quadrant-1 We should expand
quadrant-2 Need to promote
quadrant-3 Re-evaluate
quadrant-4 May be improved
"Flappy Bird": [0.8, 0.9]
"Angry Birds": [0.9, 0.8]
"Snake Game": [0.6, 0.6]
"Temple Run": [0.9, 0.7]
"Subway Surfers": [0.9, 0.7]
"Doodle Jump": [0.7, 0.5]
"Fruit Ninja": [0.8, 0.6]
"Our Target Product": [0.7, 0.8]
```
## Requirement Analysis:
The product should be a web-based version of the game "Fly Bird" that is engaging, addictive, and optimized for different devices and screen sizes. It should provide a seamless and intuitive user experience, with controls that allow the user to control the bird's flight by clicking or tapping on the screen. The game should display the user's score and the highest score achieved. It should be challenging but not frustratingly difficult, allowing the user to pause and resume the game at any time. The user should also have the option to share their score on social media.
## Requirement Pool:
```python
[
    ("Implement bird's flight control using click or tap", "P0"),
    ("Display user's score and highest score achieved", "P0"),
    ("Implement challenging but not frustrating difficulty level", "P1"),
    ("Allow user to pause and resume the game", "P1"),
    ("Implement social media sharing feature", "P2")
]
```
## Anything UNCLEAR:
There are no unclear points.
[/CONTENT]"""
    # Strip the wrapper tags to compute the expected section text below.
    t_text_raw = t_text_with_content_tag.replace("[CONTENT]", "").replace("[/CONTENT]", "")
    d = OutputParser.parse_data_with_mapping(t_text_with_content_tag, OUTPUT_MAPPING)
    import json
    print(json.dumps(d))
    # The parsed section must equal the raw text between its heading and the next one.
    assert d["Original Requirements"] == t_text_raw.split("## Original Requirements:")[1].split("##")[0].strip()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/utils/test_text.py | tests/metagpt/utils/test_text.py | import pytest
from metagpt.utils.text import (
decode_unicode_escape,
generate_prompt_chunk,
reduce_message_length,
split_paragraph,
)
def _msgs():
length = 20
while length:
yield "Hello," * 1000 * length
length -= 1
def _paragraphs(n):
return " ".join("Hello World." for _ in range(n))
@pytest.mark.parametrize(
    "msgs, model, system_text, reserved, expected",
    [
        (_msgs(), "gpt-3.5-turbo-0613", "System", 1500, 1),
        (_msgs(), "gpt-3.5-turbo-16k", "System", 3000, 6),
        (_msgs(), "gpt-3.5-turbo-16k", "Hello," * 1000, 3000, 5),
        (_msgs(), "gpt-4", "System", 2000, 3),
        (_msgs(), "gpt-4", "Hello," * 1000, 2000, 2),
        (_msgs(), "gpt-4-32k", "System", 4000, 14),
        (_msgs(), "gpt-4-32k", "Hello," * 2000, 4000, 12),
    ],
)
def test_reduce_message_length(msgs, model, system_text, reserved, expected):
    """reduce_message_length should keep the largest candidate fitting the model's budget.

    Fix: the parametrize argnames declare ``model`` but the function previously took
    ``model_name`` — pytest errors at collection time when argnames and test-function
    parameters do not match.
    """
    # Each candidate message is a multiple of 1000 "Hello," units; recover that multiple.
    length = len(reduce_message_length(msgs, model, system_text, reserved)) / (len("Hello,")) / 1000
    assert length == expected
@pytest.mark.parametrize(
    "text, prompt_template, model, system_text, reserved, expected",
    [
        (" ".join("Hello World." for _ in range(1000)), "Prompt: {}", "gpt-3.5-turbo-0613", "System", 1500, 2),
        (" ".join("Hello World." for _ in range(1000)), "Prompt: {}", "gpt-3.5-turbo-16k", "System", 3000, 1),
        (" ".join("Hello World." for _ in range(4000)), "Prompt: {}", "gpt-4", "System", 2000, 2),
        (" ".join("Hello World." for _ in range(8000)), "Prompt: {}", "gpt-4-32k", "System", 4000, 1),
        (" ".join("Hello World" for _ in range(8000)), "Prompt: {}", "gpt-3.5-turbo-0613", "System", 1000, 8),
    ],
)
def test_generate_prompt_chunk(text, prompt_template, model, system_text, reserved, expected):
    """generate_prompt_chunk should split *text* into the expected number of prompt chunks.

    Fix: the parametrize argnames declare ``model`` while the function previously
    accepted ``model_name``; pytest fails collection on the mismatch.
    """
    chunk_count = len(list(generate_prompt_chunk(text, prompt_template, model, system_text, reserved)))
    assert chunk_count == expected
@pytest.mark.parametrize(
    "paragraph, sep, count, expected",
    [
        (_paragraphs(10), ".", 2, [_paragraphs(5), f" {_paragraphs(5)}"]),
        (_paragraphs(10), ".", 3, [_paragraphs(4), f" {_paragraphs(3)}", f" {_paragraphs(3)}"]),
        (f"{_paragraphs(5)}\n{_paragraphs(3)}", "\n.", 2, [f"{_paragraphs(5)}\n", _paragraphs(3)]),
        ("......", ".", 2, ["...", "..."]),
        ("......", ".", 3, ["..", "..", ".."]),
        (".......", ".", 2, ["....", "..."]),
    ],
)
def test_split_paragraph(paragraph, sep, count, expected):
    """split_paragraph should cut *paragraph* into *count* chunks at any separator in *sep*."""
    assert split_paragraph(paragraph, sep, count) == expected
@pytest.mark.parametrize(
    "text, expected",
    [
        ("Hello\\nWorld", "Hello\nWorld"),
        ("Hello\\tWorld", "Hello\tWorld"),
        ("Hello\\u0020World", "Hello World"),
    ],
)
def test_decode_unicode_escape(text, expected):
    """Literal backslash escape sequences in *text* should decode to real characters."""
    decoded = decode_unicode_escape(text)
    assert decoded == expected
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/utils/test_pycst.py | tests/metagpt/utils/test_pycst.py | from metagpt.utils import pycst
code = """
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from typing import overload
@overload
def add_numbers(a: int, b: int):
...
@overload
def add_numbers(a: float, b: float):
...
def add_numbers(a: int, b: int):
return a + b
class Person:
def __init__(self, name: str, age: int):
self.name = name
self.age = age
def greet(self):
return f"Hello, my name is {self.name} and I am {self.age} years old."
"""
documented_code = '''
"""
This is an example module containing a function and a class definition.
"""
def add_numbers(a: int, b: int):
"""This function is used to add two numbers and return the result.
Parameters:
a: The first integer.
b: The second integer.
Returns:
int: The sum of the two numbers.
"""
return a + b
class Person:
"""This class represents a person's information, including name and age.
Attributes:
name: The person's name.
age: The person's age.
"""
def __init__(self, name: str, age: int):
"""Creates a new instance of the Person class.
Parameters:
name: The person's name.
age: The person's age.
"""
...
def greet(self):
"""
Returns a greeting message including the name and age.
Returns:
str: The greeting message.
"""
...
'''
merged_code = '''
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This is an example module containing a function and a class definition.
"""
from typing import overload
@overload
def add_numbers(a: int, b: int):
...
@overload
def add_numbers(a: float, b: float):
...
def add_numbers(a: int, b: int):
"""This function is used to add two numbers and return the result.
Parameters:
a: The first integer.
b: The second integer.
Returns:
int: The sum of the two numbers.
"""
return a + b
class Person:
"""This class represents a person's information, including name and age.
Attributes:
name: The person's name.
age: The person's age.
"""
def __init__(self, name: str, age: int):
"""Creates a new instance of the Person class.
Parameters:
name: The person's name.
age: The person's age.
"""
self.name = name
self.age = age
def greet(self):
"""
Returns a greeting message including the name and age.
Returns:
str: The greeting message.
"""
return f"Hello, my name is {self.name} and I am {self.age} years old."
'''
def test_merge_docstring():
    """Grafting docstrings from documented_code onto code must reproduce merged_code."""
    result = pycst.merge_docstring(code, documented_code)
    print(result)
    assert result == merged_code
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/utils/__init__.py | tests/metagpt/utils/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/4/29 16:01
@Author : alexanderwu
@File : __init__.py
"""
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/utils/test_parse_html.py | tests/metagpt/utils/test_parse_html.py | from metagpt.utils import parse_html
PAGE = """
<!DOCTYPE html>
<html>
<head>
<title>Random HTML Example</title>
</head>
<body>
<h1>This is a Heading</h1>
<p>This is a paragraph with <a href="test">a link</a> and some <em>emphasized</em> text.</p>
<ul>
<li>Item 1</li>
<li>Item 2</li>
<li>Item 3</li>
</ul>
<ol>
<li>Numbered Item 1</li>
<li>Numbered Item 2</li>
<li>Numbered Item 3</li>
</ol>
<table>
<tr>
<th>Header 1</th>
<th>Header 2</th>
</tr>
<tr>
<td>Row 1, Cell 1</td>
<td>Row 1, Cell 2</td>
</tr>
<tr>
<td>Row 2, Cell 1</td>
<td>Row 2, Cell 2</td>
</tr>
</table>
<img src="image.jpg" alt="Sample Image">
<form action="/submit" method="post">
<label for="name">Name:</label>
<input type="text" id="name" name="name" required>
<label for="email">Email:</label>
<input type="email" id="email" name="email" required>
<button type="submit">Submit</button>
</form>
<div class="box">
<p>This is a div with a class "box".</p>
<p><a href="https://metagpt.com">a link</a></p>
<p><a href="#section2"></a></p>
<p><a href="ftp://192.168.1.1:8080"></a></p>
<p><a href="javascript:alert('Hello');"></a></p>
</div>
</body>
</html>
"""
# Expected plain text extracted from PAGE by get_html_content (tags stripped,
# whitespace collapsed; only visible text nodes survive).
CONTENT = (
    "This is a HeadingThis is a paragraph witha linkand someemphasizedtext.Item 1Item 2Item 3Numbered Item 1Numbered "
    "Item 2Numbered Item 3Header 1Header 2Row 1, Cell 1Row 1, Cell 2Row 2, Cell 1Row 2, Cell 2Name:Email:SubmitThis is a div "
    'with a class "box".a link'
)
def test_web_page():
    """WebPage should expose the document title and only the resolvable http(s) links."""
    page = parse_html.WebPage(inner_text=CONTENT, html=PAGE, url="http://example.com")
    assert page.title == "Random HTML Example"
    # Relative hrefs are resolved against the page url; empty anchors, ftp and
    # javascript links in PAGE are expected to be filtered out.
    links = list(page.get_links())
    assert links == ["http://example.com/test", "https://metagpt.com"]
def test_get_page_content():
    """get_html_content should flatten PAGE's visible text into CONTENT."""
    extracted = parse_html.get_html_content(PAGE, "http://example.com")
    assert extracted == CONTENT
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/utils/test_save_code.py | tests/metagpt/utils/test_save_code.py | # -*- coding: utf-8 -*-
# @Date : 12/12/2023 4:17 PM
# @Author : stellahong (stellahong@fuzhi.ai)
# @Desc :
import nbformat
import pytest
from metagpt.actions.di.execute_nb_code import ExecuteNbCode
from metagpt.utils.common import read_json_file
from metagpt.utils.save_code import DATA_PATH, save_code_file
def test_save_code_file_python():
    """save_code_file should write raw Python source to DATA_PATH/output/<name>/code.py."""
    save_code_file("example", "print('Hello, World!')")
    file_path = DATA_PATH / "output" / "example" / "code.py"
    assert file_path.exists(), f"File does not exist: {file_path}"
    # Read the file back and confirm the code round-tripped unchanged.
    assert "print('Hello, World!')" in file_path.read_text(), "File content does not match"
def test_save_code_file_json():
    """With file_format="json", the code must be stored under the "code" key."""
    save_code_file("example_json", "print('Hello, JSON!')", file_format="json")
    file_path = DATA_PATH / "output" / "example_json" / "code.json"
    payload = read_json_file(file_path)
    assert "code" in payload, "JSON key 'code' is missing"
    assert payload["code"] == "print('Hello, JSON!')", "JSON content does not match"
@pytest.mark.asyncio
async def test_save_code_file_notebook():
    """Execute a snippet via ExecuteNbCode, then persist and re-read the notebook."""
    code = "print('Hello, World!')"
    executor = ExecuteNbCode()
    await executor.run(code)
    # Save as a Notebook file
    save_code_file("example_nb", executor.nb, file_format="ipynb")
    file_path = DATA_PATH / "output" / "example_nb" / "code.ipynb"
    assert file_path.exists(), f"Notebook file does not exist: {file_path}"
    # Additional checks specific to notebook format
    notebook = nbformat.read(file_path, as_version=4)
    assert len(notebook.cells) > 0, "Notebook should have at least one cell"
    first_cell_source = notebook.cells[0].source
    assert "print" in first_cell_source, "Notebook cell content does not match"
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/utils/test_project_repo.py | tests/metagpt/utils/test_project_repo.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/1/8
@Author : mashenquan
"""
import uuid
from pathlib import Path
import pytest
from metagpt.const import (
BUGFIX_FILENAME,
PACKAGE_REQUIREMENTS_FILENAME,
PRDS_FILE_REPO,
REQUIREMENT_FILENAME,
)
from metagpt.utils.project_repo import ProjectRepo
@pytest.mark.asyncio
async def test_project_repo():
    """End-to-end smoke test of ProjectRepo save/get/dependency bookkeeping.

    Fix: this async test was missing ``@pytest.mark.asyncio`` — every other async
    test in this suite carries the marker; without it pytest does not await the
    coroutine and the test silently never runs.
    """
    # Use a unique scratch workspace so parallel runs don't collide.
    root = Path(__file__).parent / f"../../../workspace/unittest/{uuid.uuid4().hex}"
    root = root.resolve()
    pr = ProjectRepo(root=str(root))
    assert pr.git_repo.workdir == root
    assert pr.workdir == pr.git_repo.workdir
    # Round-trip a few well-known documents through the repo root.
    await pr.save(filename=REQUIREMENT_FILENAME, content=REQUIREMENT_FILENAME)
    doc = await pr.get(filename=REQUIREMENT_FILENAME)
    assert doc.content == REQUIREMENT_FILENAME
    await pr.save(filename=BUGFIX_FILENAME, content=BUGFIX_FILENAME)
    doc = await pr.get(filename=BUGFIX_FILENAME)
    assert doc.content == BUGFIX_FILENAME
    await pr.save(filename=PACKAGE_REQUIREMENTS_FILENAME, content=PACKAGE_REQUIREMENTS_FILENAME)
    doc = await pr.get(filename=PACKAGE_REQUIREMENTS_FILENAME)
    assert doc.content == PACKAGE_REQUIREMENTS_FILENAME
    # Documents under docs/prd and resources/prd, with dependency tracking.
    await pr.docs.prd.save(filename="1.prd", content="1.prd", dependencies=[REQUIREMENT_FILENAME])
    doc = await pr.docs.prd.get(filename="1.prd")
    assert doc.content == "1.prd"
    await pr.resources.prd.save(
        filename="1.prd",
        content="1.prd",
        dependencies=[REQUIREMENT_FILENAME, f"{PRDS_FILE_REPO}/1.prd"],
    )
    doc = await pr.resources.prd.get(filename="1.prd")
    assert doc.content == "1.prd"
    dependencies = await pr.resources.prd.get_dependency(filename="1.prd")
    assert len(dependencies) == 2
    assert pr.changed_files
    assert pr.docs.prd.changed_files
    assert not pr.tests.changed_files
    # srcs is undefined until a source path is attached via with_src_path.
    with pytest.raises(ValueError):
        pr.srcs
    assert pr.with_src_path("test_src").srcs.root_path == Path("test_src")
    assert pr.src_relative_path == Path("test_src")
    pr.git_repo.delete_repository()
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/utils/test_json_to_markdown.py | tests/metagpt/utils/test_json_to_markdown.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/9/11 11:53
@Author : femto Zheng
@File : test_json_to_markdown.py
"""
from metagpt.utils.json_to_markdown import json_to_markdown
def test_json_to_markdown():
    """json_to_markdown should render nested dicts as progressively deeper headings."""
    # Example nested JSON data
    json_data = {
        "title": "Sample JSON to Markdown Conversion",
        "description": "Convert JSON to Markdown with headings and lists.",
        "tags": ["json", "markdown", "conversion"],
        "content": {
            "section1": {"subsection1": "This is a subsection.", "subsection2": "Another subsection."},
            "section2": "This is the second section content.",
        },
    }
    # Convert JSON to Markdown with nested sections
    markdown_output = json_to_markdown(json_data)
    # Top-level keys become '##' headings; each nesting level adds one '#',
    # and list values render as markdown bullets.
    expected = """## title
Sample JSON to Markdown Conversion
## description
Convert JSON to Markdown with headings and lists.
## tags
- json
- markdown
- conversion
## content
### section1
#### subsection1
This is a subsection.
#### subsection2
Another subsection.
### section2
This is the second section content.
"""
    # Print or use the generated Markdown
    # print(markdown_output)
    assert expected == markdown_output
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/utils/test_mermaid.py | tests/metagpt/utils/test_mermaid.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/12/27
@Author : mashenquan
@File : test_mermaid.py
"""
import pytest
from metagpt.const import DEFAULT_WORKSPACE_ROOT
from metagpt.utils.common import check_cmd_exists, new_transaction_id
from metagpt.utils.mermaid import MMC1, mermaid_to_file
@pytest.mark.asyncio
@pytest.mark.parametrize(
    ("engine", "suffixes"), [("nodejs", None), ("nodejs", ["png", "svg", "pdf"]), ("ink", None)]
)  # TODO: playwright and pyppeteer
async def test_mermaid(engine, suffixes, context, mermaid_mocker):
    """mermaid_to_file should emit one artifact per requested suffix (default: png).

    Fix: the former ``if engine == "ink": ... else: ...`` branches were byte-for-byte
    identical, so the duplicated branch is collapsed into a single loop.
    """
    # nodejs prerequisites: npm install -g @mermaid-js/mermaid-cli
    # ink prerequisites: connected to internet
    # playwright prerequisites: playwright install --with-deps chromium
    assert check_cmd_exists("npm") == 0
    save_to = DEFAULT_WORKSPACE_ROOT / f"{new_transaction_id()}/{engine}/1"
    await mermaid_to_file(engine, MMC1, save_to, suffixes=suffixes)
    # NOTE(review): "ink" does not support pdf, but no parametrize row requests pdf
    # for ink, so one assertion loop covers every engine.
    exts = ["." + i for i in suffixes] if suffixes else [".png"]
    for ext in exts:
        assert save_to.with_suffix(ext).exists()
        save_to.with_suffix(ext).unlink(missing_ok=True)
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/utils/test_visual_graph_repo.py | tests/metagpt/utils/test_visual_graph_repo.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/1/4
@Author : mashenquan
@File : test_visual_graph_repo.py
@Desc : Unit tests for testing and demonstrating the usage of VisualDiGraphRepo.
"""
import re
from pathlib import Path
import pytest
from metagpt.utils.common import remove_affix, split_namespace
from metagpt.utils.visual_graph_repo import VisualDiGraphRepo
@pytest.mark.asyncio
async def test_visual_di_graph_repo(context, mocker):
    """Load a graph-DB snapshot and export its class/sequence views as mermaid markdown."""
    filename = Path(__file__).parent / "../../data/graph_db/networkx.sequence_view.json"
    repo = await VisualDiGraphRepo.load_from(filename=filename)
    class_view = await repo.get_mermaid_class_view()
    assert class_view
    await context.repo.resources.graph_repo.save(filename="class_view.md", content=f"```mermaid\n{class_view}\n```\n")
    sequence_views = await repo.get_mermaid_sequence_views()
    assert sequence_views
    for ns, sqv in sequence_views:
        # Sanitize the namespace into a filesystem-safe filename.
        filename = re.sub(r"[:/\\\.]+", "_", ns) + ".sequence_view.md"
        sqv = sqv.strip(" `")  # drop stray backticks/whitespace around the diagram text
        await context.repo.resources.graph_repo.save(filename=filename, content=f"```mermaid\n{sqv}\n```\n")
    sequence_view_vers = await repo.get_mermaid_sequence_view_versions()
    assert sequence_view_vers
    for ns, sqv in sequence_view_vers:
        # Versioned entries carry a "<version>:<affixed view>" payload; split it apart.
        ver, sqv = split_namespace(sqv)
        filename = re.sub(r"[:/\\\.]+", "_", ns) + f".{ver}.sequence_view_ver.md"
        sqv = remove_affix(sqv).strip(" `")
        await context.repo.resources.graph_repo.save(filename=filename, content=f"```mermaid\n{sqv}\n```\n")
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/utils/test_cost_manager.py | tests/metagpt/utils/test_cost_manager.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/12/27
@Author : mashenquan
@File : test_cost_manager.py
"""
import pytest
from metagpt.utils.cost_manager import CostManager
def test_cost_manager():
    """CostManager should accumulate tokens/cost per update and report them via get_costs."""
    manager = CostManager(total_budget=20)
    # (prompt, completion) -> expected running totals after each update.
    stages = [
        ((1000, 100), (1000, 100, 0.013)),
        ((100, 10), (1100, 110, 0.0143)),
    ]
    for (prompt, completion), (want_prompt, want_completion, want_cost) in stages:
        manager.update_cost(prompt_tokens=prompt, completion_tokens=completion, model="gpt-4-turbo")
        assert manager.get_total_prompt_tokens() == want_prompt
        assert manager.get_total_completion_tokens() == want_completion
        assert manager.get_total_cost() == want_cost
    costs = manager.get_costs()
    assert costs
    assert costs.total_cost == manager.get_total_cost()
    assert costs.total_prompt_tokens == manager.get_total_prompt_tokens()
    assert costs.total_completion_tokens == manager.get_total_completion_tokens()
    assert costs.total_budget == 20
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/roles/test_invoice_ocr_assistant.py | tests/metagpt/roles/test_invoice_ocr_assistant.py | #!/usr/bin/env python3
# _*_ coding: utf-8 _*_
"""
@Time : 2023/9/21 23:11:27
@Author : Stitch-z
@File : test_invoice_ocr_assistant.py
"""
from pathlib import Path
import pandas as pd
import pytest
from metagpt.const import DATA_PATH, TEST_DATA_PATH
from metagpt.roles.invoice_ocr_assistant import InvoiceOCRAssistant, InvoicePath
from metagpt.schema import Message
@pytest.mark.asyncio
@pytest.mark.parametrize(
    ("query", "invoice_path", "invoice_table_path", "expected_result"),
    [
        (
            "Invoicing date",
            Path("invoices/invoice-1.pdf"),
            Path("invoice_table/invoice-1.xlsx"),
            {"收款人": "小明", "城市": "深圳", "总费用/元": 412.00, "开票日期": "2023年02月03日"},
        ),
        (
            "Invoicing date",
            Path("invoices/invoice-2.png"),
            Path("invoice_table/invoice-2.xlsx"),
            {"收款人": "铁头", "城市": "广州", "总费用/元": 898.00, "开票日期": "2023年03月17日"},
        ),
        (
            "Invoicing date",
            Path("invoices/invoice-3.jpg"),
            Path("invoice_table/invoice-3.xlsx"),
            {"收款人": "夏天", "城市": "福州", "总费用/元": 2462.00, "开票日期": "2023年08月26日"},
        ),
    ],
)
async def test_invoice_ocr_assistant(
    query: str, invoice_path: Path, invoice_table_path: Path, expected_result: dict, context
):
    """OCR an invoice file and check the extracted table row against expectations.

    Expected-result keys are the Chinese column headers of the generated xlsx:
    payee (收款人), city (城市), total cost in yuan (总费用/元), invoicing date (开票日期).
    """
    invoice_path = TEST_DATA_PATH / invoice_path
    role = InvoiceOCRAssistant(context=context)
    await role.run(Message(content=query, instruct_content=InvoicePath(file_path=invoice_path)))
    # The assistant writes its result table under DATA_PATH; read it back with pandas.
    invoice_table_path = DATA_PATH / invoice_table_path
    df = pd.read_excel(invoice_table_path)
    resp = df.to_dict(orient="records")
    assert isinstance(resp, list)
    assert len(resp) == 1
    resp = resp[0]
    assert expected_result["收款人"] == resp["收款人"]
    # City match is substring-based: OCR output may carry an extra suffix.
    assert expected_result["城市"] in resp["城市"]
    assert float(expected_result["总费用/元"]) == float(resp["总费用/元"])
    assert expected_result["开票日期"] == resp["开票日期"]
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/roles/test_assistant.py | tests/metagpt/roles/test_assistant.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/12/25
@Author : mashenquan
@File : test_asssistant.py
@Desc : Used by AgentStore.
"""
import pytest
from pydantic import BaseModel
from metagpt.actions.skill_action import SkillAction
from metagpt.actions.talk_action import TalkAction
from metagpt.memory.brain_memory import BrainMemory
from metagpt.roles.assistant import Assistant
from metagpt.schema import Message
from metagpt.utils.common import any_to_str
@pytest.mark.asyncio
async def test_run(mocker, context):
    """Drive Assistant through its think/act loop for a talk scenario and a skill scenario."""
    # mock the image-generation skill so no external service is called
    mocker.patch("metagpt.learn.text_to_image", return_value="http://mock.com/1.png")
    context.kwargs.language = "Chinese"

    class Input(BaseModel):
        # One scenario: restored memory plus the expected action class (cause_by).
        memory: BrainMemory
        language: str
        agent_description: str
        cause_by: str
        agent_skills: list

    # NOTE(review): the "id" values repeat below (3 and 6 appear twice) — looks like
    # fixture typos, but ids are never asserted on, so behavior is unaffected.
    agent_skills = [
        {"id": 1, "name": "text_to_speech", "type": "builtin", "config": {}, "enabled": True},
        {"id": 2, "name": "text_to_image", "type": "builtin", "config": {}, "enabled": True},
        {"id": 3, "name": "ai_call", "type": "builtin", "config": {}, "enabled": True},
        {"id": 3, "name": "data_analysis", "type": "builtin", "config": {}, "enabled": True},
        {"id": 5, "name": "crawler", "type": "builtin", "config": {"engine": "ddg"}, "enabled": True},
        {"id": 6, "name": "knowledge", "type": "builtin", "config": {}, "enabled": True},
        {"id": 6, "name": "web_search", "type": "builtin", "config": {}, "enabled": True},
    ]
    inputs = [
        {
            "memory": {
                "history": [
                    {
                        "content": "who is tulin",
                        "role": "user",
                        "id": "1",
                    },
                    {"content": "The one who eaten a poison apple.", "role": "assistant"},
                ],
                "knowledge": [{"content": "tulin is a scientist."}],
                "last_talk": "Do you have a poison apple?",
            },
            "language": "English",
            "agent_description": "chatterbox",
            "cause_by": any_to_str(TalkAction),
            "agent_skills": [],
        },
        {
            "memory": {
                "history": [
                    {
                        "content": "can you draw me an picture?",
                        "role": "user",
                        "id": "1",
                    },
                    {"content": "Yes, of course. What do you want me to draw", "role": "assistant"},
                ],
                "knowledge": [{"content": "tulin is a scientist."}],
                "last_talk": "Draw me an apple.",
            },
            "language": "English",
            "agent_description": "painter",
            "cause_by": any_to_str(SkillAction),
            "agent_skills": agent_skills,
        },
    ]
    for i in inputs:
        seed = Input(**i)
        role = Assistant(language="Chinese", context=context)
        role.context.kwargs.language = seed.language
        role.context.kwargs.agent_description = seed.agent_description
        role.context.kwargs.agent_skills = seed.agent_skills
        role.memory = seed.memory  # Restore historical conversation content.
        # Loop until the role has nothing left to do; every produced message must
        # be caused by the expected action class for this scenario.
        while True:
            has_action = await role.think()
            if not has_action:
                break
            msg: Message = await role.act()
            # logger.info(msg)
            assert msg
            assert msg.cause_by == seed.cause_by
            assert msg.content
@pytest.mark.parametrize(
    "memory",
    [
        {
            "history": [
                {
                    "content": "can you draw me an picture?",
                    "role": "user",
                    "id": "1",
                },
                {"content": "Yes, of course. What do you want me to draw", "role": "assistant"},
            ],
            "knowledge": [{"content": "tulin is a scientist."}],
            "last_talk": "Draw me an apple.",
        }
    ],
)
@pytest.mark.asyncio
async def test_memory(memory, context):
    """Assistant should load/serialize memory and fall back to TalkAction without skills."""
    role = Assistant(context=context)
    role.context.kwargs.agent_skills = []
    role.load_memory(memory)
    val = role.get_memory()
    assert val
    await role.talk("draw apple")
    await role.think()
    # With no skills configured, a draw request cannot dispatch a skill, so the
    # role's next todo is plain talking.
    assert isinstance(role.rc.todo, TalkAction)
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/roles/test_architect.py | tests/metagpt/roles/test_architect.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/20 14:37
@Author : alexanderwu
@File : test_architect.py
@Modified By: mashenquan, 2023-11-1. In accordance with Chapter 2.2.1 and 2.2.2 of RFC 116, utilize the new message
distribution feature for message handling.
"""
import uuid
from pathlib import Path
import pytest
from metagpt.actions import WritePRD
from metagpt.actions.di.run_command import RunCommand
from metagpt.const import PRDS_FILE_REPO
from metagpt.logs import logger
from metagpt.roles import Architect
from metagpt.schema import Message
from metagpt.utils.common import any_to_str, awrite
from tests.metagpt.roles.mock import MockMessages
@pytest.mark.asyncio
async def test_architect(context):
    """Architect turns a WritePRD-caused message into a RunCommand-caused response."""
    # Prerequisites: seed a PRD document for the architect to read.
    prd_filename = uuid.uuid4().hex + ".json"
    await awrite(Path(context.config.project_path) / PRDS_FILE_REPO / prd_filename, data=MockMessages.prd.content)
    architect = Architect(context=context)
    rsp = await architect.run(with_message=Message(content="", cause_by=WritePRD))
    logger.info(rsp)
    assert len(rsp.content) > 0
    assert rsp.cause_by == any_to_str(RunCommand)
    # test update: a second run against the existing design exercises the update path
    rsp = await architect.run(with_message=Message(content="", cause_by=WritePRD))
    assert rsp
    assert rsp.cause_by == any_to_str(RunCommand)
    assert len(rsp.content) > 0
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/roles/test_engineer.py | tests/metagpt/roles/test_engineer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/12 10:14
@Author : alexanderwu
@File : test_engineer.py
@Modified By: mashenquan, 2023-11-1. In accordance with Chapter 2.2.1 and 2.2.2 of RFC 116, utilize the new message
distribution feature for message handling.
"""
import json
from pathlib import Path
from types import SimpleNamespace
import pytest
from metagpt.actions import WriteCode, WriteTasks
from metagpt.const import REQUIREMENT_FILENAME, SYSTEM_DESIGN_FILE_REPO, TASK_FILE_REPO
from metagpt.logs import logger
from metagpt.roles.engineer import Engineer
from metagpt.schema import CodingContext, Message
from metagpt.utils.common import CodeParser, any_to_name, any_to_str, aread, awrite
from metagpt.utils.git_repository import ChangeType
from metagpt.utils.project_repo import ProjectRepo
from tests.metagpt.roles.mock import STRS_FOR_PARSING, TASKS, MockMessages
@pytest.mark.asyncio
async def test_engineer(context):
    """Engineer should turn saved PRD/design/task docs into written code files."""
    # Prerequisites
    rqno = "20231221155954.json"
    project_repo = ProjectRepo(context.config.project_path)
    # Set up the engineer
    engineer = Engineer(context=context)
    engineer.repo = project_repo
    engineer.input_args = SimpleNamespace(project_path=context.config.project_path)
    # Save the required input documents via project_repo
    await project_repo.save(REQUIREMENT_FILENAME, content=MockMessages.req.content)
    await project_repo.docs.prd.save(rqno, content=MockMessages.prd.content)
    await project_repo.docs.system_design.save(rqno, content=MockMessages.system_design.content)
    await project_repo.docs.task.save(rqno, content=MockMessages.json_tasks.content)
    rsp = await engineer.run(Message(content="", cause_by=WriteTasks))
    logger.info(rsp)
    # The engineer's reply is caused by WriteCode, and source files were changed.
    assert rsp.cause_by == any_to_str(WriteCode)
    assert context.repo.with_src_path(context.src_workspace).srcs.changed_files
def test_parse_str():
    """Every entry of STRS_FOR_PARSING should parse down to the literal "a"."""
    for pos, sample in enumerate(STRS_FOR_PARSING, start=1):
        # logger.info(text)
        assert CodeParser.parse_str(f"{pos}", sample) == "a"
def test_parse_blocks():
    """parse_blocks must expose a "Task list" section for the mock TASKS markdown."""
    blocks = CodeParser.parse_blocks(TASKS)
    logger.info(blocks.keys())
    assert "Task list" in blocks
# Expected file list parsed from the "Task list" section of the mock TASKS document.
target_list = [
    "smart_search_engine/knowledge_base.py",
    "smart_search_engine/index.py",
    "smart_search_engine/ranking.py",
    "smart_search_engine/summary.py",
    "smart_search_engine/search.py",
    "smart_search_engine/main.py",
    "smart_search_engine/interface.py",
    "smart_search_engine/user_feedback.py",
    "smart_search_engine/security.py",
    "smart_search_engine/testing.py",
    "smart_search_engine/monitoring.py",
]
def test_parse_file_list():
    """parse_file_list should return the task list exactly as declared in TASKS."""
    files = CodeParser.parse_file_list("Task list", TASKS)
    logger.info(files)
    assert isinstance(files, list)
    assert files == target_list
target_code = """task_list = [
"smart_search_engine/knowledge_base.py",
"smart_search_engine/index.py",
"smart_search_engine/ranking.py",
"smart_search_engine/summary.py",
"smart_search_engine/search.py",
"smart_search_engine/main.py",
"smart_search_engine/interface.py",
"smart_search_engine/user_feedback.py",
"smart_search_engine/security.py",
"smart_search_engine/testing.py",
"smart_search_engine/monitoring.py",
]
"""
def test_parse_code():
    """parse_code should return the fenced python block of "Task list" verbatim."""
    extracted = CodeParser.parse_code(block="Task list", text=TASKS, lang="python")
    logger.info(extracted)
    assert isinstance(extracted, str)
    assert extracted == target_code
def test_todo():
    """A freshly constructed Engineer defaults to the WriteCode action."""
    engineer = Engineer()
    assert engineer.action_description == any_to_name(WriteCode)
@pytest.mark.asyncio
async def test_new_coding_context(context):
    """Build a CodingContext for a file from saved design/task docs and dependencies."""
    # Prerequisites: copy the demo project's dependency graph and docs into the repo.
    demo_path = Path(__file__).parent / "../../data/demo_project"
    deps = json.loads(await aread(demo_path / "dependencies.json"))
    dependency = await context.git_repo.get_dependency()
    for k, v in deps.items():
        await dependency.update(k, set(v))
    data = await aread(demo_path / "system_design.json")
    rqno = "20231221155954.json"
    await awrite(context.repo.workdir / SYSTEM_DESIGN_FILE_REPO / rqno, data)
    data = await aread(demo_path / "tasks.json")
    await awrite(context.repo.workdir / TASK_FILE_REPO / rqno, data)
    context.src_workspace = Path(context.repo.workdir) / "game_2048"
    try:
        filename = "game.py"
        engineer = Engineer(context=context)
        ctx_doc = await engineer._new_coding_doc(
            filename=filename,
            dependency=dependency,
        )
        assert ctx_doc
        assert ctx_doc.filename == filename
        assert ctx_doc.content
        # The document content round-trips to a CodingContext with design/task docs attached.
        ctx = CodingContext.model_validate_json(ctx_doc.content)
        assert ctx.filename == filename
        assert ctx.design_doc
        assert ctx.design_doc.content
        assert ctx.task_doc
        assert ctx.task_doc.content
        assert ctx.code_doc
        # Commit the mocked environment so a fresh Engineer sees the task file as tracked.
        context.git_repo.add_change({f"{TASK_FILE_REPO}/{rqno}": ChangeType.UNTRACTED})
        context.git_repo.commit("mock env")
        await context.repo.with_src_path(context.src_workspace).srcs.save(filename=filename, content="content")
        role = Engineer(context=context)
        assert not role.code_todos
        await role._new_code_actions()
        assert role.code_todos
    finally:
        # Always remove the scratch git repository, even on assertion failure.
        context.git_repo.delete_repository()
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/roles/test_role.py | tests/metagpt/roles/test_role.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : unittest of Role
import pytest
from metagpt.provider.human_provider import HumanProvider
from metagpt.roles.role import Role
from metagpt.schema import Message, UserMessage
def test_role_desc():
    """Role should keep the profile and desc it was constructed with."""
    sales = Role(profile="Sales", desc="Best Seller")
    assert sales.profile == "Sales"
    assert sales.desc == "Best Seller"
def test_role_human(context):
    """A role created with is_human=True must be backed by a HumanProvider LLM."""
    human_role = Role(is_human=True, context=context)
    assert isinstance(human_role.llm, HumanProvider)
@pytest.mark.asyncio
async def test_recovered():
    """A role created with recovered=True should drain its buffer on observation."""
    role = Role(profile="Tester", desc="Tester", recovered=True)
    role.put_message(UserMessage(content="2"))
    role.latest_observed_msg = Message(content="1")
    # NOTE(review): _observe is called twice on purpose here, presumably: the first
    # call handles the recovery path, the second verifies nothing new arrives — confirm.
    await role._observe()
    await role._observe()
    assert role.rc.msg_buffer.empty()
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/roles/test_project_manager.py | tests/metagpt/roles/test_project_manager.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/12 10:23
@Author : alexanderwu
@File : test_project_manager.py
"""
import pytest
from metagpt.logs import logger
from metagpt.roles import ProjectManager
from tests.metagpt.roles.mock import MockMessages
@pytest.mark.asyncio
async def test_project_manager(context):
    """ProjectManager should produce a non-empty response for the mock tasks message."""
    project_manager = ProjectManager(context=context)
    rsp = await project_manager.run(MockMessages.tasks)
    logger.info(rsp)
    # Fix: the test previously asserted nothing, so it passed even when run()
    # returned an empty/None response.
    assert rsp
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/roles/mock.py | tests/metagpt/roles/mock.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/12 13:05
@Author : alexanderwu
@File : mock_markdown.py
"""
import json
from metagpt.actions import UserRequirement, WriteDesign, WritePRD, WriteTasks
from metagpt.schema import Message
USER_REQUIREMENT = """开发一个基于大语言模型与私有知识库的搜索引擎,希望可以基于大语言模型进行搜索总结"""
DETAIL_REQUIREMENT = """需求:开发一个基于LLM(大语言模型)与私有知识库的搜索引擎,希望有几点能力
1. 用户可以在私有知识库进行搜索,再根据大语言模型进行总结,输出的结果包括了总结
2. 私有知识库可以实时更新,底层基于 ElasticSearch
3. 私有知识库支持pdf、word、txt等各种文件格式上传,上传后可以在服务端解析为文本,存储ES
资源:
1. 大语言模型已经有前置的抽象、部署,可以通过 `from metagpt.llm import LLM`,再使用`LLM().ask(prompt)`直接调用
2. Elastic已有[部署](http://192.168.50.82:9200/),代码可以直接使用这个部署"""
PRD = '''## 原始需求
```python
"""
我们希望开发一个基于大语言模型与私有知识库的搜索引擎。该搜索引擎应当能根据用户输入的查询进行智能搜索,并基于大语言模型对搜索结果进行总结,以便用户能够快速获取他们所需要的信息。该搜索引擎应当能够处理大规模的数据,同时保持搜索结果的准确性和相关性。我们希望这个产品能够降低用户在查找、筛选和理解信息时的工作负担,提高他们的工作效率。
"""
```
## 产品目标
```python
[
"提供高准确性、高相关性的搜索结果,满足用户的查询需求",
"基于大语言模型对搜索结果进行智能总结,帮助用户快速获取所需信息",
"处理大规模数据,保证搜索的速度和效率,提高用户的工作效率"
]
```
## 用户故事
```python
[
"假设用户是一名研究员,他正在为一项关于全球气候变化的报告做研究。他输入了'全球气候变化的最新研究',我们的搜索引擎快速返回了相关的文章、报告、数据集等。并且基于大语言模型对这些信息进行了智能总结,研究员可以快速了解到最新的研究趋势和发现。",
"用户是一名学生,正在为即将到来的历史考试复习。他输入了'二战的主要战役',搜索引擎返回了相关的资料,大语言模型总结出主要战役的时间、地点、结果等关键信息,帮助学生快速记忆。",
"用户是一名企业家,他正在寻找关于最新的市场趋势信息。他输入了'2023年人工智能市场趋势',搜索引擎返回了各种报告、新闻和分析文章。大语言模型对这些信息进行了总结,用户能够快速了解到市场的最新动态和趋势。"
]
```
## 竞品分析
```python
[
"Google Search:Google搜索是市场上最主要的搜索引擎,它能够提供海量的搜索结果。但Google搜索并不提供搜索结果的总结功能,用户需要自己去阅读和理解搜索结果。",
"Microsoft Bing:Bing搜索也能提供丰富的搜索结果,同样没有提供搜索结果的总结功能。",
"Wolfram Alpha:Wolfram Alpha是一个基于知识库的计算型搜索引擎,能够针对某些特定类型的查询提供直接的答案和总结,但它的知识库覆盖范围有限,无法处理大规模的数据。"
]
```
## 开发需求池
```python
[
("开发基于大语言模型的智能总结功能", 5),
("开发搜索引擎核心算法,包括索引构建、查询处理、结果排序等", 7),
("设计和实现用户界面,包括查询输入、搜索结果展示、总结结果展示等", 3),
("构建和维护私有知识库,包括数据采集、清洗、更新等", 7),
("优化搜索引擎性能,包括搜索速度、准确性、相关性等", 6),
("开发用户反馈机制,包括反馈界面、反馈处理等", 2),
("开发安全防护机制,防止恶意查询和攻击", 3),
("集成大语言模型,包括模型选择、优化、更新等", 5),
("进行大规模的测试,包括功能测试、性能测试、压力测试等", 5),
("开发数据监控和日志系统,用于监控搜索引擎的运行状态和性能", 4)
]
```
'''
SYSTEM_DESIGN = """## Project name
```python
"smart_search_engine"
```
## Task list:
```python
[
"smart_search_engine/__init__.py",
"smart_search_engine/main.py",
"smart_search_engine/search.py",
"smart_search_engine/index.py",
"smart_search_engine/ranking.py",
"smart_search_engine/summary.py",
"smart_search_engine/knowledge_base.py",
"smart_search_engine/interface.py",
"smart_search_engine/user_feedback.py",
"smart_search_engine/security.py",
"smart_search_engine/testing.py",
"smart_search_engine/monitoring.py"
]
```
## Data structures and interfaces
```mermaid
classDiagram
class Main {
-SearchEngine search_engine
+main() str
}
class SearchEngine {
-Index index
-Ranking ranking
-Summary summary
+search(query: str) str
}
class Index {
-KnowledgeBase knowledge_base
+create_index(data: dict)
+query_index(query: str) list
}
class Ranking {
+rank_results(results: list) list
}
class Summary {
+summarize_results(results: list) str
}
class KnowledgeBase {
+update(data: dict)
+fetch_data(query: str) dict
}
Main --> SearchEngine
SearchEngine --> Index
SearchEngine --> Ranking
SearchEngine --> Summary
Index --> KnowledgeBase
```
## Program call flow
```mermaid
sequenceDiagram
participant M as Main
participant SE as SearchEngine
participant I as Index
participant R as Ranking
participant S as Summary
participant KB as KnowledgeBase
M->>SE: search(query)
SE->>I: query_index(query)
I->>KB: fetch_data(query)
KB-->>I: return data
I-->>SE: return results
SE->>R: rank_results(results)
R-->>SE: return ranked_results
SE->>S: summarize_results(ranked_results)
S-->>SE: return summary
SE-->>M: return summary
```
"""
JSON_TASKS = {
"Logic Analysis": """
在这个项目中,所有的模块都依赖于“SearchEngine”类,这是主入口,其他的模块(Index、Ranking和Summary)都通过它交互。另外,"Index"类又依赖于"KnowledgeBase"类,因为它需要从知识库中获取数据。
- "main.py"包含"Main"类,是程序的入口点,它调用"SearchEngine"进行搜索操作,所以在其他任何模块之前,"SearchEngine"必须首先被定义。
- "search.py"定义了"SearchEngine"类,它依赖于"Index"、"Ranking"和"Summary",因此,这些模块需要在"search.py"之前定义。
- "index.py"定义了"Index"类,它从"knowledge_base.py"获取数据来创建索引,所以"knowledge_base.py"需要在"index.py"之前定义。
- "ranking.py"和"summary.py"相对独立,只需确保在"search.py"之前定义。
- "knowledge_base.py"是独立的模块,可以优先开发。
- "interface.py"、"user_feedback.py"、"security.py"、"testing.py"和"monitoring.py"看起来像是功能辅助模块,可以在主要功能模块开发完成后并行开发。
""",
"Task list": [
"smart_search_engine/knowledge_base.py",
"smart_search_engine/index.py",
"smart_search_engine/ranking.py",
"smart_search_engine/summary.py",
"smart_search_engine/search.py",
"smart_search_engine/main.py",
"smart_search_engine/interface.py",
"smart_search_engine/user_feedback.py",
"smart_search_engine/security.py",
"smart_search_engine/testing.py",
"smart_search_engine/monitoring.py",
],
}
TASKS = """## Logic Analysis
在这个项目中,所有的模块都依赖于“SearchEngine”类,这是主入口,其他的模块(Index、Ranking和Summary)都通过它交互。另外,"Index"类又依赖于"KnowledgeBase"类,因为它需要从知识库中获取数据。
- "main.py"包含"Main"类,是程序的入口点,它调用"SearchEngine"进行搜索操作,所以在其他任何模块之前,"SearchEngine"必须首先被定义。
- "search.py"定义了"SearchEngine"类,它依赖于"Index"、"Ranking"和"Summary",因此,这些模块需要在"search.py"之前定义。
- "index.py"定义了"Index"类,它从"knowledge_base.py"获取数据来创建索引,所以"knowledge_base.py"需要在"index.py"之前定义。
- "ranking.py"和"summary.py"相对独立,只需确保在"search.py"之前定义。
- "knowledge_base.py"是独立的模块,可以优先开发。
- "interface.py"、"user_feedback.py"、"security.py"、"testing.py"和"monitoring.py"看起来像是功能辅助模块,可以在主要功能模块开发完成后并行开发。
## Task list
```python
task_list = [
"smart_search_engine/knowledge_base.py",
"smart_search_engine/index.py",
"smart_search_engine/ranking.py",
"smart_search_engine/summary.py",
"smart_search_engine/search.py",
"smart_search_engine/main.py",
"smart_search_engine/interface.py",
"smart_search_engine/user_feedback.py",
"smart_search_engine/security.py",
"smart_search_engine/testing.py",
"smart_search_engine/monitoring.py",
]
```
这个任务列表首先定义了最基础的模块,然后是依赖这些模块的模块,最后是辅助模块。可以根据团队的能力和资源,同时开发多个任务,只要满足依赖关系。例如,在开发"search.py"之前,可以同时开发"knowledge_base.py"、"index.py"、"ranking.py"和"summary.py"。
"""
TASKS_TOMATO_CLOCK = '''## Required Python third-party packages: Provided in requirements.txt format
```python
Flask==2.1.1
Jinja2==3.1.0
Bootstrap==5.3.0-alpha1
```
## Logic Analysis: Provided as a Python str, analyze the dependencies between the files, which work should be done first
```python
"""
1. Start by setting up the Flask app, config.py, and requirements.txt to create the basic structure of the web application.
2. Create the timer functionality using JavaScript and the Web Audio API in the timer.js file.
3. Develop the frontend templates (index.html and settings.html) using Jinja2 and integrate the timer functionality.
4. Add the necessary static files (main.css, main.js, and notification.mp3) for styling and interactivity.
5. Implement the ProgressBar class in main.js and integrate it with the Timer class in timer.js.
6. Write tests for the application in test_app.py.
"""
```
## Task list: Provided as Python list[str], each str is a file, the more at the beginning, the more it is a prerequisite dependency, should be done first
```python
task_list = [
'app.py',
'config.py',
'requirements.txt',
'static/js/timer.js',
'templates/index.html',
'templates/settings.html',
'static/css/main.css',
'static/js/main.js',
'static/audio/notification.mp3',
'static/js/progressbar.js',
'tests/test_app.py'
]
```
'''
TASK = """smart_search_engine/knowledge_base.py"""
STRS_FOR_PARSING = [
"""
## 1
```python
a
```
""",
"""
##2
```python
"a"
```
""",
"""
## 3
```python
a = "a"
```
""",
"""
## 4
```python
a = 'a'
```
""",
]
class MockMessages:
    # Canned pipeline messages, one per upstream action, for driving role
    # tests without invoking a real LLM.
    req = Message(role="User", content=USER_REQUIREMENT, cause_by=UserRequirement)
    prd = Message(role="Product Manager", content=PRD, cause_by=WritePRD)
    system_design = Message(role="Architect", content=SYSTEM_DESIGN, cause_by=WriteDesign)
    tasks = Message(role="Project Manager", content=TASKS, cause_by=WriteTasks)
    # JSON variant of the tasks message; ensure_ascii=False keeps the
    # Chinese analysis text readable in the serialized content.
    json_tasks = Message(
        role="Project Manager", content=json.dumps(JSON_TASKS, ensure_ascii=False), cause_by=WriteTasks
    )
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/roles/test_researcher.py | tests/metagpt/roles/test_researcher.py | import tempfile
from pathlib import Path
from random import random
from tempfile import TemporaryDirectory
import pytest
from metagpt.actions.research import CollectLinks
from metagpt.roles import researcher
from metagpt.team import Team
from metagpt.tools import SearchEngineType
from metagpt.tools.search_engine import SearchEngine
async def mock_llm_ask(self, prompt: str, system_msgs):
    """Return a canned LLM reply keyed on marker phrases found in the prompt.

    Stands in for BaseLLM.aask during Researcher tests; unknown prompts get
    an empty string.
    """
    if "Please provide up to 2 necessary keywords" in prompt:
        return '["dataiku", "datarobot"]'
    if "Provide up to 4 queries related to your research topic" in prompt:
        return (
            '["Dataiku machine learning platform", "DataRobot AI platform comparison", '
            '"Dataiku vs DataRobot features", "Dataiku and DataRobot use cases"]'
        )
    if "sort the remaining search results" in prompt:
        return "[1,2]"
    if "Not relevant." in prompt:
        # Randomly alternate between a rejection and an echo of the prompt tail.
        if random() > 0.5:
            return "Not relevant"
        return prompt[-100:]
    if "provide a detailed research report" in prompt:
        return f"# Research Report\n## Introduction\n{prompt}"
    return ""
@pytest.mark.asyncio
async def test_researcher(mocker, search_engine_mocker, context):
    """End-to-end Researcher run with a mocked LLM and DuckDuckGo search."""
    with TemporaryDirectory() as workdir:
        topic = "dataiku vs. datarobot"
        mocker.patch("metagpt.provider.base_llm.BaseLLM.aask", mock_llm_ask)
        researcher.RESEARCH_PATH = Path(workdir)
        role = researcher.Researcher(context=context)
        # Point every link-collection action at the DuckDuckGo engine.
        for action in role.actions:
            if isinstance(action, CollectLinks):
                action.search_engine = SearchEngine(engine=SearchEngineType.DUCK_DUCK_GO)
        await role.run(topic)
        report = (researcher.RESEARCH_PATH / f"{topic}.md").read_text()
        assert report.startswith("# Research Report")
def test_write_report(mocker, context):
    """Topics with illegal filename characters are sanitized before writing."""
    unsafe_topics = [
        "1./metagpt",
        '2.:"metagpt',
        "3.*?<>|metagpt",
        "4. metagpt\n",
    ]
    with TemporaryDirectory() as dirname:
        for index, topic in enumerate(unsafe_topics):
            researcher.RESEARCH_PATH = Path(dirname)
            content = "# Research Report"
            researcher.Researcher(context=context).write_report(topic, content)
            # Every unsafe topic should collapse to "<n>. metagpt.md".
            report_file = researcher.RESEARCH_PATH / f"{index + 1}. metagpt.md"
            assert report_file.read_text().startswith("# Research Report")
@pytest.mark.asyncio
async def test_serialize():
    """A team containing a Researcher must serialize without error."""
    team = Team()
    team.hire([researcher.Researcher()])
    with tempfile.TemporaryDirectory() as tmpdir:
        team.serialize(Path(tmpdir) / "team.json")
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/roles/__init__.py | tests/metagpt/roles/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/12 10:14
@Author : alexanderwu
@File : __init__.py
"""
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/roles/test_product_manager.py | tests/metagpt/roles/test_product_manager.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/16 14:50
@Author : alexanderwu
@File : test_product_manager.py
"""
import json
import pytest
from metagpt.actions import WritePRD
from metagpt.context import Context
from metagpt.logs import logger
from metagpt.roles import ProductManager
from metagpt.utils.common import any_to_str
from metagpt.utils.git_repository import GitRepository
from tests.metagpt.roles.mock import MockMessages
@pytest.mark.asyncio
async def test_product_manager(new_filename):
    """ProductManager turns a requirement message into a PRD, then idles.

    Round 1: a raw requirement must yield a WritePRD response whose document
    embeds the original requirement text. Round 2: feeding the PRD response
    back in yields None (nothing left to do).
    """
    context = Context()
    try:
        product_manager = ProductManager(context=context)
        logger.info(MockMessages.req)
        rsp = await product_manager.run(MockMessages.req)
        logger.info(rsp)
        assert rsp.cause_by == any_to_str(WritePRD)
        assert len(rsp.content) > 0
        doc = next(iter(rsp.instruct_content.docs.values()))
        prd = json.loads(doc.content)
        assert prd["Original Requirements"] == MockMessages.req.content
        # Second round: the PRD itself triggers no further action.
        rsp = await product_manager.run(rsp)
        assert rsp is None
    finally:
        # Clean up the git repository created under the project path.
        # NOTE: the former `except Exception as e: assert not e` clause was
        # removed — it masked the real traceback and is a no-op under -O;
        # letting the exception propagate gives pytest the full failure.
        if context.config.project_path:
            git_repo = GitRepository(context.config.project_path)
            git_repo.delete_repository()
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/roles/test_tutorial_assistant.py | tests/metagpt/roles/test_tutorial_assistant.py | #!/usr/bin/env python3
# _*_ coding: utf-8 _*_
"""
@Time : 2023/9/6 23:11:27
@Author : Stitch-z
@File : test_tutorial_assistant.py
"""
import pytest
from metagpt.const import TUTORIAL_PATH
from metagpt.roles.tutorial_assistant import TutorialAssistant
from metagpt.utils.common import aread
@pytest.mark.asyncio
@pytest.mark.parametrize(("language", "topic"), [("Chinese", "Write a tutorial about pip")])
async def test_tutorial_assistant(language: str, topic: str, context):
    """The assistant writes a tutorial under TUTORIAL_PATH mentioning the topic keyword."""
    assistant = TutorialAssistant(language=language, context=context)
    result = await assistant.run(topic)
    assert TUTORIAL_PATH.exists()
    # The returned message content carries the path of the generated file.
    tutorial_text = await aread(filename=result.content)
    assert "pip" in tutorial_text
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/roles/test_qa_engineer.py | tests/metagpt/roles/test_qa_engineer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/12 12:01
@Author : alexanderwu
@File : test_qa_engineer.py
"""
from pathlib import Path
from typing import List
import pytest
from pydantic import Field
from metagpt.actions import DebugError, RunCode, WriteTest
from metagpt.actions.summarize_code import SummarizeCode
from metagpt.environment import Environment
from metagpt.roles import QaEngineer
from metagpt.schema import Message
from metagpt.utils.common import any_to_str, aread, awrite
@pytest.mark.asyncio  # was missing: without it pytest-asyncio never awaits this coroutine
async def test_qa(context):
    """Drive QaEngineer through its WriteTest -> RunCode -> DebugError chain.

    Each round feeds the engineer's previously published message back in and
    checks that the next action in the chain caused the new output; finally
    the round budget is exhausted and the role must report the overrun.
    """
    # Prerequisites: stage the demo game in the workspace under test.
    demo_path = Path(__file__).parent / "../../data/demo_project"
    context.src_workspace = Path(context.repo.workdir) / "qa/game_2048"
    data = await aread(filename=demo_path / "game.py", encoding="utf-8")
    await awrite(filename=context.src_workspace / "game.py", data=data, encoding="utf-8")
    await awrite(filename=Path(context.repo.workdir) / "requirements.txt", data="")

    class MockEnv(Environment):
        # Captures published messages instead of routing them to other roles.
        msgs: List[Message] = Field(default_factory=list)

        def publish_message(self, message: Message, peekable: bool = True) -> bool:
            self.msgs.append(message)
            return True

    env = MockEnv()
    role = QaEngineer(context=context)
    role.set_env(env)
    await role.run(with_message=Message(content="", cause_by=SummarizeCode))
    assert env.msgs
    assert env.msgs[0].cause_by == any_to_str(WriteTest)
    msg = env.msgs[0]
    env.msgs.clear()
    await role.run(with_message=msg)
    assert env.msgs
    assert env.msgs[0].cause_by == any_to_str(RunCode)
    msg = env.msgs[0]
    env.msgs.clear()
    await role.run(with_message=msg)
    assert env.msgs
    assert env.msgs[0].cause_by == any_to_str(DebugError)
    msg = env.msgs[0]
    env.msgs.clear()
    # Exhaust the test-round budget; the role should announce the overrun.
    role.test_round_allowed = 1
    rsp = await role.run(with_message=msg)
    assert "Exceeding" in rsp.content
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/roles/test_teacher.py | tests/metagpt/roles/test_teacher.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/7/27 13:25
@Author : mashenquan
@File : test_teacher.py
"""
from typing import Dict, Optional
import pytest
from pydantic import BaseModel, Field
from metagpt.context import Context
from metagpt.roles.teacher import Teacher
from metagpt.schema import Message
@pytest.mark.asyncio
async def test_init():
    """Teacher should format templated role fields from context.kwargs."""

    class Inputs(BaseModel):
        # Raw constructor fields, possibly containing {placeholder} templates.
        name: str
        profile: str
        goal: str
        constraints: str
        desc: str
        # Values to publish into context.kwargs before building the Teacher.
        kwargs: Optional[Dict] = None
        # Expected field values after Teacher's template substitution.
        expect_name: str
        expect_profile: str
        expect_goal: str
        expect_constraints: str
        expect_desc: str
        # kwargs keys to explicitly unset (None) — presumably leaves the
        # matching placeholders literal; confirm against Teacher.__init__.
        exclude: list = Field(default_factory=list)

    inputs = [
        {
            # Case 1: all placeholder keys excluded -> templates stay literal.
            "name": "Lily{language}",
            "expect_name": "Lily{language}",
            "profile": "X {teaching_language}",
            "expect_profile": "X {teaching_language}",
            "goal": "Do {something_big}, {language}",
            "expect_goal": "Do {something_big}, {language}",
            "constraints": "Do in {key1}, {language}",
            "expect_constraints": "Do in {key1}, {language}",
            "kwargs": {},
            "desc": "aaa{language}",
            "expect_desc": "aaa{language}",
            "exclude": ["language", "key1", "something_big", "teaching_language"],
        },
        {
            # Case 2: kwargs supplied -> every placeholder is substituted.
            "name": "Lily{language}",
            "expect_name": "LilyCN",
            "profile": "X {teaching_language}",
            "expect_profile": "X EN",
            "goal": "Do {something_big}, {language}",
            "expect_goal": "Do sleep, CN",
            "constraints": "Do in {key1}, {language}",
            "expect_constraints": "Do in HaHa, CN",
            "kwargs": {"language": "CN", "key1": "HaHa", "something_big": "sleep", "teaching_language": "EN"},
            "desc": "aaa{language}",
            "expect_desc": "aaaCN",
            "language": "CN",
            "teaching_language": "EN",
        },
    ]
    for i in inputs:
        seed = Inputs(**i)
        context = Context()
        for k in seed.exclude:
            context.kwargs.set(k, None)
        for k, v in seed.kwargs.items():
            context.kwargs.set(k, v)
        teacher = Teacher(
            context=context,
            name=seed.name,
            profile=seed.profile,
            goal=seed.goal,
            constraints=seed.constraints,
            desc=seed.desc,
        )
        assert teacher.name == seed.expect_name
        assert teacher.desc == seed.expect_desc
        assert teacher.profile == seed.expect_profile
        assert teacher.goal == seed.expect_goal
        assert teacher.constraints == seed.expect_constraints
        # Default course title, independent of templating.
        assert teacher.course_title == "teaching_plan"
@pytest.mark.asyncio
async def test_new_file_name():
    """Teacher.new_file_name collapses illegal filename characters to underscores."""
    cases = [
        ("# @344\n12", ".md", "_344_12.md"),
        ("1#@$%!*&\\/:*?\"<>|\n\t '1", ".cc", "1_1.cc"),
    ]
    for lesson_title, ext, expected in cases:
        assert Teacher.new_file_name(lesson_title, ext) == expected
@pytest.mark.asyncio
async def test_run():
    # Raw textbook lesson content, fed to the Teacher role verbatim.
    lesson = """
UNIT 1 Making New Friends
TOPIC 1 Welcome to China!
Section A
1a Listen and number the following names.
Jane Mari Kangkang Michael
Look, listen and understand. Then practice the conversation.
Work in groups. Introduce yourself using
I ’m ... Then practice 1a
with your own hometown or the following places.
1b Listen and number the following names
Jane Michael Maria Kangkang
1c Work in groups. Introduce yourself using I ’m ... Then practice 1a with your own hometown or the following places.
China the USA the UK Hong Kong Beijing
2a Look, listen and understand. Then practice the conversation
Hello!
Hello!
Hello!
Hello! Are you Maria?
No, I’m not. I’m Jane.
Oh, nice to meet you, Jane
Nice to meet you, too.
Hi, Maria!
Hi, Kangkang!
Welcome to China!
Thanks.
2b Work in groups. Make up a conversation with your own name and the
following structures.
A: Hello! / Good morning! / Hi! I’m ... Are you ... ?
B: ...
3a Listen, say and trace
Aa Bb Cc Dd Ee Ff Gg
3b Listen and number the following letters. Then circle the letters with the same sound as Bb.
Aa Bb Cc Dd Ee Ff Gg
3c Match the big letters with the small ones. Then write them on the lines.
"""
    context = Context()
    # Output language is Chinese while the lesson material is in English.
    context.kwargs.language = "Chinese"
    context.kwargs.teaching_language = "English"
    teacher = Teacher(context=context)
    rsp = await teacher.run(Message(content=lesson))
    # Smoke assertion only: any non-empty response counts as success.
    assert rsp
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/roles/di/run_architect.py | tests/metagpt/roles/di/run_architect.py | import asyncio
import os
from metagpt.roles.architect import Architect
from metagpt.schema import Message
DESIGN_DOC_SNAKE = """
{
"Implementation approach": "We will use the Pygame library to create the CLI-based snake game. Pygame is a set of Python modules designed for writing video games, which will help us handle graphics, sound, and input. The game will be structured into different modules to handle the main game loop, snake movement, food generation, collision detection, and user interface. We will ensure the game is engaging and responsive by optimizing the game loop and input handling. The score display and different speed levels will be implemented to enhance the user experience.",
"File list": [
"main.py",
"game.py",
"snake.py",
"food.py",
"ui.py"
],
"Data structures and interfaces": "\nclassDiagram\n class Main {\n +main() void\n }\n class Game {\n -Snake snake\n -Food food\n -int score\n -int speed\n +__init__(speed: int)\n +run() void\n +restart() void\n +update_score() void\n }\n class Snake {\n -list body\n -str direction\n +__init__()\n +move() void\n +change_direction(new_direction: str) void\n +check_collision() bool\n +grow() void\n }\n class Food {\n -tuple position\n +__init__()\n +generate_new_position() void\n }\n class UI {\n +display_score(score: int) void\n +display_game_over() void\n +display_game(snake: Snake, food: Food) void\n }\n Main --> Game\n Game --> Snake\n Game --> Food\n Game --> UI\n",
"Program call flow": "\nsequenceDiagram\n participant M as Main\n participant G as Game\n participant S as Snake\n participant F as Food\n participant U as UI\n M->>G: __init__(speed)\n M->>G: run()\n G->>S: __init__()\n G->>F: __init__()\n loop Game Loop\n G->>S: move()\n G->>S: check_collision()\n alt Collision Detected\n G->>G: restart()\n G->>U: display_game_over()\n else No Collision\n G->>F: generate_new_position()\n G->>S: grow()\n G->>G: update_score()\n G->>U: display_score(score)\n end\n G->>U: display_game(snake, food)\n end\n",
"Anything UNCLEAR": "Currently, all aspects of the project are clear."
}
"""
WRITE_SNAKE = """Write a system design for a cli snake game with pygame"""
REWRITE_SNAKE = """Rewrite the system design at temp_design.json, add a web UI"""
CASUAL_CHAT = """What's your name?"""
async def main(requirement):
    """Run the Architect against *requirement* with a design doc staged on disk.

    The design document is written to temp_design.json so the rewrite case can
    read it; the file is removed afterwards even if the run raises (the
    original left it behind on failure).
    """
    with open("temp_design.json", "w") as f:
        f.write(DESIGN_DOC_SNAKE)
    try:
        architect = Architect()
        await architect.run(Message(content=requirement, send_to="Bob"))
    finally:
        os.remove("temp_design.json")
if __name__ == "__main__":
asyncio.run(main(REWRITE_SNAKE))
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/roles/di/test_team_leader.py | tests/metagpt/roles/di/test_team_leader.py | import pytest
from metagpt.environment.mgx.mgx_env import MGXEnv
from metagpt.roles import (
Architect,
Engineer,
ProductManager,
ProjectManager,
QaEngineer,
)
from metagpt.roles.di.data_interpreter import DataInterpreter
from metagpt.roles.di.team_leader import TeamLeader
from metagpt.schema import Message
@pytest.fixture
def env():
    """MGX environment staffed with a full software team plus a data analyst."""
    environment = MGXEnv()
    data_analyst = DataInterpreter(
        name="David",
        profile="Data Analyst",
        goal="Take on any data-related tasks, such as data analysis, machine learning, deep learning, web browsing, web scraping, web searching, web deployment, terminal operation, git operation, etc.",
        react_mode="react",
    )
    environment.add_roles(
        [
            TeamLeader(),
            ProductManager(),
            Architect(),
            ProjectManager(),
            Engineer(n_borg=5, use_code_review=True),
            QaEngineer(),
            data_analyst,
        ]
    )
    return environment
@pytest.mark.asyncio
async def test_plan_for_software_requirement(env):
    """A software requirement is planned and routed to the Product Manager.

    After the team leader runs, its outgoing history must include a message
    to Alice (PM) and at least one PRD task assignment to a team member.
    """
    requirement = "create a 2048 game"
    tl = env.get_role("Mike")
    env.publish_message(Message(content=requirement, send_to=tl.name))
    await tl.run()
    history = env.history.get()
    messages_to_team = [msg for msg in history if msg.sent_from == tl.name]
    pm_messages = [msg for msg in messages_to_team if "Alice" in msg.send_to]
    assert len(pm_messages) > 0, "Should have message sent to Product Manager"
    # Idiomatic any() replaces the former manual flag-and-break loop.
    found_task_msg = any(
        "prd" in msg.content.lower() and any(role in msg.content for role in ["Alice", "Bob", "Alex", "David"])
        for msg in messages_to_team
    )
    assert found_task_msg, "Should have task assignment message"
@pytest.mark.asyncio
async def test_plan_for_data_related_requirement(env):
    """A pure data task is delegated to the Data Analyst with the link intact."""
    requirement = "I want to use yolov5 for target detection, yolov5 all the information from the following link, please help me according to the content of the link (https://github.com/ultralytics/yolov5), set up the environment and download the model parameters, and finally provide a few pictures for inference, the inference results will be saved!"
    tl = env.get_role("Mike")
    env.publish_message(Message(content=requirement, send_to=tl.name))
    await tl.run()
    outgoing = [msg for msg in env.history.get() if msg.sent_from == tl.name]
    da_messages = [msg for msg in outgoing if "David" in msg.send_to]
    assert len(da_messages) > 0
    task = da_messages[0]
    assert "https://github.com/ultralytics/yolov5" in task.content
    # The delegated task must mention the model, the work to do, and the link.
    content = task.content.lower()
    has_model_info = "yolov5" in content
    has_task_info = any(word in content for word in ["detection", "inference", "environment", "parameters"])
    has_link = "github.com" in content
    assert has_model_info and has_task_info and has_link
@pytest.mark.asyncio
async def test_plan_for_mixed_requirement(env):
    """A search-then-build requirement starts with a search task for the Data Analyst."""
    requirement = "Search the web for the new game 2048X, then replicate it"
    tl = env.get_role("Mike")
    env.publish_message(Message(content=requirement, send_to=tl.name))
    await tl.run()
    outgoing = [msg for msg in env.history.get() if msg.sent_from == tl.name]
    da_messages = [msg for msg in outgoing if "David" in msg.send_to]
    assert len(da_messages) > 0
    first_task = da_messages[0].content.lower()
    # The first delegated task must be the web search for the game.
    assert "2048x" in first_task and "search" in first_task
PRD_MSG_CONTENT = """{'docs': {'20240424153821.json': {'root_path': 'docs/prd', 'filename': '20240424153821.json', 'content': '{"Language":"en_us","Programming Language":"Python","Original Requirements":"create a 2048 game","Project Name":"game_2048","Product Goals":["Develop an intuitive and addictive 2048 game variant","Ensure the game is accessible and performs well on various devices","Design a visually appealing and modern user interface"],"User Stories":["As a player, I want to be able to undo my last move so I can correct mistakes","As a player, I want to see my high scores to track my progress over time","As a player, I want to be able to play the game without any internet connection"],"Competitive Analysis":["2048 Original: Classic gameplay, minimalistic design, lacks social sharing features","2048 Hex: Unique hexagon board, but not mobile-friendly","2048 Multiplayer: Offers real-time competition, but overwhelming ads","2048 Bricks: Innovative gameplay with bricks, but poor performance on older devices","2048.io: Multiplayer battle royale mode, but complicated UI for new players","2048 Animated: Animated tiles add fun, but the game consumes a lot of battery","2048 3D: 3D version of the game, but has a steep learning curve"],"Competitive Quadrant Chart":"quadrantChart\\n title \\"User Experience and Feature Set of 2048 Games\\"\\n x-axis \\"Basic Features\\" --> \\"Rich Features\\"\\n y-axis \\"Poor Experience\\" --> \\"Great Experience\\"\\n quadrant-1 \\"Need Improvement\\"\\n quadrant-2 \\"Feature-Rich but Complex\\"\\n quadrant-3 \\"Simplicity with Poor UX\\"\\n quadrant-4 \\"Balanced\\"\\n \\"2048 Original\\": [0.2, 0.7]\\n \\"2048 Hex\\": [0.3, 0.4]\\n \\"2048 Multiplayer\\": [0.6, 0.5]\\n \\"2048 Bricks\\": [0.4, 0.3]\\n \\"2048.io\\": [0.7, 0.4]\\n \\"2048 Animated\\": [0.5, 0.6]\\n \\"2048 3D\\": [0.6, 0.3]\\n \\"Our Target Product\\": [0.8, 0.9]","Requirement Analysis":"The game must be engaging and retain players, which requires a balance of 
simplicity and challenge. Accessibility on various devices is crucial for a wider reach. A modern UI is needed to attract and retain the modern user. The ability to play offline is important for users on the go. High score tracking and the ability to undo moves are features that will enhance user experience.","Requirement Pool":[["P0","Implement core 2048 gameplay mechanics"],["P0","Design responsive UI for multiple devices"],["P1","Develop undo move feature"],["P1","Integrate high score tracking system"],["P2","Enable offline gameplay capability"]],"UI Design draft":"The UI will feature a clean and modern design with a minimalist color scheme. The game board will be center-aligned with smooth tile animations. Score and high score will be displayed at the top. Undo and restart buttons will be easily accessible. The design will be responsive to fit various screen sizes.","Anything UNCLEAR":"The monetization strategy for the game is not specified. Further clarification is needed on whether the game should include advertisements, in-app purchases, or be completely free."}'}}}"""
DESIGN_CONTENT = """{"docs":{"20240424214432.json":{"root_path":"docs/system_design","filename":"20240424214432.json","content":"{\\"Implementation approach\\":\\"We will develop the 2048 game using Python, leveraging the pygame library for rendering the game interface and handling user input. This library is suitable for creating games and is widely used in the open-source community. We will ensure that the game logic is separated from the UI code to maintain a clean architecture. The game will be designed to be responsive and accessible on both desktop and mobile devices using scalable dimensions and touch-friendly controls.\\",\\"File list\\":[\\"main.py\\",\\"game.py\\",\\"ui.py\\",\\"constants.py\\",\\"logic.py\\"],\\"Data structures and interfaces\\":\\"\\\\nclassDiagram\\\\n class Main {\\\\n +main() void\\\\n }\\\\n class Game {\\\\n -UI ui\\\\n -Logic logic\\\\n +start_game() void\\\\n +restart_game() void\\\\n }\\\\n class UI {\\\\n -current_score int\\\\n -high_score int\\\\n +draw_board(board: list) void\\\\n +update_score(score: int) void\\\\n +show_game_over() void\\\\n }\\\\n class Logic {\\\\n -board list\\\\n -score int\\\\n +move(direction: str) bool\\\\n +check_game_over() bool\\\\n +get_current_score() int\\\\n +get_high_score() int\\\\n +reset_game() void\\\\n }\\\\n class Constants {\\\\n +BOARD_SIZE int\\\\n +INITIAL_TILES int\\\\n }\\\\n Main --> Game\\\\n Game --> UI\\\\n Game --> Logic\\\\n\\",\\"Program call flow\\":\\"\\\\nsequenceDiagram\\\\n participant M as Main\\\\n participant G as Game\\\\n participant UI as UI\\\\n participant L as Logic\\\\n M->>G: start_game()\\\\n loop Game Loop\\\\n G->>UI: draw_board(board)\\\\n G->>L: move(direction)\\\\n alt if move successful\\\\n L-->>G: return true\\\\n G->>UI: update_score(score)\\\\n else if move not successful\\\\n L-->>G: return false\\\\n end\\\\n G->>L: check_game_over()\\\\n alt if game over\\\\n L-->>G: return true\\\\n G->>UI: show_game_over()\\\\n G->>G: restart_game()\\\\n 
else\\\\n L-->>G: return false\\\\n end\\\\n end\\\\n\\",\\"Anything UNCLEAR\\":\\"Clarification needed on the specific touch-friendly controls for mobile devices and how they will be implemented using pygame.\\"}"}}}"""
@pytest.mark.asyncio
async def test_plan_update_and_routing(env):
    """After each upstream deliverable, the team leader forwards work downstream."""
    tl = env.get_role("Mike")
    env.publish_message(Message(content="create a 2048 game"))
    await tl.run()
    # PM (Alice) finishes the PRD; the leader should route it onward.
    env.publish_message(Message(content=PRD_MSG_CONTENT, sent_from="Alice", send_to={"<all>"}))
    await tl.run()
    outgoing = [msg for msg in env.history.get() if msg.sent_from == tl.name]
    to_architect = [msg for msg in outgoing if "Bob" in msg.send_to]
    assert len(to_architect) > 0, "Should have message forwarded to architect"
    latest = to_architect[-1]
    assert "2048 game based on the PRD" in latest.content, "Message to architect should contain PRD info"
    # Architect (Bob) finishes the design; the leader runs one more routing round.
    env.publish_message(Message(content=DESIGN_CONTENT, sent_from="Bob", send_to={"<all>"}))
    await tl.run()
@pytest.mark.asyncio
async def test_reply_to_human(env):
    """TeamLeader should answer a direct human progress inquiry with status info."""
    team_leader = env.get_role("Mike")

    # Drive the project forward: user requirement, then the PM's finished PRD.
    env.publish_message(Message(content="create a 2048 game"))
    await team_leader.run()
    env.publish_message(Message(content=PRD_MSG_CONTENT, sent_from="Alice", send_to={"<all>"}))
    await team_leader.run()

    # Snapshot history so we can isolate messages produced by the inquiry.
    seen_before = env.history.get()

    # A human asks the team leader directly about progress.
    env.publish_message(Message(content="Who is working? How does the project go?", send_to={team_leader.name}))
    await team_leader.run()

    fresh = [m for m in env.history.get() if m not in seen_before]
    replies = [m for m in fresh if m.sent_from == team_leader.name]
    assert replies, "Should have response from team leader"

    # The reply should mention project status in some form.
    lowered = replies[0].content.lower()
    assert any(
        word in lowered for word in ("progress", "status", "working")
    ), "Response should contain project status information"
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/roles/di/run_product_manager.py | tests/metagpt/roles/di/run_product_manager.py | import asyncio
import sys
from metagpt.logs import logger
from metagpt.roles import ProductManager
# Demo requirements fed to the ProductManager. Several are Chinese-language
# market-research prompts; they are runtime strings sent to the LLM and are
# therefore kept verbatim.
CASE0_WRITE_2048 = """Write a PRD for a cli 2048 game"""
CASE1_GREEDY_SNAKE = "设计一个贪吃蛇游戏"
CASE2_SMART_HOME = "搜索并分析米家、华为智能家居和海尔智家在智能家居市场中的功能、用户需求和市场定位"
CASE3_BEST_SELLING_REFRIGERATOR = "调研当前市场上最畅销的智能冰箱的五个关键特性"
# A previously generated PRD; embedded into CASE4 below as revision context.
OLD_PRD = """
Language
en_us
Programming Language
N/A
Original Requirements
Write a PRD based on the current music streaming service.
Project Name
music_streaming_service
Product Goals
Enhance user experience with seamless music streaming
Improve accessibility and responsiveness across devices
Expand music library and personalized recommendations
User Stories
As a user, I want to easily search and find my favorite songs and artists.
As a user, I want to create and manage my own playlists.
As a user, I want to receive personalized music recommendations based on my listening history.
As a user, I want to stream music without interruptions or buffering.
As a user, I want to access the service on both desktop and mobile devices.
Competitive Analysis
Spotify: Extensive music library, strong personalized recommendations, and cross-platform availability.
Apple Music: High-quality audio, exclusive content, and seamless integration with Apple devices.
Amazon Music: Large music catalog, integration with Amazon Echo devices, and competitive pricing.
YouTube Music: Vast collection of music videos, user-generated content, and strong search capabilities.
Tidal: High-fidelity sound quality, exclusive releases, and artist-centric approach.
Competitive Quadrant Chart
quadrantChart title "Feature Richness vs. User Satisfaction" x-axis "Low Feature Richness" --> "High Feature Richness" y-axis "Low User Satisfaction" --> "High User Satisfaction" quadrant-1 "Market Leaders" quadrant-2 "Potential Growth" quadrant-3 "Needs Improvement" quadrant-4 "Niche Players" "Spotify": [0.9, 0.85] "Apple Music": [0.85, 0.8] "Amazon Music": [0.75, 0.7] "YouTube Music": [0.8, 0.75] "Tidal": [0.7, 0.65] "Our Target Product": [0.8, 0.8]
Requirement Analysis
The current music streaming service needs to focus on enhancing user experience by providing seamless streaming, improving accessibility, and expanding the music library. Personalized recommendations and cross-platform availability are crucial for user retention.
Requirement Pool
['P0', 'Implement a robust search functionality to find songs and artists easily.']
['P0', 'Develop a feature for users to create and manage playlists.']
['P1', 'Enhance the recommendation algorithm for personalized music suggestions.']
['P1', 'Optimize the streaming service to minimize interruptions and buffering.']
['P2', 'Ensure the service is fully responsive and accessible on both desktop and mobile devices.']
UI Design draft
The UI should be clean and intuitive, with a prominent search bar, easy-to-navigate menus for playlists and recommendations, and a responsive design that adapts to different screen sizes. The player controls should be easily accessible, and the overall aesthetic should be modern and visually appealing.
Anything UNCLEAR
Currently, all aspects of the project are clear.
"""
# PRD-revision case: asks the PM to rework OLD_PRD from user feedback.
CASE4_MUSIC_STREAMING_MEDIA = f"""We have received feedback from users regarding the current music streaming service, stating that they need better personalized recommendations. Please readjust the content of PRD {OLD_PRD} based on these feedback."""
CASE5_SMART_BIG_SCREEN = """分析2024年上半年中国家庭智能大屏行业的发展情况并输出市场分析报告"""
CASE6_ELECTRONIC_CIGARETTE = """我想要生产一个电子烟产品,请帮我完成市场调研分析报告"""
def main():
    """Run the ProductManager against each enabled demo case.

    Cases run sequentially; a failure in one case is logged (with traceback)
    and does not stop the remaining cases.
    """
    cases = [
        # CASE0_WRITE_2048,
        # CASE1_GREEDY_SNAKE,
        # CASE2_SMART_HOME,
        # CASE3_BEST_SELLING_REFRIGERATOR,
        # CASE4_MUSIC_STREAMING_MEDIA,
        CASE5_SMART_BIG_SCREEN,
        # CASE6_ELECTRONIC_CIGARETTE,
    ]
    # All generated artifacts are written under this directory.
    root_path = "/tmp"
    # Reconfigure logging: drop the default sink, log INFO+ to stderr.
    logger.remove()
    logger.add(sys.stderr, level="INFO")
    for case in cases:
        case += f"\nroot path: '{root_path}'"
        logger.info(f"user requirement:\n{case}")
        try:
            product_manager = ProductManager()
            asyncio.run(product_manager.run(case))
        except Exception:
            # The original `print(e)` discarded the traceback and bypassed the
            # configured log sinks; log the full exception instead.
            logger.exception("product manager case failed")


if __name__ == "__main__":
    main()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/roles/di/test_role_zero.py | tests/metagpt/roles/di/test_role_zero.py | import pytest
from metagpt.actions import UserRequirement
from metagpt.logs import logger
from metagpt.roles.di.role_zero import RoleZero
from metagpt.schema import Message
@pytest.mark.asyncio
async def test_model_validators():
    """RoleZero's pydantic validators should set up planning, tools and memory."""
    role = RoleZero()

    # set_plan_and_tool: default react mode with a planner attached.
    assert role.react_mode == "react"
    assert role.planner is not None

    # set_tool_execution: built-in commands are registered in the dispatch map.
    for command in ("Plan.append_task", "RoleZero.ask_human"):
        assert command in role.tool_execution_map

    # set_longterm_memory: the role context carries a memory instance.
    assert role.rc.memory is not None
@pytest.mark.asyncio
async def test_think_react_cycle():
    """_think should report work to do, and _react should yield a Message."""
    role = RoleZero(tools=["Plan"])

    # Prime the role so there is something to think about.
    role.rc.todo = True
    role.planner.plan.goal = "Test goal"
    role.respond_language = "English"

    assert await role._think() is True

    # Feed a user-requirement message and run the full react cycle.
    role.rc.news = [Message(content="Test", cause_by=UserRequirement())]
    reply = await role._react()
    logger.info(reply)
    assert isinstance(reply, Message)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/roles/di/test_data_analyst.py | tests/metagpt/roles/di/test_data_analyst.py | from unittest.mock import AsyncMock
import pytest
from metagpt.actions.di.execute_nb_code import ExecuteNbCode
from metagpt.actions.di.write_analysis_code import WriteAnalysisCode
from metagpt.logs import logger
from metagpt.roles.di.data_analyst import DataAnalyst
from metagpt.tools.tool_recommend import BM25ToolRecommender
class TestDataAnalyst:
    """Unit tests for the DataAnalyst role."""

    def test_init(self):
        """A fresh DataAnalyst carries the expected identity and actions."""
        da = DataAnalyst()
        assert da.name == "David"
        assert da.profile == "DataAnalyst"
        assert "Browser" in da.tools
        assert isinstance(da.write_code, WriteAnalysisCode)
        assert isinstance(da.execute_code, ExecuteNbCode)

    def test_set_custom_tool(self):
        """Assigning custom tools installs a BM25-based recommender."""
        da = DataAnalyst()
        da.custom_tools = ["web scraping", "Terminal"]
        assert isinstance(da.custom_tool_recommender, BM25ToolRecommender)

    @pytest.mark.asyncio
    async def test_write_and_exec_code_no_task(self):
        """Without a current task, code execution is refused with a hint."""
        outcome = await DataAnalyst().write_and_exec_code()
        logger.info(outcome)
        assert "No current_task found" in outcome

    @pytest.mark.asyncio
    async def test_write_and_exec_code_success(self):
        """A simple, well-defined task should execute successfully."""
        da = DataAnalyst()
        await da.execute_code.init_code()
        goal = "construct a two-dimensional array"
        da.planner.plan.goal = goal
        da.planner.plan.append_task(
            task_id="1",
            dependent_task_ids=[],
            instruction=goal,
            assignee="David",
            task_type="DATA_ANALYSIS",
        )
        outcome = await da.write_and_exec_code(goal)
        logger.info(outcome)
        assert "Success" in outcome

    @pytest.mark.asyncio
    async def test_write_and_exec_code_failure(self):
        """Execution errors are surfaced as a failure containing the error text."""
        da = DataAnalyst()
        await da.execute_code.init_code()
        da.planner.plan.goal = "Execute a code that fails"
        da.planner.plan.append_task(
            task_id="1", dependent_task_ids=[], instruction="Execute a code that fails", assignee="David"
        )
        # Stub out execution so it deterministically reports an error.
        da.execute_code.run = AsyncMock(return_value=("Error: Division by zero", False))
        outcome = await da.write_and_exec_code("divide by zero")
        logger.info(outcome)
        assert "Failed" in outcome
        assert "Error: Division by zero" in outcome

    @pytest.mark.asyncio
    async def test_run_special_command(self):
        """The special `end` command finishes every task in the plan."""
        da = DataAnalyst()
        da.planner.plan.goal = "test goal"
        da.planner.plan.append_task(task_id="1", dependent_task_ids=[], instruction="test task", assignee="David")
        assert not da.planner.plan.is_plan_finished()
        outcome = await da._run_special_command({"command_name": "end"})
        assert "All tasks are finished" in outcome
        assert da.planner.plan.is_plan_finished()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/roles/di/run_swe_agent_for_benchmark.py | tests/metagpt/roles/di/run_swe_agent_for_benchmark.py | import argparse
import asyncio
import json
import os
import shutil
import sys
from datetime import datetime
from pathlib import Path
from metagpt.config2 import config
from metagpt.const import DEFAULT_WORKSPACE_ROOT, METAGPT_ROOT
from metagpt.logs import logger
from metagpt.roles.di.engineer2 import Engineer2
from metagpt.tools.libs.editor import Editor
from metagpt.tools.libs.terminal import Terminal
from metagpt.tools.swe_agent_commands.swe_agent_utils import load_hf_dataset
# Local data locations -- specify/adjust for your own environment.
TEST_REPO_DIR = METAGPT_ROOT / "data" / "test_repo"
DATA_DIR = METAGPT_ROOT / "data/hugging_face"
# Per-instance prompt: placeholders are filled from the SWE-bench dataset row
# (issue text, hints, local repo path, version, base commit) in run() below.
INSTANCE_TEMPLATE = """
## User Requirement
Fix the bug in the repo. Because the environment is not available, you DO NOT need to run and modify any existing test case files or add new test case files to ensure that the bug is fixed.
We're currently solving the following issue within our repository. You can use any bash commands or the special interface to help you. Here's the issue and hints text:
## ISSUE
{issue}
## HINTS
hints text is the comment under issue:
{hints_text}
The repository may already exist at the path `{repo_path}`. If it doesn't, please download the repository to this path.
Your first action must be to navigate to the repository path `{repo_path}`.
This issue occurred in version {version}, with the corresponding base commit being {base_commit}. You need to switch to the code version associated with this commit.
All subsequent actions must be performed within this repository path. Do not leave this directory to execute any actions at any time.
# INSTRUCTIONS:
Now, you're going to solve this issue on your own from the perspective of a programmer. Your terminal session has started and you're in the repository's root directory. You can use any bash commands or the special interface to help you. Edit all the files you need.
Remember, YOU CAN ONLY ENTER ONE COMMAND AT A TIME. You should always wait for feedback after every command.
"""
def check_instance_status(instance, swe_result_dir):
    """Return True if `instance` still needs to run, False if already recorded.

    Scans `all_preds.jsonl` under `swe_result_dir`; an instance counts as done
    once a line with the same "instance_id" exists.

    Args:
        instance: Dataset row (dict-like) with at least an "instance_id" key.
        swe_result_dir: Directory (Path) holding the predictions file.

    Returns:
        bool: True when no prediction for this instance exists yet.
    """
    output_file = swe_result_dir / "all_preds.jsonl"
    # No predictions file yet -> nothing has been recorded, so run it.
    if not output_file.exists():
        return True
    with open(output_file, "r") as fp:
        for line in fp:
            if json.loads(line.strip())["instance_id"] == instance["instance_id"]:
                return False
    return True
async def terminal_run_command(cmd, terminal):
    """Execute `cmd` in the given terminal session, log and return its output."""
    output = await terminal.run_command(cmd)
    logger.info(f"command:{cmd} output:\n {output}")
    return output
async def refresh_repo(instance, test_repo_dir, reclone_existing_repo=False):
    """Ensure a clean checkout of the instance's repository and return its path.

    An existing checkout is hard-reset to the remote default branch, or removed
    and recloned when `reclone_existing_repo` is True. A fresh clone is pinned
    to the instance's base commit when one is provided. Shell errors are logged
    and swallowed (best-effort), matching the original behavior.

    Args:
        instance: Dataset row with "repo", "version" and "base_commit" keys.
        test_repo_dir: Parent directory that holds all test repositories.
        reclone_existing_repo: Remove and reclone an existing checkout.

    Returns:
        Path: Local path of the repository checkout.
    """
    # Compute the path before the try block so the final `return repo_path`
    # can never raise UnboundLocalError (the original bound it inside `try`).
    repo_path = Path(test_repo_dir) / (
        instance["repo"].replace("-", "_").replace("/", "__") + "_" + instance["version"]
    )
    terminal = Terminal()
    try:
        repo_identifier = instance["repo"]
        base_commit = instance["base_commit"]
        if os.path.exists(repo_path) and reclone_existing_repo is True:
            logger.info(f"remove exist repo path:{repo_path.absolute()}")
            shutil.rmtree(repo_path)
        if os.path.exists(repo_path):
            logger.info(f"reset exist repo path:{repo_path.absolute()}")
            for cmd in [
                f"cd {repo_path.absolute()}",
                "git reset --hard && git clean -n -d && git clean -f -d",
                "BRANCH=$(git remote show origin | awk '/HEAD branch/ {print $NF}')",
                'git checkout "$BRANCH"',
                "git branch",
                "pwd",
            ]:
                await terminal_run_command(cmd, terminal)
        else:
            logger.info(f"clone repo to path:{repo_path}")
            for cmd in [
                f"git clone 'https://github.com/{repo_identifier}.git' {repo_path.absolute()}",
                # Parenthesized conditional: the original wrote
                # `f"cd ..." + f" && git checkout ..." if base_commit else ""`,
                # which (by ternary precedence) collapsed the whole command --
                # including the `cd` -- to "" whenever base_commit was empty.
                f"cd {repo_path.absolute()}" + (f" && git checkout -f {base_commit}" if base_commit else ""),
                "git branch",
                "pwd",
            ]:
                await terminal_run_command(cmd, terminal)
    except Exception as e:
        logger.warning(e)
    finally:
        await terminal.close()
    return repo_path
async def get_git_diff(instance, test_repo_dir):
    """Stage all changes in the instance's repo and return the staged diff."""
    git_diff = ""
    terminal = Terminal()
    try:
        repo_name = instance["repo"].replace("-", "_").replace("/", "__") + "_" + instance["version"]
        repo_path = Path(test_repo_dir) / repo_name
        # Ignore editor backup files, then stage everything for diffing.
        setup_cmds = [f"cd {repo_path.absolute()} ", "echo '.backup.*' >> .gitignore", "git add -A"]
        for cmd in setup_cmds:
            await terminal_run_command(cmd, terminal)
        git_diff = await terminal_run_command("git diff --cached", terminal)
    except Exception as e:
        logger.error(f"Error during submission: {e}")
    finally:
        await terminal.close()
    return git_diff
async def run(instance, swe_result_dir, args):
    """Solve a single SWE-bench instance end-to-end and record its prediction.

    Skips instances that already have a saved prediction. The engineer run is
    bounded by `args.max_wait_time_per_case` minutes; whatever state the repo
    is in afterwards is captured as the prediction diff.
    """
    if not check_instance_status(instance, swe_result_dir):
        logger.info(f"Instance {instance['instance_id']} already exists, skipping execution.")
        return
    # preparation for the repo
    logger.info(f"**** Preparing to run {instance['instance_id']}****")
    test_repo_dir = args.test_repo_dir
    repo_path = await refresh_repo(instance, test_repo_dir, args.reclone_existing_repo)
    user_requirement_and_issue = INSTANCE_TEMPLATE.format(
        issue=instance["problem_statement"],
        hints_text=instance["hints_text"],
        repo_path=repo_path.absolute(),
        version=instance["version"],
        base_commit=instance["base_commit"],
    )
    logger.info(f"**** Starting to run {instance['instance_id']}****")
    logger.info("User Requirement:\n" + user_requirement_and_issue)
    # Construct the agent OUTSIDE the try block: the original built it inside,
    # so a construction failure left `engineer` unbound and the
    # save_predictions call below raised NameError, masking the real error.
    editor = Editor(enable_auto_lint=True, working_dir=Path(repo_path))
    engineer = Engineer2(run_eval=True, editor=editor)
    try:
        await asyncio.wait_for(engineer.run(user_requirement_and_issue), timeout=args.max_wait_time_per_case * 60)
    except Exception as e:
        logger.warning(f"**** exception lead to end: {instance['instance_id']}****\n\nerror:{e}")
    # save the difference of repo (even when the run timed out or failed)
    await save_predictions(engineer, instance, test_repo_dir, swe_result_dir)
    logger.info(f"**** Finished running {instance['instance_id']}****")
async def save_predictions(engineer, instance, test_repo_dir, swe_result_dir):
    """Append this instance's model info and repo diff to all_preds.jsonl."""
    output_file = swe_result_dir / "all_preds.jsonl"
    # Record which model produced the patch, then the patch itself.
    instance["model_name_or_path"] = engineer.config.llm.model
    instance["model_patch"] = await get_git_diff(instance, test_repo_dir)
    logger.info(f"'model_patch':\n{instance['model_patch']}")
    logger.info(f"Preparing to save predictions to {output_file}")
    # Append one JSON record per line, flushed immediately.
    record = json.dumps(instance)
    with open(output_file, "a+") as fp:
        fp.write(record + "\n")
        fp.flush()
    logger.info(f"Saved prediction of {instance['instance_id']} to {output_file}")
async def async_main(args):
    """Load the benchmark dataset and run every instance sequentially."""
    dataset_path = "manna-ai/SWE-bench_Nano"  # "princeton-nlp/SWE-bench_Lite" #"manna-ai/SWE-bench_Nano"
    dataset = load_hf_dataset(dataset_name_or_path=dataset_path, cache_dir=DATA_DIR, split="test")
    swe_result_dir = Path(args.save_folder)
    if swe_result_dir.exists():
        logger.info(f"{swe_result_dir} exists; resuming test from last checkpoint.")
    swe_result_dir.mkdir(parents=True, exist_ok=True)
    for ordinal, instance in enumerate(dataset, start=1):
        # Switch to a per-instance logger: stderr at INFO, one file per case at DEBUG.
        logger.remove()
        logger.add(sys.stderr, level="INFO")
        logger.add(swe_result_dir / "logs" / f"{ordinal}_{instance['instance_id']}.log", level="DEBUG")
        await run(instance, swe_result_dir, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="the argument of scripts")
    # Default locations: results under the workspace (tagged with the model
    # name and start time), temporary repos under TEST_REPO_DIR.
    swe_result_dir = (
        DEFAULT_WORKSPACE_ROOT
        / f"result_{config.llm.model.replace('/', '_')}_start_time_{datetime.now().strftime('%Y_%m_%d_%H_%M_%S') }"
    )
    test_repo_dir = TEST_REPO_DIR.absolute()
    swe_result_dir = swe_result_dir.absolute()
    # Command-line arguments.
    parser.add_argument(
        "-rw", "--test_repo_dir", default=test_repo_dir, help="The directory to save temporary repositories", type=str
    )
    parser.add_argument("-s", "--save_folder", default=swe_result_dir, help="Folder to save results and logs", type=str)
    parser.add_argument(
        "-mwtc",
        "--max_wait_time_per_case",
        default=10,
        help="Maximum wait time allowed per test case (in minutes)",
        type=int,
    )
    parser.add_argument(
        "-o",
        "--reclone_existing_repo",
        action="store_true",
        help="If set, the existing repository will be removed and recloned.",
    )
    # Parse command-line arguments and launch the async driver.
    args = parser.parse_args()
    asyncio.run(async_main(args))
"""
#
python tests/metagpt/roles/di/run_swe_agent_for_benchmark.py \
--test_repo_dir "./data/test_repo" \
--save_folder "./workspace/deepseek_coder_0907" \
--max_wait_time_per_case 10
"""
"""
# 重新克隆仓库
python tests/metagpt/roles/di/run_swe_agent_for_benchmark.py \
--test_repo_dir "./data/test_repo" \
--save_folder "./workspace/deepseek_coder_0907" \
--max_wait_time_per_case 10 \
--reclone_existing_repo
"""
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/roles/di/run_data_analyst.py | tests/metagpt/roles/di/run_data_analyst.py | from metagpt.roles.di.data_analyst import DataAnalyst
# Dataset locations and demo requirements for the DataAnalyst role. All of the
# REQ strings are runtime prompts sent to the agent and are kept verbatim
# (including typos and Chinese text).
HOUSE_PRICE_TRAIN_PATH = "/data/house-prices-advanced-regression-techniques/split_train.csv"
HOUSE_PRICE_EVAL_PATH = "/data/house-prices-advanced-regression-techniques/split_eval.csv"
# Tabular ML: regression on house prices, report log-RMSE on the eval split.
HOUSE_PRICE_REQ = f"""
This is a house price dataset, your goal is to predict the sale price of a property based on its features. The target column is SalePrice. Perform data analysis, data preprocessing, feature engineering, and modeling to predict the target. Report RMSE between the logarithm of the predicted value and the logarithm of the observed sales price on the eval data. Train data path: '{HOUSE_PRICE_TRAIN_PATH}', eval data path: '{HOUSE_PRICE_EVAL_PATH}'.
"""
CALIFORNIA_HOUSING_REQ = """
Analyze the 'Canifornia-housing-dataset' using https://scikit-learn.org/stable/modules/generated/sklearn.datasets.fetch_california_housing.html#sklearn.datasets.fetch_california_housing to predict the median house value. you need to perfrom data preprocessing, feature engineering and finally modeling to predict the target. Use machine learning techniques such as linear regression (including ridge regression and lasso regression), random forest, CatBoost, LightGBM, XGBoost or other appropriate method. You also need to report the MSE on the test dataset
"""
# For web scraping task, please provide url begin with `https://` or `http://`
PAPER_LIST_REQ = """"
Get data from `paperlist` table in https://papercopilot.com/statistics/iclr-statistics/iclr-2024-statistics/,
and save it to a csv file. paper title must include `multiagent` or `large language model`.
**Notice: view the page element before writing scraping code**
"""
ECOMMERCE_REQ = """
Get products data from website https://scrapeme.live/shop/ and save it as a csv file.
The first page product name, price, product URL, and image URL must be saved in the csv.
**Notice: view the page element before writing scraping code**
"""
NEWS_36KR_REQ = """从36kr创投平台https://pitchhub.36kr.com/financing-flash 所有初创企业融资的信息, **注意: 这是一个中文网站**;
下面是一个大致流程, 你会根据每一步的运行结果对当前计划中的任务做出适当调整:
1. 爬取并本地保存html结构;
2. 直接打印第7个*`快讯`*关键词后2000个字符的html内容, 作为*快讯的html内容示例*;
3. 反思*快讯的html内容示例*中的规律, 设计正则匹配表达式来获取*`快讯`*的标题、链接、时间;
4. 筛选最近3天的初创企业融资*`快讯`*, 以list[dict]形式打印前5个。
5. 将全部结果存在本地csv中
**Notice: view the page element before writing scraping code**
"""
WIKIPEDIA_SEARCH_REQ = """
Search for `LLM` on https://www.wikipedia.org/ and print all the meaningful significances of the entry.
"""
STACKOVERFLOW_CLICK_REQ = """
Click the Questions tag in https://stackoverflow.com/ and scrap question name, votes, answers and views num to csv in the first result page.
"""
async def main():
    """Launch a DataAnalyst with a live browser and run the demo requirement."""
    analyst = DataAnalyst()
    # The scraping requirement needs a running browser session first.
    await analyst.browser.start()
    await analyst.run(STACKOVERFLOW_CLICK_REQ)


if __name__ == "__main__":
    import asyncio

    asyncio.run(main())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/roles/di/run_project_manager.py | tests/metagpt/roles/di/run_project_manager.py | import asyncio
import os
from metagpt.roles.project_manager import ProjectManager
from metagpt.schema import Message
DESIGN_DOC_2048 = '{"Implementation approach":"We will use the Pygame library to implement the 2048 game logic and user interface. Pygame is a set of Python modules designed for writing video games, which will help us create a responsive and visually appealing UI. For the mobile responsiveness, we will ensure that the game scales appropriately on different screen sizes. We will also use the Pygame GUI library to create buttons for restarting the game and choosing difficulty levels.","File list":["main.py","game.py","ui.py"],"Data structures and interfaces":"\\nclassDiagram\\n class Game {\\n -grid: list[list[int]]\\n -score: int\\n +__init__()\\n +move(direction: str) bool\\n +merge() bool\\n +spawn_tile() None\\n +is_game_over() bool\\n +reset() None\\n }\\n class UI {\\n -game: Game\\n +__init__(game: Game)\\n +draw_grid() None\\n +draw_score() None\\n +draw_buttons() None\\n +handle_input() None\\n }\\n class Main {\\n -ui: UI\\n +main() None\\n }\\n Main --> UI\\n UI --> Game\\n","Program call flow":"\\nsequenceDiagram\\n participant M as Main\\n participant U as UI\\n participant G as Game\\n M->>U: __init__(game)\\n U->>G: __init__()\\n M->>U: draw_grid()\\n U->>G: move(direction)\\n G-->>U: return bool\\n U->>G: merge()\\n G-->>U: return bool\\n U->>G: spawn_tile()\\n G-->>U: return None\\n U->>G: is_game_over()\\n G-->>U: return bool\\n U->>G: reset()\\n G-->>U: return None\\n M->>U: draw_score()\\n M->>U: draw_buttons()\\n M->>U: handle_input()\\n","Anything UNCLEAR":"Clarification needed on the specific design elements for the UI to ensure it meets the \'beautiful\' requirement. Additionally, we need to confirm the exact difficulty levels and how they should affect the game mechanics."}'
DESIGN_DOC_SNAKE = """
{
"Implementation approach": "We will use the Pygame library to create the CLI-based snake game. Pygame is a set of Python modules designed for writing video games, which will help us handle graphics, sound, and input. The game will be structured into different modules to handle the main game loop, snake movement, food generation, collision detection, and user interface. We will ensure the game is engaging and responsive by optimizing the game loop and input handling. The score display and different speed levels will be implemented to enhance the user experience.",
"File list": [
"main.py",
"game.py",
"snake.py",
"food.py",
"ui.py"
],
"Data structures and interfaces": "\nclassDiagram\n class Main {\n +main() void\n }\n class Game {\n -Snake snake\n -Food food\n -int score\n -int speed\n +__init__(speed: int)\n +run() void\n +restart() void\n +update_score() void\n }\n class Snake {\n -list body\n -str direction\n +__init__()\n +move() void\n +change_direction(new_direction: str) void\n +check_collision() bool\n +grow() void\n }\n class Food {\n -tuple position\n +__init__()\n +generate_new_position() void\n }\n class UI {\n +display_score(score: int) void\n +display_game_over() void\n +display_game(snake: Snake, food: Food) void\n }\n Main --> Game\n Game --> Snake\n Game --> Food\n Game --> UI\n",
"Program call flow": "\nsequenceDiagram\n participant M as Main\n participant G as Game\n participant S as Snake\n participant F as Food\n participant U as UI\n M->>G: __init__(speed)\n M->>G: run()\n G->>S: __init__()\n G->>F: __init__()\n loop Game Loop\n G->>S: move()\n G->>S: check_collision()\n alt Collision Detected\n G->>G: restart()\n G->>U: display_game_over()\n else No Collision\n G->>F: generate_new_position()\n G->>S: grow()\n G->>G: update_score()\n G->>U: display_score(score)\n end\n G->>U: display_game(snake, food)\n end\n",
"Anything UNCLEAR": "Currently, all aspects of the project are clear."
}
"""
REQ = """Write a project schedule based on the design at temp_design.json"""
CASUAL_CHAT = """what's your name?"""
async def main(requirement):
    """Write a throwaway design doc, run the ProjectManager on it, then clean up.

    Args:
        requirement: The user requirement message routed to the PM ("Eve").
    """
    with open("temp_design.json", "w") as f:
        f.write(DESIGN_DOC_2048)
    try:
        project_manager = ProjectManager()
        await project_manager.run(Message(content=requirement, send_to="Eve"))
    finally:
        # The original removed the file only after a successful run, leaking
        # temp_design.json whenever the run raised; always clean up.
        os.remove("temp_design.json")


if __name__ == "__main__":
    asyncio.run(main(REQ))
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/roles/di/test_swe_agent.py | tests/metagpt/roles/di/test_swe_agent.py | import pytest
from metagpt.environment.mgx.mgx_env import MGXEnv
from metagpt.roles.di.swe_agent import SWEAgent
from metagpt.roles.di.team_leader import TeamLeader
from metagpt.schema import Message
from metagpt.tools.libs.terminal import Bash
@pytest.fixture
def env():
    """MGX environment populated with a TeamLeader and an SWEAgent."""
    environment = MGXEnv()
    environment.add_roles([TeamLeader(), SWEAgent()])
    return environment
@pytest.mark.asyncio
async def test_swe_agent(env):
    """Check SWEAgent identity, tooling, and instruction output for a bug-fix request."""
    swe = env.get_role("Swen")
    env.publish_message(Message(content="Fix bug in the calculator app", send_to={swe.name}))
    await swe.run()

    produced = [m for m in env.history.get() if m.sent_from == swe.name]

    # Static configuration of the agent.
    assert swe.name == "Swen"
    assert swe.profile == "Issue Solver"
    assert isinstance(swe.terminal, Bash)
    assert "Bash" in swe.tools
    assert "git_create_pull" in swe.tool_execution_map

    # At least one emitted message should read like a concrete instruction.
    keywords = ("git", "bash", "check", "fix")
    assert any(
        any(word in m.content.lower() for word in keywords) for m in produced
    ), "Should have valid instruction messages"
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/roles/di/run_engineer2.py | tests/metagpt/roles/di/run_engineer2.py | import asyncio
import sys
import uuid
from pathlib import Path
from metagpt.logs import logger
from metagpt.roles.di.engineer2 import Engineer2
DESIGN_DOC_2048 = '{"Implementation approach":"We will use the Pygame library to implement the 2048 game logic and user interface. Pygame is a set of Python modules designed for writing video games, which will help us create a responsive and visually appealing UI. For the mobile responsiveness, we will ensure that the game scales appropriately on different screen sizes. We will also use the Pygame GUI library to create buttons for restarting the game and choosing difficulty levels.","File list":["main.py","game.py","ui.py"],"Data structures and interfaces":"\\nclassDiagram\\n class Game {\\n -grid: list[list[int]]\\n -score: int\\n +__init__()\\n +move(direction: str) bool\\n +merge() bool\\n +spawn_tile() None\\n +is_game_over() bool\\n +reset() None\\n }\\n class UI {\\n -game: Game\\n +__init__(game: Game)\\n +draw_grid() None\\n +draw_score() None\\n +draw_buttons() None\\n +handle_input() None\\n }\\n class Main {\\n -ui: UI\\n +main() None\\n }\\n Main --> UI\\n UI --> Game\\n","Program call flow":"\\nsequenceDiagram\\n participant M as Main\\n participant U as UI\\n participant G as Game\\n M->>U: __init__(game)\\n U->>G: __init__()\\n M->>U: draw_grid()\\n U->>G: move(direction)\\n G-->>U: return bool\\n U->>G: merge()\\n G-->>U: return bool\\n U->>G: spawn_tile()\\n G-->>U: return None\\n U->>G: is_game_over()\\n G-->>U: return bool\\n U->>G: reset()\\n G-->>U: return None\\n M->>U: draw_score()\\n M->>U: draw_buttons()\\n M->>U: handle_input()\\n","Anything UNCLEAR":"Clarification needed on the specific design elements for the UI to ensure it meets the \'beautiful\' requirement. Additionally, we need to confirm the exact difficulty levels and how they should affect the game mechanics."}'
TASK_DOC_2048 = '{"Required Python packages":["pygame==2.0.1","pygame_gui==0.5.7"],"Required Other language third-party packages":["No third-party dependencies required"],"Logic Analysis":[["game.py","Contains Game class with methods: __init__, move, merge, spawn_tile, is_game_over, reset"],["ui.py","Contains UI class with methods: __init__, draw_grid, draw_score, draw_buttons, handle_input"],["main.py","Contains Main class with method: main, initializes UI and Game"]],"Task list":["game.py","ui.py","main.py"],"Full API spec":"","Shared Knowledge":"`game.py` contains core game logic and state management. `ui.py` handles all user interface elements and interactions. `main.py` serves as the entry point to initialize and run the game.","Anything UNCLEAR":"Clarification needed on the specific design elements for the UI to ensure it meets the \'beautiful\' requirement. Additionally, we need to confirm the exact difficulty levels and how they should affect the game mechanics."}'
DESIGN_DOC_SNAKE = """
{
"Implementation approach": "We will use the Pygame library to create the CLI-based snake game. Pygame is a set of Python modules designed for writing video games, which will help us handle graphics, sound, and input. The game will be structured into different modules to handle the main game loop, snake movement, food generation, collision detection, and user interface. We will ensure the game is engaging and responsive by optimizing the game loop and input handling. The score display and different speed levels will be implemented to enhance the user experience.",
"File list": [
"main.py",
"game.py",
"snake.py",
"food.py",
"ui.py"
],
"Data structures and interfaces": "\nclassDiagram\n class Main {\n +main() void\n }\n class Game {\n -Snake snake\n -Food food\n -int score\n -int speed\n +__init__(speed: int)\n +run() void\n +restart() void\n +update_score() void\n }\n class Snake {\n -list body\n -str direction\n +__init__()\n +move() void\n +change_direction(new_direction: str) void\n +check_collision() bool\n +grow() void\n }\n class Food {\n -tuple position\n +__init__()\n +generate_new_position() void\n }\n class UI {\n +display_score(score: int) void\n +display_game_over() void\n +display_game(snake: Snake, food: Food) void\n }\n Main --> Game\n Game --> Snake\n Game --> Food\n Game --> UI\n",
"Program call flow": "\nsequenceDiagram\n participant M as Main\n participant G as Game\n participant S as Snake\n participant F as Food\n participant U as UI\n M->>G: __init__(speed)\n M->>G: run()\n G->>S: __init__()\n G->>F: __init__()\n loop Game Loop\n G->>S: move()\n G->>S: check_collision()\n alt Collision Detected\n G->>G: restart()\n G->>U: display_game_over()\n else No Collision\n G->>F: generate_new_position()\n G->>S: grow()\n G->>G: update_score()\n G->>U: display_score(score)\n end\n G->>U: display_game(snake, food)\n end\n",
"Anything UNCLEAR": "Currently, all aspects of the project are clear."
}
"""
TASK_DOC_SNAKE = """
{
"Required Python packages": [
"pygame==2.0.1"
],
"Required Other language third-party packages": [
"No third-party dependencies required"
],
"Logic Analysis": [
[
"main.py",
"Contains the main function to initialize and start the game. Imports Game from game.py."
],
[
"game.py",
"Contains the Game class which manages the game loop, score, and speed. Imports Snake from snake.py, Food from food.py, and UI from ui.py."
],
[
"snake.py",
"Contains the Snake class which handles snake movement, direction changes, collision detection, and growth."
],
[
"food.py",
"Contains the Food class which handles food position generation."
],
[
"ui.py",
"Contains the UI class which handles displaying the score, game over screen, and the game state."
]
],
"Task list": [
"snake.py",
"food.py",
"ui.py",
"game.py",
"main.py"
],
"Full API spec": "",
"Shared Knowledge": "`game.py` contains the main game loop and integrates all other modules (snake, food, UI).",
"Anything UNCLEAR": "Currently, all aspects of the project are clear."
}
"""
GAME_REQ_2048 = f"""
Create a 2048 game, follow the design doc and task doc. Write your code under /Users/gary/Files/temp/workspace/2048_game/src.
After writing all codes, write a code review for the codes, make improvement or adjustment based on the review.
Notice: You MUST implement the full code, don't leave comment without implementation!
Design doc:
{DESIGN_DOC_2048}
Task doc:
{TASK_DOC_2048}
"""
GAME_REQ_SNAKE = f"""
Create a snake game, follow the design doc and task doc. Write your code under /Users/gary/Files/temp/workspace/snake_game/src.
After writing all codes, write a code review for the codes, make improvement or adjustment based on the review.
Notice: You MUST implement the full code, don't leave comment without implementation!
Design doc:
{DESIGN_DOC_SNAKE}
Task doc:
{TASK_DOC_SNAKE}
"""
GAME_REQ_2048_NO_DOC = """
Create a 2048 game with pygame. Write your code under /Users/gary/Files/temp/workspace/2048_game/src.
Consider what files you will write, break down the requests to multiple tasks and write one file in each task.
After writing all codes, write a code review for the codes, make improvement or adjustment based on the review.
Notice: You MUST implement the full code, don't leave comment without implementation!
"""
GAME_INC_REQ_2048 = """
I found an issue with the 2048 code: when tiles are merged, no new tiles pop up.
Write code review for the codes (game.py, main.py, ui.py) under under /Users/gary/Files/temp/workspace/2048_game_bugs/src.
Then correct any issues you find. You can review all code in one time, and solve issues in one time.
"""
GAME_INC_REQ_SNAKE = """
Found this issue, TypeError: generate_new_position() missing 1 required positional argument: 'snake_body'
Write code review for the codes (food.py, game.py, main.py, snake.py, ui.py) under under /Users/gary/Files/temp/workspace/snake_game_bugs/src.
Then correct any issues you find. You can review all code in one time, and solve issues in one time.
"""
CASUAL_CHAT = """what's your name?"""
# increment development
INC_DEVELOPMENT_CASE1 = [
"Complete the Snake game with the root directory at '/home/mgx/mgx/MetaGPT/workspace/snake_game'",
"Use the up button to control the snake to move down, the left button to move right, and so on",
"Place the restart/start button at the top",
"Add a pause button",
"Display the score and leaderboard in real-time on the page",
]
INC_DEVELOPMENT_CASE2 = [
"Develop a Snake game using Python in the '/home/mgx/mgx/MetaGPT/workspace/snake_game_py' folder",
"Change the title to 'Special Snake'",
"Use the up button to control the snake to move down, the left button to move right, and so on",
"Add a pause button",
"Display the score and leaderboard in real-time on the page",
"Design a more attractive style for the leaderboard",
]
INC_DEVELOPMENT_CASE3 = [
"Complete the 2048 game with the root directory at '/home/mgx/mgx/MetaGPT/workspace/2048_game'",
"Place the start button at the top",
"Display the score and leaderboard in real-time on the page",
"Design a more attractive style for the leaderboard",
"Add a restart button",
]
INC_DEVELOPMENT_CASE4 = [
"Develop a 2048 game using Python in the '/home/mgx/mgx/MetaGPT/workspace/2048_game_py' folder",
"Display the score and leaderboard in real-time on the page",
"Add a restart button",
]
INC_DEVELOPMENT_CASE5 = [
"Root path is '/home/mgx/mgx/MetaGPT/workspace/to_list' Create a website widget for TODO list management. Users should be able to add, mark as complete, and delete tasks. Include features like prioritization, due dates, and categories. Make it visually appealing, responsive, and user-friendly. Use HTML, CSS, and JavaScript. Consider additional features like notifications or task export. Keep it simple and enjoyable for users.dont use vue or react.dont use third party library, use localstorage to save data.",
"Add a `clean all` buttonn",
]
INC_DEVELOPMENT_CASE6 = [
'使用原生HTML开发一个塔罗牌角色介绍网站\n1. 主题是塔罗牌占卜的网站\n2. 超前的网页布局\n3. 页面需要时响应式的\n4. 页面需要美观大气 root path "”/home/mgx/mgx/MetaGPT/workspace/taro"',
"扩充更多的角色,添加3个自己想出来的角色",
"让每一个角色的描述更加清楚",
"将中文内容全部替换为英文包括js里面的内容",
]
async def increment_development():
    """Drive Engineer2 through one incremental-development scenario, turn by turn.

    Each entry of the selected case is fed to the engineer as a separate user
    requirement, simulating a user iterating on the same project.
    """
    engineer2 = Engineer2(run_eval=True)
    example = INC_DEVELOPMENT_CASE6
    # Reset logging sinks: INFO to stderr, full DEBUG trace to a per-run log file.
    logger.remove()
    logger.add(sys.stderr, level="INFO")
    logger.add(Path("logs") / f"{str(uuid.uuid4())[-12:]}.log", level="DEBUG")
    logger.info("user requirement:\n" + "\n".join(example))
    try:
        for user_requirement in example:
            logger.info(f"input:{user_requirement}")
            await engineer2.run(user_requirement)
    except Exception:
        # Record the failure with its traceback in the configured sinks instead
        # of print(), so it is captured in the per-run log file as well.
        logger.exception("increment_development run failed")
if __name__ == "__main__":
    # Entry point: run the selected incremental-development case end to end.
    asyncio.run(increment_development())
    # Alternative one-shot run of a full game requirement (kept for manual use):
    # engineer2 = Engineer2()
    # asyncio.run(engineer2.run(GAME_REQ_2048_NO_DOC))
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/roles/di/test_data_interpreter.py | tests/metagpt/roles/di/test_data_interpreter.py | import pytest
from metagpt.logs import logger
from metagpt.roles.di.data_interpreter import DataInterpreter
@pytest.mark.asyncio
@pytest.mark.parametrize("auto_run", [True, False])
async def test_interpreter(mocker, auto_run):
    """DataInterpreter should reply and record task code in both auto and confirm modes."""
    mocker.patch("metagpt.actions.di.execute_nb_code.ExecuteNbCode.run", return_value=("a successful run", True))
    mocker.patch("builtins.input", return_value="confirm")
    requirement = "Run data analysis on sklearn Wine recognition dataset, include a plot, and train a model to predict wine class (20% as validation), and show validation accuracy."
    interpreter = DataInterpreter(auto_run=auto_run)
    response = await interpreter.run(requirement)
    logger.info(response)
    assert len(response.content) > 0
    done_tasks = interpreter.planner.plan.get_finished_tasks()
    assert len(done_tasks) > 0
    # Spot-check one finished task to confirm its code was recorded.
    assert len(done_tasks[0].code) > 0
@pytest.mark.asyncio
async def test_interpreter_react_mode(mocker):
    """DataInterpreter in plain 'react' mode should still produce a non-empty reply."""
    mocker.patch("metagpt.actions.di.execute_nb_code.ExecuteNbCode.run", return_value=("a successful run", True))
    interpreter = DataInterpreter(react_mode="react")
    requirement = "Run data analysis on sklearn Wine recognition dataset, include a plot, and train a model to predict wine class (20% as validation), and show validation accuracy."
    reply = await interpreter.run(requirement)
    logger.info(reply)
    assert len(reply.content) > 0
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/roles/di/test_routing.py | tests/metagpt/roles/di/test_routing.py | import asyncio
from metagpt.environment.mgx.mgx_env import MGXEnv
from metagpt.logs import logger
from metagpt.roles import Architect, ProductManager, ProjectManager
from metagpt.roles.di.data_analyst import DataAnalyst
from metagpt.roles.di.engineer2 import Engineer2
from metagpt.roles.di.team_leader import TeamLeader
from metagpt.schema import Message
NORMAL_QUESTION = [
"create a 2048 game",
"write a snake game",
"Write a 2048 game using JavaScript without using any frameworks, user can play with keyboard.",
"print statistic summary of sklearn iris dataset",
"Run data analysis on sklearn Wine recognition dataset, and train a model to predict wine class (20% as validation), and show validation accuracy.",
"""
Get data from `paperlist` table in https://papercopilot.com/statistics/iclr-statistics/iclr-2024-statistics/,
and save it to a csv file. paper title must include `multiagent` or `large language model`. *notice: print key variables*
""",
"""
Get products data from website https://scrapeme.live/shop/ and save it as a csv file.
The first page product name, price, product URL, and image URL must be saved in the csv;**
""",
"""
Write a fix for this issue: https://github.com/langchain-ai/langchain/issues/20453,
you can fix it on this repo https://github.com/garylin2099/langchain,
checkout a branch named test-fix, commit your changes, push, and create a PR to the master branch of https://github.com/iorisa/langchain
""",
"Open this link and make a sumamry: https://github.com/geekan/MetaGPT", # should not confuse with searching
"请查看这个网页https://platform.openai.com/docs/models", # should not confuse with searching
]
SEARCH_QUESTION = [
"今天的天气怎样?",
"全球智能手机市场份额排名是什么?前三名的品牌各占多少百分比?",
"中国股市上市公司数量是多少?",
"奥运会将在哪里举行?有哪些新增的比赛项目?",
"最近一周全球原油价格的走势如何?",
"当前全球碳排放量最大的三个国家是哪些?",
"当前全球碳排放量最大的三个国家各占多少比例",
"最新的全球教育质量排名中,前五名的国家是哪些?",
"当前全球最大的几家电动汽车制造商是哪些?",
"奥运会的开幕式是什么时候",
"Recommend some gyms near Shenzhen University",
"Which university tops QS ranking?",
"Which university tops QS ranking this year?",
"The stock price of Nvidia?",
# longer questions
"请为我查找位于深圳大学附近1000米范围内,价格适中(性价比最高),且晚上关门时间晚于22:00的健身房。",
"When is the Olympic football final this year, where will it be held, and where can I buy tickets? If possible, please provide me with a link to buy tickets",
"Help me search for Inter Miami CF home games in the next 2 months and give me the link to buy tickets",
]
QUICK_QUESTION = [
## general knowledge qa, logical, math ##
"""Who is the first man landing on Moon""",
"""In DNA adenine normally pairs with: A. cytosine. B. guanine. C. thymine. D. uracil. Answer:""",
"""________________ occur(s) where there is no prior history of exchange and no future exchanges are expected between a buyer and seller. A. Relationship marketing. B. Service mix. C. Market exchanges. D. Service failure. Answer:""",
"""Within American politics, the power to accord official recognition to other countries belongs to A. the Senate. B. the president. C. the Secretary of State. D. the chairman of the Joint Chiefs. Answer:""",
"""Find the degree for the given field extension Q(sqrt(2), sqrt(3), sqrt(18)) over Q.""",
"""True or false? Statement 1 | A ring homomorphism is one to one if and only if the kernel is {{0}},. Statement 2 | Q is an ideal in R""",
"""Jean has 30 lollipops. Jean eats 2 of the lollipops. With the remaining lollipops, Jean wants to package 2 lollipops in one bag. How many bags can Jean fill?""",
"""Alisa biked 12 miles per hour for 4.5 hours. Stanley biked at 10 miles per hour for 2.5 hours. How many miles did Alisa and Stanley bike in total?""",
## function filling (humaneval) ##
"""
def has_close_elements(numbers: List[float], threshold: float) -> bool:
''' Check if in given list of numbers, are any two numbers closer to each other than
given threshold.
>>> has_close_elements([1.0, 2.0, 3.0], 0.5)
False
>>> has_close_elements([1.0, 2.8, 3.0, 4.0, 5.0, 2.0], 0.3)
True
'''
""",
"""
def is_palindrome(string: str) -> bool:
''' Test if given string is a palindrome '''
return string == string[::-1]
def make_palindrome(string: str) -> str:
''' Find the shortest palindrome that begins with a supplied string.
Algorithm idea is simple:
- Find the longest postfix of supplied string that is a palindrome.
- Append to the end of the string reverse of a string prefix that comes before the palindromic suffix.
>>> make_palindrome('')
''
>>> make_palindrome('cat')
'catac'
>>> make_palindrome('cata')
'catac'
'''
""",
# casual chat
"""What's your name?""",
"Who are you",
"What can you do",
"Hi",
"1+1",
# programming-related but not requiring software development SOP
"请写一个python入门教程",
"python里的装饰器是怎么用的,给我个例子",
"写一个java的hello world程序",
]
async def _check_routing(role, questions, expected_token, label):
    """Feed each question to the TeamLeader's quick-think and log routing misses.

    A question is considered correctly routed when `expected_token` appears in
    the returned intent result; misses are logged with `label` for triage.
    """
    for q in questions:
        role.put_message(Message(content=q))
        await role._observe()
        rsp, intent_result = await role._quick_think()
        # Clear memory between questions so each one is routed independently.
        role.rc.memory.clear()
        if expected_token not in intent_result:
            logger.error(f"{label} question failed: {q}")


async def test_routing_acc():
    """Smoke-check TeamLeader intent routing over quick / search / normal question sets."""
    role = TeamLeader()
    env = MGXEnv()
    env.add_roles(
        [
            role,
            ProductManager(),
            Architect(),
            ProjectManager(),
            Engineer2(),
            DataAnalyst(),
        ]
    )
    # Quick questions should be answered directly ("YES"), search questions
    # routed to search ("SEARCH"), and normal requests to the SOP ("NO").
    await _check_routing(role, QUICK_QUESTION, "YES", "Quick")
    await _check_routing(role, SEARCH_QUESTION, "SEARCH", "Search")
    await _check_routing(role, NORMAL_QUESTION, "NO", "Normal")
if __name__ == "__main__":
    # Manual entry point: run the routing-accuracy check outside of pytest.
    asyncio.run(test_routing_acc())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/roles/di/run_swe_agent_open_source_issue.py | tests/metagpt/roles/di/run_swe_agent_open_source_issue.py | import asyncio
from metagpt.logs import logger
from metagpt.roles.di.swe_agent import SWEAgent
FIX_ISSUE1 = """
Write a fix for this issue: https://github.com/langchain-ai/langchain/issues/20453,
you can fix it on this repo https://github.com/garylin2099/langchain
"""
# + "checkout a branch named test-fix, commit your changes, push,
# and create a PR to the master branch of https://github.com/iorisa/langchain"
# """
FIX_ISSUE2 = """
Write a fix for this issue https://github.com/geekan/MetaGPT/issues/1275.
You can fix it on the v0.8-release branch of this repo https://github.com/garylin2099/MetaGPT
"""
# + "during fixing, checkout a branch named test-fix-1275, commit your changes, push,
# and create a PR to the v0.8-release branch of https://github.com/garylin2099/MetaGPT"
FIX_ISSUE3 = """
Write a fix for this issue https://github.com/geekan/MetaGPT/issues/1262.
You can fix it on this repo https://github.com/garylin2099/MetaGPT
"""
# during fixing, checkout a branch named test-fix-1262, commit your changes, push,
# and create a PR to https://github.com/garylin2099/MetaGPT
# """
FIX_ISSUE_SIMPLE = """
Write a fix for this issue: https://github.com/mannaandpoem/simple_calculator/issues/1,
you can fix it on this repo https://github.com/garylin2099/simple_calculator
"""
# checkout a branch named test, commit your changes, push, and create a PR to the master branch of original repo.
# """
NO_ENV_TIP = """
Because the environment is not available, you DO NOT need to run and modify any existing test case files or
add new test case files to ensure that the bug is fixed.
"""
if __name__ == "__main__":
    # Manual run: ask the SWE agent to fix a real GitHub issue.
    swe_agent = SWEAgent()
    logger.info("**** Starting run ****")
    # NO_ENV_TIP tells the agent not to run or modify test cases, since no
    # executable environment is available here.
    user_requirement_and_issue = FIX_ISSUE1 + NO_ENV_TIP
    asyncio.run(swe_agent.run(user_requirement_and_issue))
    logger.info("**** Finished running ****")
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/ext/__init__.py | tests/metagpt/ext/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/ext/android_assistant/test_parse_record.py | tests/metagpt/ext/android_assistant/test_parse_record.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : test case (imgs from appagent's)
import asyncio
from metagpt.actions.action import Action
from metagpt.const import TEST_DATA_PATH
from metagpt.ext.android_assistant.actions.parse_record import ParseRecord
TASK_PATH = TEST_DATA_PATH.joinpath("andriod_assistant/demo_Contacts")
TEST_BEFORE_PATH = TASK_PATH.joinpath("labeled_screenshots/0_labeled.png")
TEST_AFTER_PATH = TASK_PATH.joinpath("labeled_screenshots/1_labeled.png")
RECORD_PATH = TASK_PATH.joinpath("record.txt")
TASK_DESC_PATH = TASK_PATH.joinpath("task_desc.txt")
DOCS_DIR = TASK_PATH.joinpath("storage")
test_action = Action(name="test")
async def manual_learn_test():
    """Parse the pre-recorded demo for the demo_Contacts app into docs."""
    recorder = ParseRecord()
    await recorder.run(app_name="demo_Contacts", task_dir=TASK_PATH, docs_dir=DOCS_DIR)
if __name__ == "__main__":
    # asyncio.run() replaces the deprecated (since Python 3.10)
    # get_event_loop()/run_until_complete()/close() sequence and guarantees
    # the loop is closed even when the coroutine raises.
    asyncio.run(manual_learn_test())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/ext/android_assistant/__init__.py | tests/metagpt/ext/android_assistant/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/ext/android_assistant/test_an.py | tests/metagpt/ext/android_assistant/test_an.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : test on android emulator action. After Modify Role Test, this script is discarded.
import asyncio
import time
from pathlib import Path
import metagpt
from metagpt.const import TEST_DATA_PATH
from metagpt.environment.android.android_env import AndroidEnv
from metagpt.ext.android_assistant.actions.manual_record import ManualRecord
from metagpt.ext.android_assistant.actions.parse_record import ParseRecord
from metagpt.ext.android_assistant.actions.screenshot_parse import ScreenshotParse
from metagpt.ext.android_assistant.actions.self_learn_and_reflect import (
SelfLearnAndReflect,
)
from tests.metagpt.environment.android_env.test_android_ext_env import (
mock_device_shape,
mock_list_devices,
)
# Local task workspace for this test module; created eagerly at import time.
TASK_PATH = TEST_DATA_PATH.joinpath("andriod_assistant/unitest_Contacts")
TASK_PATH.mkdir(parents=True, exist_ok=True)
# Timestamp-based suffix keeps demo directories unique across runs.
DEMO_NAME = str(time.time())
SELF_EXPLORE_DOC_PATH = TASK_PATH.joinpath("auto_docs")
PARSE_RECORD_DOC_PATH = TASK_PATH.joinpath("demo_docs")
# Android emulator connection details: adb device id and on-device paths.
device_id = "emulator-5554"
xml_dir = Path("/sdcard")
screenshot_dir = Path("/sdcard/Pictures/Screenshots")
# Monkey-patch the adb entry points so no real device/emulator is required.
metagpt.environment.android.android_ext_env.AndroidExtEnv.execute_adb_with_cmd = mock_device_shape
metagpt.environment.android.android_ext_env.AndroidExtEnv.list_devices = mock_list_devices
# One AndroidEnv per flow under test: self-learning, manual learning, screenshot parsing.
test_env_self_learn_android = AndroidEnv(
    device_id=device_id,
    xml_dir=xml_dir,
    screenshot_dir=screenshot_dir,
)
test_self_learning = SelfLearnAndReflect()
test_env_manual_learn_android = AndroidEnv(
    device_id=device_id,
    xml_dir=xml_dir,
    screenshot_dir=screenshot_dir,
)
test_manual_record = ManualRecord()
test_manual_parse = ParseRecord()
test_env_screenshot_parse_android = AndroidEnv(
    device_id=device_id,
    xml_dir=xml_dir,
    screenshot_dir=screenshot_dir,
)
test_screenshot_parse = ScreenshotParse()
if __name__ == "__main__":

    async def _run_all_actions():
        """Run all four android-assistant flows concurrently.

        Building the coroutines inside a running loop (instead of at module
        level) avoids the "coroutine was never awaited" hazard if setup fails.
        """
        await asyncio.gather(
            test_self_learning.run(
                round_count=20,
                task_desc="Create a contact in Contacts App named zjy with a phone number +86 18831933368 ",
                last_act="",
                task_dir=TASK_PATH / "demos" / f"self_learning_{DEMO_NAME}",
                docs_dir=SELF_EXPLORE_DOC_PATH,
                env=test_env_self_learn_android,
            ),
            test_manual_record.run(
                task_dir=TASK_PATH / "demos" / f"manual_record_{DEMO_NAME}",
                task_desc="Create a contact in Contacts App named zjy with a phone number +86 18831933368 ",
                env=test_env_manual_learn_android,
            ),
            test_manual_parse.run(
                task_dir=TASK_PATH / "demos" / f"manual_record_{DEMO_NAME}",  # TODO: needs adjusting
                docs_dir=PARSE_RECORD_DOC_PATH,  # TODO: needs adjusting
                env=test_env_manual_learn_android,
            ),
            test_screenshot_parse.run(
                round_count=20,
                task_desc="Create a contact in Contacts App named zjy with a phone number +86 18831933368 ",
                last_act="",
                task_dir=TASK_PATH / f"act_{DEMO_NAME}",
                docs_dir=PARSE_RECORD_DOC_PATH,
                env=test_env_screenshot_parse_android,
                grid_on=False,
            ),
        )

    # asyncio.run() replaces the deprecated get_event_loop()/run_until_complete()/close().
    asyncio.run(_run_all_actions())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/ext/werewolf/__init__.py | tests/metagpt/ext/werewolf/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/ext/werewolf/actions/test_experience_operation.py | tests/metagpt/ext/werewolf/actions/test_experience_operation.py | import json
import pytest
from metagpt.const import DEFAULT_WORKSPACE_ROOT
from metagpt.ext.werewolf.actions import AddNewExperiences, RetrieveExperiences
from metagpt.ext.werewolf.schema import RoleExperience
from metagpt.logs import logger
class TestExperiencesOperation:
    """Tests adding and retrieving werewolf role experiences in a dedicated collection.

    NOTE(review): the retrieval tests depend on ``test_add`` having populated the
    shared collection first — confirm ordering if tests are run selectively.
    """

    # Chroma collection used exclusively by this test class.
    collection_name = "test"
    test_round_id = "test_01"
    version = "test"
    # Fixture data: two Witch and one Seer experiences under the base version,
    # plus three TestRole experiences with distinct version suffixes so the
    # `excluded_version` filtering test can drop exactly one of them.
    samples_to_add = [
        RoleExperience(
            profile="Witch",
            reflection="The game is intense with two players claiming to be the Witch and one claiming to be the Seer. "
            "Player4's behavior is suspicious.",
            response="",
            outcome="",
            round_id=test_round_id,
            version=version,
        ),
        RoleExperience(
            profile="Witch",
            reflection="The game is in a critical state with only three players left, "
            "and I need to make a wise decision to save Player7 or not.",
            response="",
            outcome="",
            round_id=test_round_id,
            version=version,
        ),
        RoleExperience(
            profile="Seer",
            reflection="Player1, who is a werewolf, falsely claimed to be a Seer, and Player6, who might be a Witch, "
            "sided with him. I, as the real Seer, am under suspicion.",
            response="",
            outcome="",
            round_id=test_round_id,
            version=version,
        ),
        RoleExperience(
            profile="TestRole",
            reflection="Some test reflection1",
            response="",
            outcome="",
            round_id=test_round_id,
            version=version + "_01-10",
        ),
        RoleExperience(
            profile="TestRole",
            reflection="Some test reflection2",
            response="",
            outcome="",
            round_id=test_round_id,
            version=version + "_11-20",
        ),
        RoleExperience(
            profile="TestRole",
            reflection="Some test reflection3",
            response="",
            outcome="",
            round_id=test_round_id,
            version=version + "_21-30",
        ),
    ]

    @pytest.mark.asyncio
    async def test_add(self):
        """Insert the samples, then rebuild the vector store from the saved JSON file."""
        saved_file = DEFAULT_WORKSPACE_ROOT.joinpath(
            f"werewolf_game/experiences/{self.version}/{self.test_round_id}.json"
        )
        # Start from a clean slate so the insertion counts below are exact.
        if saved_file.exists():
            saved_file.unlink()
        action = AddNewExperiences(collection_name=self.collection_name, delete_existing=True)
        action.run(self.samples_to_add)
        # test insertion
        inserted = action.engine.retriever._index._vector_store._collection.get()
        assert len(inserted["documents"]) == len(self.samples_to_add)
        # test if we record the samples correctly to local file
        # & test if we could recover a embedding db from the file
        action = AddNewExperiences(collection_name=self.collection_name, delete_existing=True)
        action.add_from_file(saved_file)
        inserted = action.engine.retriever._index._vector_store._collection.get()
        assert len(inserted["documents"]) == len(self.samples_to_add)

    @pytest.mark.asyncio
    async def test_retrieve(self):
        """A Witch-profile query should hit both Witch experiences."""
        action = RetrieveExperiences(collection_name=self.collection_name)
        query = "one player claimed to be Seer and the other Witch"
        results = action.run(query, profile="Witch")
        results = json.loads(results)
        assert len(results) == 2, "Witch should have 2 experiences"
        assert "The game is intense with two players" in results[0]

    @pytest.mark.asyncio
    async def test_retrieve_filtering(self):
        """`excluded_version` should drop exactly the matching TestRole entry."""
        action = RetrieveExperiences(collection_name=self.collection_name)
        query = "some test query"
        profile = "TestRole"
        excluded_version = ""
        # An empty exclusion filter returns all three TestRole experiences.
        results = action.run(query, profile=profile, excluded_version=excluded_version)
        results = json.loads(results)
        assert len(results) == 3
        # Excluding one version bucket leaves the other two experiences.
        excluded_version = self.version + "_21-30"
        results = action.run(query, profile=profile, excluded_version=excluded_version)
        results = json.loads(results)
        assert len(results) == 2
class TestActualRetrieve:
    """Exercises retrieval against the real 'role_reflection' experience pool.

    These are mostly smoke tests that log results; only the last two methods assert.
    """

    collection_name = "role_reflection"

    @pytest.mark.asyncio
    async def test_check_experience_pool(self):
        """Log the size of the existing experience pool (no assertions)."""
        logger.info("check experience pool")
        action = RetrieveExperiences(collection_name=self.collection_name)
        # The engine may be absent when no pool has been built yet.
        if action.engine:
            all_experiences = action.engine.retriever._index._vector_store._collection.get()
            logger.info(f"{len(all_experiences['metadatas'])=}")

    @pytest.mark.asyncio
    async def test_retrieve_werewolf_experience(self):
        """Smoke-run a Werewolf-profile retrieval (no assertions)."""
        action = RetrieveExperiences(collection_name=self.collection_name)
        query = "there are conflicts"
        logger.info(f"test retrieval with {query=}")
        action.run(query, "Werewolf")

    @pytest.mark.asyncio
    async def test_retrieve_villager_experience(self):
        """Check a Seer-profile retrieval for the 'conflict' keyword."""
        action = RetrieveExperiences(collection_name=self.collection_name)
        query = "there are conflicts"
        logger.info(f"test retrieval with {query=}")
        results = action.run(query, "Seer")
        # Translated from the original Chinese comment: "similar situations should
        # include the 'conflict' keyword". NOTE(review): the assertion checks
        # *absence* of the keyword — confirm the intended direction.
        assert "conflict" not in results

    @pytest.mark.asyncio
    async def test_retrieve_villager_experience_filtering(self):
        """Excluding either version bucket is expected to yield identical results here."""
        action = RetrieveExperiences(collection_name=self.collection_name)
        query = "there are conflicts"
        excluded_version = "01-10"
        logger.info(f"test retrieval with {excluded_version=}")
        results_01_10 = action.run(query, profile="Seer", excluded_version=excluded_version, verbose=True)
        excluded_version = "11-20"
        logger.info(f"test retrieval with {excluded_version=}")
        results_11_20 = action.run(query, profile="Seer", excluded_version=excluded_version, verbose=True)
        assert results_01_10 == results_11_20
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/ext/werewolf/actions/__init__.py | tests/metagpt/ext/werewolf/actions/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/ext/stanford_town/test_reflect.py | tests/metagpt/ext/stanford_town/test_reflect.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : the unittest of reflection
import pytest
from metagpt.environment import StanfordTownEnv
from metagpt.ext.stanford_town.actions.run_reflect_action import (
AgentEventTriple,
AgentFocusPt,
AgentInsightAndGuidance,
)
from metagpt.ext.stanford_town.roles.st_role import STRole
from metagpt.ext.stanford_town.utils.const import MAZE_ASSET_PATH
@pytest.mark.asyncio
async def test_reflect():
"""
init STRole form local json, set sim_code(path),curr_time & start_time
"""
role = STRole(
sim_code="base_the_ville_isabella_maria_klaus",
start_time="February 13, 2023",
curr_time="February 13, 2023, 00:00:00",
)
role.set_env(StanfordTownEnv(maze_asset_path=MAZE_ASSET_PATH))
role.init_curr_tile()
run_focus = AgentFocusPt()
statements = ""
await run_focus.run(role, statements, n=3)
"""
这里有通过测试的结果,但是更多时候LLM生成的结果缺少了because of;考虑修改一下prompt
result = {'Klaus Mueller and Maria Lopez have a close relationship because they have been friends for a long time and have a strong bond': [1, 2, 5, 9, 11, 14], 'Klaus Mueller has a crush on Maria Lopez': [8, 15, 24], 'Klaus Mueller is academically inclined and actively researching a topic': [13, 20], 'Klaus Mueller is socially active and acquainted with Isabella Rodriguez': [17, 21, 22], 'Klaus Mueller is organized and prepared': [19]}
"""
run_insight = AgentInsightAndGuidance()
statements = "[user: Klaus Mueller has a close relationship with Maria Lopez, user:s Mueller and Maria Lopez have a close relationship, user: Klaus Mueller has a close relationship with Maria Lopez, user: Klaus Mueller has a close relationship with Maria Lopez, user: Klaus Mueller and Maria Lopez have a strong relationship, user: Klaus Mueller is a dormmate of Maria Lopez., user: Klaus Mueller and Maria Lopez have a strong bond, user: Klaus Mueller has a crush on Maria Lopez, user: Klaus Mueller and Maria Lopez have been friends for more than 2 years., user: Klaus Mueller has a close relationship with Maria Lopez, user: Klaus Mueller Maria Lopez is heading off to college., user: Klaus Mueller and Maria Lopez have a close relationship, user: Klaus Mueller is actively researching a topic, user: Klaus Mueller is close friends and classmates with Maria Lopez., user: Klaus Mueller is socially active, user: Klaus Mueller has a crush on Maria Lopez., user: Klaus Mueller and Maria Lopez have been friends for a long time, user: Klaus Mueller is academically inclined, user: For Klaus Mueller's planning: should remember to ask Maria Lopez about her research paper, as she found it interesting that he mentioned it., user: Klaus Mueller is acquainted with Isabella Rodriguez, user: Klaus Mueller is organized and prepared, user: Maria Lopez is conversing about conversing about Maria's research paper mentioned by Klaus, user: Klaus Mueller is conversing about conversing about Maria's research paper mentioned by Klaus, user: Klaus Mueller is a student, user: Klaus Mueller is a student, user: Klaus Mueller is conversing about two friends named Klaus Mueller and Maria Lopez discussing their morning plans and progress on a research paper before Maria heads off to college., user: Klaus Mueller is socially active, user: Klaus Mueller is socially active, user: Klaus Mueller is socially active and acquainted with Isabella Rodriguez, user: Klaus Mueller has a crush on Maria Lopez]"
await run_insight.run(role, statements, n=5)
run_triple = AgentEventTriple()
statements = "(Klaus Mueller is academically inclined)"
await run_triple.run(statements, role)
role.scratch.importance_trigger_curr = -1
role.reflect()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/ext/stanford_town/__init__.py | tests/metagpt/ext/stanford_town/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/ext/stanford_town/plan/test_st_plan.py | tests/metagpt/ext/stanford_town/plan/test_st_plan.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : unittest of st_plan
import pytest
from metagpt.ext.stanford_town.plan.st_plan import _choose_retrieved, _should_react
from tests.metagpt.ext.stanford_town.plan.test_conversation import init_two_roles
@pytest.mark.asyncio
async def test_should_react():
role_ir, role_km = await init_two_roles()
roles = {role_ir.name: role_ir, role_km.name: role_km}
role_ir.scratch.act_address = "mock data"
observed = await role_ir.observe()
retrieved = role_ir.retrieve(observed)
focused_event = _choose_retrieved(role_ir.name, retrieved)
if focused_event:
reaction_mode = await _should_react(role_ir, focused_event, roles) # chat with Isabella Rodriguez
assert not reaction_mode
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/ext/stanford_town/plan/test_conversation.py | tests/metagpt/ext/stanford_town/plan/test_conversation.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : unittest of roles conversation
from typing import Tuple
import pytest
from metagpt.environment import StanfordTownEnv
from metagpt.ext.stanford_town.plan.converse import agent_conversation
from metagpt.ext.stanford_town.roles.st_role import STRole
from metagpt.ext.stanford_town.utils.const import MAZE_ASSET_PATH, STORAGE_PATH
from metagpt.ext.stanford_town.utils.mg_ga_transform import get_reverie_meta
from metagpt.ext.stanford_town.utils.utils import copy_folder
async def init_two_roles(fork_sim_code: str = "base_the_ville_isabella_maria_klaus") -> Tuple["STRole"]:
sim_code = "unittest_sim"
copy_folder(str(STORAGE_PATH.joinpath(fork_sim_code)), str(STORAGE_PATH.joinpath(sim_code)))
reverie_meta = get_reverie_meta(fork_sim_code)
role_ir_name = "Isabella Rodriguez"
role_km_name = "Klaus Mueller"
env = StanfordTownEnv(maze_asset_path=MAZE_ASSET_PATH)
role_ir = STRole(
name=role_ir_name,
sim_code=sim_code,
profile=role_ir_name,
step=reverie_meta.get("step"),
start_time=reverie_meta.get("start_date"),
curr_time=reverie_meta.get("curr_time"),
sec_per_step=reverie_meta.get("sec_per_step"),
)
role_ir.set_env(env)
await role_ir.init_curr_tile()
role_km = STRole(
name=role_km_name,
sim_code=sim_code,
profile=role_km_name,
step=reverie_meta.get("step"),
start_time=reverie_meta.get("start_date"),
curr_time=reverie_meta.get("curr_time"),
sec_per_step=reverie_meta.get("sec_per_step"),
)
role_km.set_env(env)
await role_km.init_curr_tile()
return role_ir, role_km
@pytest.mark.asyncio
async def test_agent_conversation():
role_ir, role_km = await init_two_roles()
curr_chat = await agent_conversation(role_ir, role_km, conv_rounds=2)
assert len(curr_chat) % 2 == 0
meet = False
for conv in curr_chat:
if "Valentine's Day party" in conv[1]:
# conv[0] speaker, conv[1] utterance
meet = True
assert meet
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/ext/stanford_town/plan/__init__.py | tests/metagpt/ext/stanford_town/plan/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/ext/stanford_town/actions/test_summarize_conv.py | tests/metagpt/ext/stanford_town/actions/test_summarize_conv.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : unittest of actions/summarize_conv
import pytest
from metagpt.ext.stanford_town.actions.summarize_conv import SummarizeConv
@pytest.mark.asyncio
async def test_summarize_conv():
conv = [("Role_A", "what's the weather today?"), ("Role_B", "It looks pretty good, and I will take a walk then.")]
output = await SummarizeConv().run(conv)
assert "weather" in output
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/ext/stanford_town/actions/test_gen_action_details.py | tests/metagpt/ext/stanford_town/actions/test_gen_action_details.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : unittest of actions/gen_action_details.py
import pytest
from metagpt.environment import StanfordTownEnv
from metagpt.environment.api.env_api import EnvAPIAbstract
from metagpt.ext.stanford_town.actions.gen_action_details import (
GenActionArena,
GenActionDetails,
GenActionObject,
GenActionSector,
GenActObjDescription,
)
from metagpt.ext.stanford_town.roles.st_role import STRole
from metagpt.ext.stanford_town.utils.const import MAZE_ASSET_PATH
@pytest.mark.asyncio
async def test_gen_action_details():
role = STRole(
name="Klaus Mueller",
start_time="February 13, 2023",
curr_time="February 13, 2023, 00:00:00",
sim_code="base_the_ville_isabella_maria_klaus",
)
role.set_env(StanfordTownEnv(maze_asset_path=MAZE_ASSET_PATH))
await role.init_curr_tile()
act_desp = "sleeping"
act_dura = "120"
access_tile = await role.rc.env.read_from_api(
EnvAPIAbstract(api_name="access_tile", kwargs={"tile": role.scratch.curr_tile})
)
act_world = access_tile["world"]
assert act_world == "the Ville"
sector = await GenActionSector().run(role, access_tile, act_desp)
arena = await GenActionArena().run(role, act_desp, act_world, sector)
temp_address = f"{act_world}:{sector}:{arena}"
obj = await GenActionObject().run(role, act_desp, temp_address)
act_obj_desp = await GenActObjDescription().run(role, obj, act_desp)
result_dict = await GenActionDetails().run(role, act_desp, act_dura)
# gen_action_sector
assert isinstance(sector, str)
assert sector in role.s_mem.get_str_accessible_sectors(act_world)
# gen_action_arena
assert isinstance(arena, str)
assert arena in role.s_mem.get_str_accessible_sector_arenas(f"{act_world}:{sector}")
# gen_action_obj
assert isinstance(obj, str)
assert obj in role.s_mem.get_str_accessible_arena_game_objects(temp_address)
if result_dict:
for key in [
"action_address",
"action_duration",
"action_description",
"action_pronunciatio",
"action_event",
"chatting_with",
"chat",
"chatting_with_buffer",
"chatting_end_time",
"act_obj_description",
"act_obj_pronunciatio",
"act_obj_event",
]:
assert key in result_dict
assert result_dict["action_address"] == f"{temp_address}:{obj}"
assert result_dict["action_duration"] == int(act_dura)
assert result_dict["act_obj_description"] == act_obj_desp
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/ext/stanford_town/actions/__init__.py | tests/metagpt/ext/stanford_town/actions/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/ext/stanford_town/memory/test_basic_memory.py | tests/metagpt/ext/stanford_town/memory/test_basic_memory.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : the unittest of BasicMemory
from datetime import datetime, timedelta
import pytest
from metagpt.ext.stanford_town.memory.agent_memory import BasicMemory
from metagpt.logs import logger
"""
memory测试思路
1. Basic Memory测试
2. Agent Memory测试
2.1 Load & Save方法测试
2.2 Add方法测试
2.3 Get方法测试
"""
# Create some sample BasicMemory instances
memory1 = BasicMemory(
memory_id="1",
memory_count=1,
type_count=1,
memory_type="event",
depth=1,
created=datetime.now(),
expiration=datetime.now() + timedelta(days=30),
subject="Subject1",
predicate="Predicate1",
object="Object1",
content="This is content 1",
embedding_key="embedding_key_1",
poignancy=1,
keywords=["keyword1", "keyword2"],
filling=["memory_id_2"],
)
memory2 = BasicMemory(
memory_id="2",
memory_count=2,
type_count=2,
memory_type="thought",
depth=2,
created=datetime.now(),
expiration=datetime.now() + timedelta(days=30),
subject="Subject2",
predicate="Predicate2",
object="Object2",
content="This is content 2",
embedding_key="embedding_key_2",
poignancy=2,
keywords=["keyword3", "keyword4"],
filling=[],
)
@pytest.fixture
def basic_mem_set():
basic_mem2 = memory2
yield basic_mem2
def test_basic_mem_function(basic_mem_set):
a, b, c = basic_mem_set.summary()
logger.info(f"{a}{b}{c}")
assert a == "Subject2"
def test_basic_mem_save(basic_mem_set):
result = basic_mem_set.save_to_dict()
logger.info(f"save结果为{result}")
if __name__ == "__main__":
pytest.main()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/ext/stanford_town/memory/test_spatial_memory.py | tests/metagpt/ext/stanford_town/memory/test_spatial_memory.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : the unittest of MemoryTree
from metagpt.ext.stanford_town.memory.spatial_memory import MemoryTree
from metagpt.ext.stanford_town.utils.const import STORAGE_PATH
def test_spatial_memory():
f_path = STORAGE_PATH.joinpath(
"base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/spatial_memory.json"
)
x = MemoryTree()
x.set_mem_path(f_path)
assert x.tree
assert "the Ville" in x.tree
assert "Isabella Rodriguez's apartment" in x.get_str_accessible_sectors("the Ville")
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/ext/stanford_town/memory/__init__.py | tests/metagpt/ext/stanford_town/memory/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/ext/stanford_town/memory/test_agent_memory.py | tests/metagpt/ext/stanford_town/memory/test_agent_memory.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : the unittest of AgentMemory
from datetime import datetime, timedelta
import pytest
from metagpt.ext.stanford_town.memory.agent_memory import AgentMemory
from metagpt.ext.stanford_town.memory.retrieve import agent_retrieve
from metagpt.ext.stanford_town.utils.const import STORAGE_PATH
from metagpt.logs import logger
"""
memory测试思路
1. Basic Memory测试
2. Agent Memory测试
2.1 Load & Save方法测试; Load方法中使用了add方法,验证Load即可验证所有add
2.2 Get方法测试
"""
memory_easy_storage_path = STORAGE_PATH.joinpath(
"base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory",
)
memroy_chat_storage_path = STORAGE_PATH.joinpath(
"base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory",
)
memory_save_easy_test_path = STORAGE_PATH.joinpath(
"base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/test_memory",
)
memory_save_chat_test_path = STORAGE_PATH.joinpath(
"base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/test_memory",
)
class TestAgentMemory:
@pytest.fixture
def agent_memory(self):
# 创建一个AgentMemory实例并返回,可以在所有测试用例中共享
test_agent_memory = AgentMemory()
test_agent_memory.set_mem_path(memroy_chat_storage_path)
return test_agent_memory
def test_load(self, agent_memory):
logger.info(f"存储路径为:{agent_memory.memory_saved}")
logger.info(f"存储记忆条数为:{len(agent_memory.storage)}")
logger.info(f"kw_strength为{agent_memory.kw_strength_event},{agent_memory.kw_strength_thought}")
logger.info(f"embeeding.json条数为{len(agent_memory.embeddings)}")
assert agent_memory.embeddings is not None
def test_save(self, agent_memory):
try:
agent_memory.save(memory_save_chat_test_path)
logger.info("成功存储")
except:
pass
def test_summary_function(self, agent_memory):
logger.info(f"event长度为{len(agent_memory.event_list)}")
logger.info(f"thought长度为{len(agent_memory.thought_list)}")
logger.info(f"chat长度为{len(agent_memory.chat_list)}")
result1 = agent_memory.get_summarized_latest_events(4)
logger.info(f"总结最近事件结果为:{result1}")
def test_get_last_chat_function(self, agent_memory):
result2 = agent_memory.get_last_chat("customers")
logger.info(f"上一次对话是{result2}")
def test_retrieve_function(self, agent_memory):
focus_points = ["who i love?"]
retrieved = dict()
for focal_pt in focus_points:
nodes = [
[i.last_accessed, i]
for i in agent_memory.event_list + agent_memory.thought_list
if "idle" not in i.embedding_key
]
nodes = sorted(nodes, key=lambda x: x[0])
nodes = [i for created, i in nodes]
results = agent_retrieve(agent_memory, datetime.now() - timedelta(days=120), 0.99, focal_pt, nodes, 5)
final_result = []
for n in results:
for i in agent_memory.storage:
if i.memory_id == n:
i.last_accessed = datetime.now() - timedelta(days=120)
final_result.append(i)
retrieved[focal_pt] = final_result
logger.info(f"检索结果为{retrieved}")
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/ext/stanford_town/roles/__init__.py | tests/metagpt/ext/stanford_town/roles/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/ext/stanford_town/roles/test_st_role.py | tests/metagpt/ext/stanford_town/roles/test_st_role.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : the unittest of STRole
import pytest
from metagpt.environment import StanfordTownEnv
from metagpt.ext.stanford_town.memory.agent_memory import BasicMemory
from metagpt.ext.stanford_town.roles.st_role import STRole
from metagpt.ext.stanford_town.utils.const import MAZE_ASSET_PATH
@pytest.mark.asyncio
async def test_observe():
role = STRole(
sim_code="base_the_ville_isabella_maria_klaus",
start_time="February 13, 2023",
curr_time="February 13, 2023, 00:00:00",
)
role.set_env(StanfordTownEnv(maze_asset_path=MAZE_ASSET_PATH))
await role.init_curr_tile()
ret_events = await role.observe()
assert ret_events
for event in ret_events:
assert isinstance(event, BasicMemory)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/provider/test_general_api_requestor.py | tests/metagpt/provider/test_general_api_requestor.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : the unittest of APIRequestor
import pytest
from metagpt.provider.general_api_requestor import (
GeneralAPIRequestor,
parse_stream,
parse_stream_helper,
)
api_requestor = GeneralAPIRequestor(base_url="http://www.baidu.com")
def test_parse_stream():
assert parse_stream_helper(None) is None
assert parse_stream_helper(b"data: [DONE]") is None
assert parse_stream_helper(b"data: test") == b"test"
assert parse_stream_helper(b"test") is None
for line in parse_stream([b"data: test"]):
assert line == b"test"
def test_api_requestor():
resp, _, _ = api_requestor.request(method="get", url="/s?wd=baidu")
assert b"baidu" in resp
@pytest.mark.asyncio
async def test_async_api_requestor():
resp, _, _ = await api_requestor.arequest(method="get", url="/s?wd=baidu")
assert b"baidu" in resp
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/provider/test_bedrock_api.py | tests/metagpt/provider/test_bedrock_api.py | import json
import pytest
from metagpt.provider.bedrock.utils import (
NOT_SUPPORT_STREAM_MODELS,
SUPPORT_STREAM_MODELS,
)
from metagpt.provider.bedrock_api import BedrockLLM
from tests.metagpt.provider.mock_llm_config import mock_llm_config_bedrock
from tests.metagpt.provider.req_resp_const import (
BEDROCK_PROVIDER_REQUEST_BODY,
BEDROCK_PROVIDER_RESPONSE_BODY,
)
# all available model from bedrock
models = SUPPORT_STREAM_MODELS | NOT_SUPPORT_STREAM_MODELS
messages = [{"role": "user", "content": "Hi!"}]
usage = {
"prompt_tokens": 1000000,
"completion_tokens": 1000000,
}
def get_provider_name(model: str) -> str:
arr = model.split(".")
if len(arr) == 2:
provider, model_name = arr # meta、mistral……
elif len(arr) == 3:
# some model_ids may contain country like us.xx.xxx
_, provider, model_name = arr
return provider
def deal_special_provider(provider: str, model: str, stream: bool = False) -> str:
# for ai21
if "j2-" in model:
provider = f"{provider}-j2"
elif "jamba-" in model:
provider = f"{provider}-jamba"
elif "command-r" in model:
provider = f"{provider}-command-r"
if stream and "ai21" in model:
provider = f"{provider}-stream"
return provider
async def mock_invoke_model(self: BedrockLLM, *args, **kwargs) -> dict:
provider = get_provider_name(self.model)
self._update_costs(usage, self.model)
provider = deal_special_provider(provider, self.model)
return BEDROCK_PROVIDER_RESPONSE_BODY[provider]
async def mock_invoke_model_stream(self: BedrockLLM, *args, **kwargs) -> dict:
# use json object to mock EventStream
def dict2bytes(x):
return json.dumps(x).encode("utf-8")
provider = get_provider_name(self.model)
if provider == "amazon":
response_body_bytes = dict2bytes({"outputText": "Hello World"})
elif provider == "anthropic":
response_body_bytes = dict2bytes(
{"type": "content_block_delta", "index": 0, "delta": {"type": "text_delta", "text": "Hello World"}}
)
elif provider == "cohere":
response_body_bytes = dict2bytes({"is_finished": False, "text": "Hello World"})
else:
provider = deal_special_provider(provider, self.model, stream=True)
response_body_bytes = dict2bytes(BEDROCK_PROVIDER_RESPONSE_BODY[provider])
response_body_stream = {"body": [{"chunk": {"bytes": response_body_bytes}}]}
self._update_costs(usage, self.model)
return response_body_stream
def get_bedrock_request_body(model_id) -> dict:
provider = get_provider_name(model_id)
provider = deal_special_provider(provider, model_id)
return BEDROCK_PROVIDER_REQUEST_BODY[provider]
def is_subset(subset, superset) -> bool:
"""Ensure all fields in request body are allowed.
```python
subset = {"prompt": "hello","kwargs": {"temperature": 0.9,"p": 0.0}}
superset = {"prompt": "hello", "kwargs": {"temperature": 0.0, "top-p": 0.0}}
is_subset(subset, superset)
```
"""
for key, value in subset.items():
if key not in superset:
return False
if isinstance(value, dict):
if not isinstance(superset[key], dict):
return False
if not is_subset(value, superset[key]):
return False
return True
@pytest.fixture(scope="class", params=models)
def bedrock_api(request) -> BedrockLLM:
model_id = request.param
mock_llm_config_bedrock.model = model_id
api = BedrockLLM(mock_llm_config_bedrock)
return api
class TestBedrockAPI:
def _patch_invoke_model(self, mocker):
mocker.patch("metagpt.provider.bedrock_api.BedrockLLM.invoke_model", mock_invoke_model)
def _patch_invoke_model_stream(self, mocker):
mocker.patch(
"metagpt.provider.bedrock_api.BedrockLLM.invoke_model_with_response_stream",
mock_invoke_model_stream,
)
def test_get_request_body(self, bedrock_api: BedrockLLM):
"""Ensure request body has correct format"""
provider = bedrock_api.provider
request_body = json.loads(provider.get_request_body(messages, bedrock_api._const_kwargs))
assert is_subset(request_body, get_bedrock_request_body(bedrock_api.config.model))
@pytest.mark.asyncio
async def test_aask(self, bedrock_api: BedrockLLM, mocker):
self._patch_invoke_model(mocker)
self._patch_invoke_model_stream(mocker)
assert await bedrock_api.aask(messages, stream=False) == "Hello World"
assert await bedrock_api.aask(messages, stream=True) == "Hello World"
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/provider/test_zhipuai_api.py | tests/metagpt/provider/test_zhipuai_api.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : the unittest of ZhiPuAILLM
import pytest
from metagpt.provider.zhipuai_api import ZhiPuAILLM
from tests.metagpt.provider.mock_llm_config import mock_llm_config_zhipu
from tests.metagpt.provider.req_resp_const import (
get_part_chat_completion,
llm_general_chat_funcs_test,
messages,
prompt,
resp_cont_tmpl,
)
name = "ChatGLM-4"
resp_cont = resp_cont_tmpl.format(name=name)
default_resp = get_part_chat_completion(name)
async def mock_zhipuai_acreate_stream(self, **kwargs):
class MockResponse(object):
async def _aread(self):
class Iterator(object):
events = [{"choices": [{"index": 0, "delta": {"content": resp_cont, "role": "assistant"}}]}]
async def __aiter__(self):
for event in self.events:
yield event
async for chunk in Iterator():
yield chunk
async def stream(self):
async for chunk in self._aread():
yield chunk
return MockResponse()
async def mock_zhipuai_acreate(self, **kwargs) -> dict:
return default_resp
@pytest.mark.asyncio
async def test_zhipuai_acompletion(mocker):
mocker.patch("metagpt.provider.zhipuai.zhipu_model_api.ZhiPuModelAPI.acreate", mock_zhipuai_acreate)
mocker.patch("metagpt.provider.zhipuai.zhipu_model_api.ZhiPuModelAPI.acreate_stream", mock_zhipuai_acreate_stream)
zhipu_llm = ZhiPuAILLM(mock_llm_config_zhipu)
resp = await zhipu_llm.acompletion(messages)
assert resp["choices"][0]["message"]["content"] == resp_cont
await llm_general_chat_funcs_test(zhipu_llm, prompt, messages, resp_cont)
def test_zhipuai_proxy():
# it seems like zhipuai would be inflected by the proxy of openai, maybe it's a bug
# but someone may want to use openai.proxy, so we keep this test case
# assert openai.proxy == config.llm.proxy
_ = ZhiPuAILLM(mock_llm_config_zhipu)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/provider/test_openai.py | tests/metagpt/provider/test_openai.py | import pytest
from openai.types.chat import (
ChatCompletion,
ChatCompletionChunk,
ChatCompletionMessage,
ChatCompletionMessageToolCall,
)
from openai.types.chat.chat_completion import Choice, CompletionUsage
from openai.types.chat.chat_completion_message_tool_call import Function
from PIL import Image
from metagpt.configs.compress_msg_config import CompressType
from metagpt.const import TEST_DATA_PATH
from metagpt.llm import LLM
from metagpt.logs import logger
from metagpt.provider import OpenAILLM
from tests.metagpt.provider.mock_llm_config import (
mock_llm_config,
mock_llm_config_proxy,
)
from tests.metagpt.provider.req_resp_const import (
get_openai_chat_completion,
get_openai_chat_completion_chunk,
llm_general_chat_funcs_test,
messages,
prompt,
resp_cont_tmpl,
)
name = "AI assistant"
resp_cont = resp_cont_tmpl.format(name=name)
default_resp = get_openai_chat_completion(name)
default_resp_chunk = get_openai_chat_completion_chunk(name, usage_as_dict=True)
usage = CompletionUsage(completion_tokens=110, prompt_tokens=92, total_tokens=202)
@pytest.mark.asyncio
async def test_text_to_speech():
llm = LLM()
resp = await llm.atext_to_speech(
model="tts-1",
voice="alloy",
input="人生说起来长,但直到一个岁月回头看,许多事件仅是仓促的。一段一段拼凑一起,合成了人生。苦难当头时,当下不免觉得是折磨;回头看,也不够是一段短短的人生旅程。",
)
assert 200 == resp.response.status_code
@pytest.mark.asyncio
async def test_speech_to_text():
llm = LLM()
audio_file = open(f"{TEST_DATA_PATH}/audio/hello.mp3", "rb")
resp = await llm.aspeech_to_text(file=audio_file, model="whisper-1")
assert "你好" == resp.text
@pytest.fixture
def tool_calls_rsp():
function_rsps = [
Function(arguments='{\n"language": "python",\n"code": "print(\'hello world\')"}', name="execute"),
]
tool_calls = [
ChatCompletionMessageToolCall(type="function", id=f"call_{i}", function=f) for i, f in enumerate(function_rsps)
]
messages = [ChatCompletionMessage(content=None, role="assistant", tool_calls=[t]) for t in tool_calls]
# 添加一个纯文本响应
messages.append(
ChatCompletionMessage(content="Completed a python code for hello world!", role="assistant", tool_calls=None)
)
# 添加 openai tool calls respond bug, code 出现在ChatCompletionMessage.content中
messages.extend(
[
ChatCompletionMessage(content="```python\nprint('hello world')```", role="assistant", tool_calls=None),
]
)
choices = [
Choice(finish_reason="tool_calls", logprobs=None, index=i, message=msg) for i, msg in enumerate(messages)
]
return [
ChatCompletion(id=str(i), choices=[c], created=i, model="gpt-4", object="chat.completion")
for i, c in enumerate(choices)
]
@pytest.fixture
def json_decode_error():
function_rsp = Function(arguments='{\n"language": \'python\',\n"code": "print(\'hello world\')"}', name="execute")
tool_calls = [ChatCompletionMessageToolCall(type="function", id=f"call_{0}", function=function_rsp)]
message = ChatCompletionMessage(content=None, role="assistant", tool_calls=tool_calls)
choices = [Choice(finish_reason="tool_calls", logprobs=None, index=0, message=message)]
return ChatCompletion(id="0", choices=choices, created=0, model="gpt-4", object="chat.completion")
class TestOpenAI:
def test_make_client_kwargs_without_proxy(self):
instance = OpenAILLM(mock_llm_config)
kwargs = instance._make_client_kwargs()
assert kwargs["api_key"] == "mock_api_key"
assert kwargs["base_url"] == "mock_base_url"
assert "http_client" not in kwargs
def test_make_client_kwargs_with_proxy(self):
instance = OpenAILLM(mock_llm_config_proxy)
kwargs = instance._make_client_kwargs()
assert "http_client" in kwargs
def test_get_choice_function_arguments_for_aask_code(self, tool_calls_rsp):
instance = OpenAILLM(mock_llm_config_proxy)
for i, rsp in enumerate(tool_calls_rsp):
code = instance.get_choice_function_arguments(rsp)
logger.info(f"\ntest get function call arguments {i}: {code}")
assert "code" in code
assert "language" in code
assert "hello world" in code["code"]
logger.info(f'code is : {code["code"]}')
if "Completed a python code for hello world!" == code["code"]:
code["language"] == "markdown"
else:
code["language"] == "python"
def test_aask_code_json_decode_error(self, json_decode_error):
instance = OpenAILLM(mock_llm_config)
code = instance.get_choice_function_arguments(json_decode_error)
assert "code" in code
assert "language" in code
assert "hello world" in code["code"]
logger.info(f'code is : {code["code"]}')
@pytest.mark.asyncio
async def test_gen_image():
llm = LLM()
model = "dall-e-3"
prompt = 'a logo with word "MetaGPT"'
images: list[Image] = await llm.gen_image(model=model, prompt=prompt)
assert images[0].size == (1024, 1024)
images: list[Image] = await llm.gen_image(model=model, prompt=prompt, resp_format="b64_json")
assert images[0].size == (1024, 1024)
async def mock_openai_acompletions_create(self, stream: bool = False, **kwargs) -> ChatCompletionChunk:
if stream:
class Iterator(object):
async def __aiter__(self):
yield default_resp_chunk
return Iterator()
else:
return default_resp
@pytest.mark.asyncio
async def test_openai_acompletion(mocker):
mocker.patch("openai.resources.chat.completions.AsyncCompletions.create", mock_openai_acompletions_create)
llm = OpenAILLM(mock_llm_config)
resp = await llm.acompletion(messages)
assert resp.choices[0].finish_reason == "stop"
assert resp.choices[0].message.content == resp_cont
assert resp.usage == usage
await llm_general_chat_funcs_test(llm, prompt, messages, resp_cont)
def test_count_tokens():
llm = LLM()
llm.model = "gpt-4o"
messages = [
llm._system_msg("some system msg"),
llm._system_msg("some system message 2"),
llm._user_msg("user 1"),
llm._assistant_msg("assistant 1"),
llm._user_msg("user 1"),
llm._assistant_msg("assistant 2"),
]
cnt = llm.count_tokens(messages)
assert cnt == 47
def test_count_tokens_long():
llm = LLM()
llm.model = "gpt-4-0613"
test_msg_content = " ".join([str(i) for i in range(100000)])
messages = [
llm._system_msg("You are a helpful assistant"),
llm._user_msg(test_msg_content + " what's the first number you see?"),
]
cnt = llm.count_tokens(messages) # 299023, ~300k
assert 290000 <= cnt <= 300000
llm.model = "test_llm" # a non-openai model, will use heuristics base count_tokens
cnt = llm.count_tokens(messages) # 294474, ~300k, ~2% difference
assert 290000 <= cnt <= 300000
@pytest.mark.skip
@pytest.mark.asyncio
async def test_aask_long():
llm = LLM()
llm.model = "deepseek-ai/DeepSeek-Coder-V2-Instruct" # deepseek-coder on siliconflow, limit 32k
llm.config.compress_type = CompressType.POST_CUT_BY_TOKEN
test_msg_content = " ".join([str(i) for i in range(100000)]) # corresponds to ~300k tokens
messages = [
llm._system_msg("You are a helpful assistant"),
llm._user_msg(test_msg_content + " what's the first number you see?"),
]
await llm.aask(messages) # should not fail with context truncated
@pytest.mark.skip
@pytest.mark.asyncio
async def test_aask_long_no_compress():
llm = LLM()
llm.model = "deepseek-ai/DeepSeek-Coder-V2-Instruct" # deepseek-coder on siliconflow, limit 32k
# Not specifying llm.config.compress_type will use default "", no compress
test_msg_content = " ".join([str(i) for i in range(100000)]) # corresponds to ~300k tokens
messages = [
llm._system_msg("You are a helpful assistant"),
llm._user_msg(test_msg_content + " what's the first number you see?"),
]
with pytest.raises(Exception):
await llm.aask(messages) # should fail
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/provider/test_google_gemini_api.py | tests/metagpt/provider/test_google_gemini_api.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : the unittest of google gemini api
from abc import ABC
from dataclasses import dataclass
import pytest
from google.ai import generativelanguage as glm
from google.generativeai.types import content_types
from metagpt.provider.google_gemini_api import GeminiLLM
from tests.metagpt.provider.mock_llm_config import mock_llm_config
from tests.metagpt.provider.req_resp_const import (
gemini_messages,
llm_general_chat_funcs_test,
prompt,
resp_cont_tmpl,
)
@dataclass
class MockGeminiResponse(ABC):
text: str
resp_cont = resp_cont_tmpl.format(name="gemini")
default_resp = MockGeminiResponse(text=resp_cont)
def mock_gemini_count_tokens(self, contents: content_types.ContentsType) -> glm.CountTokensResponse:
return glm.CountTokensResponse(total_tokens=20)
async def mock_gemini_count_tokens_async(self, contents: content_types.ContentsType) -> glm.CountTokensResponse:
return glm.CountTokensResponse(total_tokens=20)
def mock_gemini_generate_content(self, **kwargs) -> MockGeminiResponse:
return default_resp
async def mock_gemini_generate_content_async(self, stream: bool = False, **kwargs) -> MockGeminiResponse:
if stream:
class Iterator(object):
async def __aiter__(self):
yield default_resp
return Iterator()
else:
return default_resp
@pytest.mark.asyncio
async def test_gemini_acompletion(mocker):
mocker.patch("metagpt.provider.google_gemini_api.GeminiGenerativeModel.count_tokens", mock_gemini_count_tokens)
mocker.patch(
"metagpt.provider.google_gemini_api.GeminiGenerativeModel.count_tokens_async", mock_gemini_count_tokens_async
)
mocker.patch("google.generativeai.generative_models.GenerativeModel.generate_content", mock_gemini_generate_content)
mocker.patch(
"google.generativeai.generative_models.GenerativeModel.generate_content_async",
mock_gemini_generate_content_async,
)
gemini_llm = GeminiLLM(mock_llm_config)
assert gemini_llm._user_msg(prompt) == {"role": "user", "parts": [prompt]}
assert gemini_llm._assistant_msg(prompt) == {"role": "model", "parts": [prompt]}
usage = gemini_llm.get_usage(gemini_messages, resp_cont)
assert usage == {"prompt_tokens": 20, "completion_tokens": 20}
resp = gemini_llm.completion(gemini_messages)
assert resp == default_resp
resp = await gemini_llm.acompletion(gemini_messages)
assert resp.text == default_resp.text
await llm_general_chat_funcs_test(gemini_llm, prompt, gemini_messages, resp_cont)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/provider/test_azure_llm.py | tests/metagpt/provider/test_azure_llm.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
from metagpt.provider import AzureOpenAILLM
from tests.metagpt.provider.mock_llm_config import mock_llm_config_azure
def test_azure_llm():
llm = AzureOpenAILLM(mock_llm_config_azure)
kwargs = llm._make_client_kwargs()
assert kwargs["azure_endpoint"] == mock_llm_config_azure.base_url
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/provider/req_resp_const.py | tests/metagpt/provider/req_resp_const.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : default request & response data for provider unittest
from anthropic.types import (
ContentBlock,
ContentBlockDeltaEvent,
Message,
MessageStartEvent,
TextDelta,
)
from anthropic.types import Usage as AnthropicUsage
from dashscope.api_entities.dashscope_response import (
DashScopeAPIResponse,
GenerationOutput,
GenerationResponse,
GenerationUsage,
)
from openai.types.chat.chat_completion import (
ChatCompletion,
ChatCompletionMessage,
Choice,
)
from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
from openai.types.chat.chat_completion_chunk import Choice as AChoice
from openai.types.chat.chat_completion_chunk import ChoiceDelta
from openai.types.completion_usage import CompletionUsage
from qianfan.resources.typing import QfResponse
from metagpt.provider.base_llm import BaseLLM
prompt = "who are you?"
messages = [{"role": "user", "content": prompt}]
resp_cont_tmpl = "I'm {name}"
default_resp_cont = resp_cont_tmpl.format(name="GPT")
# part of whole ChatCompletion of openai like structure
def get_part_chat_completion(name: str) -> dict:
part_chat_completion = {
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": resp_cont_tmpl.format(name=name),
},
"finish_reason": "stop",
}
],
"usage": {"completion_tokens": 22, "prompt_tokens": 19, "total_tokens": 41},
}
return part_chat_completion
def get_openai_chat_completion(name: str) -> ChatCompletion:
openai_chat_completion = ChatCompletion(
id="cmpl-a6652c1bb181caae8dd19ad8",
model="xx/xxx",
object="chat.completion",
created=1703300855,
choices=[
Choice(
finish_reason="stop",
index=0,
message=ChatCompletionMessage(role="assistant", content=resp_cont_tmpl.format(name=name)),
logprobs=None,
)
],
usage=CompletionUsage(completion_tokens=110, prompt_tokens=92, total_tokens=202),
)
return openai_chat_completion
def get_openai_chat_completion_chunk(name: str, usage_as_dict: bool = False) -> ChatCompletionChunk:
usage = CompletionUsage(completion_tokens=110, prompt_tokens=92, total_tokens=202)
usage = usage if not usage_as_dict else usage.model_dump()
openai_chat_completion_chunk = ChatCompletionChunk(
id="cmpl-a6652c1bb181caae8dd19ad8",
model="xx/xxx",
object="chat.completion.chunk",
created=1703300855,
choices=[
AChoice(
delta=ChoiceDelta(role="assistant", content=resp_cont_tmpl.format(name=name)),
finish_reason="stop",
index=0,
logprobs=None,
)
],
usage=usage,
)
return openai_chat_completion_chunk
# For gemini
gemini_messages = [{"role": "user", "parts": prompt}]
# For QianFan
qf_jsonbody_dict = {
"id": "as-4v1h587fyv",
"object": "chat.completion",
"created": 1695021339,
"result": "",
"is_truncated": False,
"need_clear_history": False,
"usage": {"prompt_tokens": 7, "completion_tokens": 15, "total_tokens": 22},
}
def get_qianfan_response(name: str) -> QfResponse:
qf_jsonbody_dict["result"] = resp_cont_tmpl.format(name=name)
return QfResponse(code=200, body=qf_jsonbody_dict)
# For DashScope
def get_dashscope_response(name: str) -> GenerationResponse:
return GenerationResponse.from_api_response(
DashScopeAPIResponse(
status_code=200,
output=GenerationOutput(
**{
"text": "",
"finish_reason": "",
"choices": [
{
"finish_reason": "stop",
"message": {"role": "assistant", "content": resp_cont_tmpl.format(name=name)},
}
],
}
),
usage=GenerationUsage(**{"input_tokens": 12, "output_tokens": 98, "total_tokens": 110}),
)
)
# For Anthropic
def get_anthropic_response(name: str, stream: bool = False) -> Message:
if stream:
return [
MessageStartEvent(
message=Message(
id="xxx",
model=name,
role="assistant",
type="message",
content=[ContentBlock(text="", type="text")],
usage=AnthropicUsage(input_tokens=10, output_tokens=10),
),
type="message_start",
),
ContentBlockDeltaEvent(
index=0,
delta=TextDelta(text=resp_cont_tmpl.format(name=name), type="text_delta"),
type="content_block_delta",
),
]
else:
return Message(
id="xxx",
model=name,
role="assistant",
type="message",
content=[ContentBlock(text=resp_cont_tmpl.format(name=name), type="text")],
usage=AnthropicUsage(input_tokens=10, output_tokens=10),
)
# For llm general chat functions call
async def llm_general_chat_funcs_test(llm: BaseLLM, prompt: str, messages: list[dict], resp_cont: str):
resp = await llm.aask(prompt, stream=False)
assert resp == resp_cont
resp = await llm.aask(prompt)
assert resp == resp_cont
resp = await llm.acompletion_text(messages, stream=False)
assert resp == resp_cont
resp = await llm.acompletion_text(messages, stream=True)
assert resp == resp_cont
# For Amazon Bedrock
# Check the API documentation of each model
# https://docs.aws.amazon.com/bedrock/latest/userguide
BEDROCK_PROVIDER_REQUEST_BODY = {
"mistral": {"prompt": "", "max_tokens": 0, "stop": [], "temperature": 0.0, "top_p": 0.0, "top_k": 0},
"meta": {"prompt": "", "temperature": 0.0, "top_p": 0.0, "max_gen_len": 0},
"ai21-j2": {
"prompt": "",
"temperature": 0.0,
"topP": 0.0,
"maxTokens": 0,
"stopSequences": [],
"countPenalty": {"scale": 0.0},
"presencePenalty": {"scale": 0.0},
"frequencyPenalty": {"scale": 0.0},
},
"ai21-jamba": {
"messages": [],
"temperature": 0.0,
"topP": 0.0,
"max_tokens": 0,
"stopSequences": [],
"countPenalty": {"scale": 0.0},
"presencePenalty": {"scale": 0.0},
"frequencyPenalty": {"scale": 0.0},
},
"cohere": {
"prompt": "",
"temperature": 0.0,
"p": 0.0,
"k": 0.0,
"max_tokens": 0,
"stop_sequences": [],
"return_likelihoods": "NONE",
"stream": False,
"num_generations": 0,
"logit_bias": {},
"truncate": "NONE",
},
"cohere-command-r": {
"message": [],
"chat_history": [],
"temperature": 0.0,
"p": 0.0,
"k": 0.0,
"max_tokens": 0,
"stop_sequences": [],
"return_likelihoods": "NONE",
"stream": False,
"num_generations": 0,
"logit_bias": {},
"truncate": "NONE",
},
"anthropic": {
"anthropic_version": "bedrock-2023-05-31",
"max_tokens": 0,
"system": "",
"messages": [{"role": "", "content": ""}],
"temperature": 0.0,
"top_p": 0.0,
"top_k": 0,
"stop_sequences": [],
},
"amazon": {
"inputText": "",
"textGenerationConfig": {"temperature": 0.0, "topP": 0.0, "maxTokenCount": 0, "stopSequences": []},
},
}
BEDROCK_PROVIDER_RESPONSE_BODY = {
"mistral": {"outputs": [{"text": "Hello World", "stop_reason": ""}]},
"meta": {"generation": "Hello World", "prompt_token_count": 0, "generation_token_count": 0, "stop_reason": ""},
"ai21-jamba": {
"id": "",
"prompt": {"text": "Hello World", "tokens": []},
"choices": [{"message": {"content": "Hello World"}}],
},
"ai21-jamba-stream": {
"id": "",
"prompt": {"text": "Hello World", "tokens": []},
"choices": [{"delta": {"content": "Hello World"}}],
},
"ai21-j2": {
"id": "",
"prompt": {"text": "Hello World", "tokens": []},
"completions": [{"data": {"text": "Hello World"}, "finishReason": {"reason": "length", "length": 2}}],
},
"cohere": {
"generations": [
{
"finish_reason": "",
"id": "",
"text": "Hello World",
"likelihood": 0.0,
"token_likelihoods": [{"token": 0.0}],
"is_finished": True,
"index": 0,
}
],
"id": "",
"prompt": "",
},
"cohere-command-r": {
"generations": [
{
"finish_reason": "",
"id": "",
"text": "Hello World",
"likelihood": 0.0,
"token_likelihoods": [{"token": 0.0}],
"is_finished": True,
"index": 0,
}
],
"id": "",
"prompt": "",
},
"anthropic": {
"id": "",
"model": "",
"type": "message",
"role": "assistant",
"content": [{"type": "text", "text": "Hello World"}],
"stop_reason": "",
"stop_sequence": "",
"usage": {"input_tokens": 0, "output_tokens": 0},
},
"amazon": {
"inputTextTokenCount": 0,
"results": [{"tokenCount": 0, "outputText": "Hello World", "completionReason": ""}],
},
}
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/provider/conftest.py | tests/metagpt/provider/conftest.py | import pytest
@pytest.fixture(autouse=True)
def llm_mock(rsp_cache, mocker, request):
# An empty fixture to overwrite the global llm_mock fixture
# because in provider folder, we want to test the aask and aask functions for the specific models
pass
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/provider/test_ark.py | tests/metagpt/provider/test_ark.py | """
用于火山方舟Python SDK V3的测试用例
API文档:https://www.volcengine.com/docs/82379/1263482
"""
from typing import AsyncIterator, List, Union
import pytest
from openai.types.chat import ChatCompletion, ChatCompletionChunk
from openai.types.chat.chat_completion_chunk import Choice, ChoiceDelta
from metagpt.provider.ark_api import ArkLLM
from tests.metagpt.provider.mock_llm_config import mock_llm_config_ark
from tests.metagpt.provider.req_resp_const import (
get_openai_chat_completion,
llm_general_chat_funcs_test,
messages,
prompt,
resp_cont_tmpl,
)
name = "AI assistant"
resp_cont = resp_cont_tmpl.format(name=name)
USAGE = {"completion_tokens": 1000, "prompt_tokens": 1000, "total_tokens": 2000}
default_resp = get_openai_chat_completion(name)
default_resp.model = "doubao-pro-32k-240515"
default_resp.usage = USAGE
def create_chat_completion_chunk(
content: str, finish_reason: str = None, choices: List[Choice] = None
) -> ChatCompletionChunk:
if choices is None:
choices = [
Choice(
delta=ChoiceDelta(content=content, function_call=None, role="assistant", tool_calls=None),
finish_reason=finish_reason,
index=0,
logprobs=None,
)
]
return ChatCompletionChunk(
id="012",
choices=choices,
created=1716278586,
model="doubao-pro-32k-240515",
object="chat.completion.chunk",
system_fingerprint=None,
usage=None if choices else USAGE,
)
ark_resp_chunk = create_chat_completion_chunk(content="")
ark_resp_chunk_finish = create_chat_completion_chunk(content=resp_cont, finish_reason="stop")
ark_resp_chunk_last = create_chat_completion_chunk(content="", choices=[])
async def chunk_iterator(chunks: List[ChatCompletionChunk]) -> AsyncIterator[ChatCompletionChunk]:
for chunk in chunks:
yield chunk
async def mock_ark_acompletions_create(
self, stream: bool = False, **kwargs
) -> Union[ChatCompletionChunk, ChatCompletion]:
if stream:
chunks = [ark_resp_chunk, ark_resp_chunk_finish, ark_resp_chunk_last]
return chunk_iterator(chunks)
else:
return default_resp
@pytest.mark.asyncio
async def test_ark_acompletion(mocker):
mocker.patch("openai.resources.chat.completions.AsyncCompletions.create", mock_ark_acompletions_create)
llm = ArkLLM(mock_llm_config_ark)
resp = await llm.acompletion(messages)
assert resp.choices[0].finish_reason == "stop"
assert resp.choices[0].message.content == resp_cont
assert resp.usage == USAGE
await llm_general_chat_funcs_test(llm, prompt, messages, resp_cont)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/provider/test_anthropic_api.py | tests/metagpt/provider/test_anthropic_api.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : the unittest of Claude2
import pytest
from anthropic.resources.completions import Completion
from metagpt.provider.anthropic_api import AnthropicLLM
from tests.metagpt.provider.mock_llm_config import mock_llm_config_anthropic
from tests.metagpt.provider.req_resp_const import (
get_anthropic_response,
llm_general_chat_funcs_test,
messages,
prompt,
resp_cont_tmpl,
)
name = "claude-3-opus-20240229"
resp_cont = resp_cont_tmpl.format(name=name)
async def mock_anthropic_messages_create(
self, messages: list[dict], model: str, stream: bool = True, max_tokens: int = None, system: str = None
) -> Completion:
if stream:
async def aresp_iterator():
resps = get_anthropic_response(name, stream=True)
for resp in resps:
yield resp
return aresp_iterator()
else:
return get_anthropic_response(name)
@pytest.mark.asyncio
async def test_anthropic_acompletion(mocker):
mocker.patch("anthropic.resources.messages.AsyncMessages.create", mock_anthropic_messages_create)
anthropic_llm = AnthropicLLM(mock_llm_config_anthropic)
resp = await anthropic_llm.acompletion(messages)
assert resp.content[0].text == resp_cont
await llm_general_chat_funcs_test(anthropic_llm, prompt, messages, resp_cont)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/provider/test_dashscope_api.py | tests/metagpt/provider/test_dashscope_api.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : the unittest of DashScopeLLM
from typing import AsyncGenerator, Union
import pytest
from dashscope.api_entities.dashscope_response import GenerationResponse
from metagpt.provider.dashscope_api import DashScopeLLM
from tests.metagpt.provider.mock_llm_config import mock_llm_config_dashscope
from tests.metagpt.provider.req_resp_const import (
get_dashscope_response,
llm_general_chat_funcs_test,
messages,
prompt,
resp_cont_tmpl,
)
name = "qwen-max"
resp_cont = resp_cont_tmpl.format(name=name)
@classmethod
def mock_dashscope_call(
cls,
messages: list[dict],
model: str,
api_key: str,
result_format: str,
incremental_output: bool = True,
stream: bool = False,
) -> GenerationResponse:
return get_dashscope_response(name)
@classmethod
async def mock_dashscope_acall(
cls,
messages: list[dict],
model: str,
api_key: str,
result_format: str,
incremental_output: bool = True,
stream: bool = False,
) -> Union[AsyncGenerator[GenerationResponse, None], GenerationResponse]:
resps = [get_dashscope_response(name)]
if stream:
async def aresp_iterator(resps: list[GenerationResponse]):
for resp in resps:
yield resp
return aresp_iterator(resps)
else:
return resps[0]
@pytest.mark.asyncio
async def test_dashscope_acompletion(mocker):
mocker.patch("dashscope.aigc.generation.Generation.call", mock_dashscope_call)
mocker.patch("metagpt.provider.dashscope_api.AGeneration.acall", mock_dashscope_acall)
dashscope_llm = DashScopeLLM(mock_llm_config_dashscope)
resp = dashscope_llm.completion(messages)
assert resp.choices[0]["message"]["content"] == resp_cont
resp = await dashscope_llm.acompletion(messages)
assert resp.choices[0]["message"]["content"] == resp_cont
await llm_general_chat_funcs_test(dashscope_llm, prompt, messages, resp_cont)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/provider/test_general_api_base.py | tests/metagpt/provider/test_general_api_base.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
import os
from typing import AsyncGenerator, Generator, Iterator, Tuple, Union
import aiohttp
import pytest
import requests
from openai import OpenAIError
from metagpt.provider.general_api_base import (
APIRequestor,
ApiType,
OpenAIResponse,
_aiohttp_proxies_arg,
_build_api_url,
_make_session,
_requests_proxies_arg,
log_debug,
log_info,
log_warn,
logfmt,
parse_stream,
parse_stream_helper,
)
def test_basic():
_ = ApiType.from_str("azure")
_ = ApiType.from_str("azuread")
_ = ApiType.from_str("openai")
with pytest.raises(OpenAIError):
_ = ApiType.from_str("xx")
os.environ.setdefault("LLM_LOG", "debug")
log_debug("debug")
log_warn("warn")
log_info("info")
logfmt({"k1": b"v1", "k2": 1, "k3": "a b"})
_build_api_url(url="http://www.baidu.com/s?wd=", query="baidu")
def test_openai_response():
resp = OpenAIResponse(data=[], headers={"retry-after": 3})
assert resp.request_id is None
assert resp.retry_after == 3
assert resp.operation_location is None
assert resp.organization is None
assert resp.response_ms is None
def test_proxy():
assert _requests_proxies_arg(proxy=None) is None
proxy = "127.0.0.1:80"
assert _requests_proxies_arg(proxy=proxy) == {"http": proxy, "https": proxy}
proxy_dict = {"http": proxy}
assert _requests_proxies_arg(proxy=proxy_dict) == proxy_dict
assert _aiohttp_proxies_arg(proxy_dict) == proxy
proxy_dict = {"https": proxy}
assert _requests_proxies_arg(proxy=proxy_dict) == proxy_dict
assert _aiohttp_proxies_arg(proxy_dict) == proxy
assert _make_session() is not None
assert _aiohttp_proxies_arg(None) is None
assert _aiohttp_proxies_arg("test") == "test"
with pytest.raises(ValueError):
_aiohttp_proxies_arg(-1)
def test_parse_stream():
assert parse_stream_helper(None) is None
assert parse_stream_helper(b"data: [DONE]") is None
assert parse_stream_helper(b"data: test") == "test"
assert parse_stream_helper(b"test") is None
for line in parse_stream([b"data: test"]):
assert line == "test"
api_requestor = APIRequestor(base_url="http://www.baidu.com")
def mock_interpret_response(
self, result: requests.Response, stream: bool
) -> Tuple[Union[bytes, Iterator[Generator]], bytes]:
return b"baidu", False
async def mock_interpret_async_response(
self, result: aiohttp.ClientResponse, stream: bool
) -> Tuple[Union[OpenAIResponse, AsyncGenerator[OpenAIResponse, None]], bool]:
return b"baidu", True
def test_requestor_headers():
# validate_headers
headers = api_requestor._validate_headers(None)
assert not headers
with pytest.raises(Exception):
api_requestor._validate_headers(-1)
with pytest.raises(Exception):
api_requestor._validate_headers({1: 2})
with pytest.raises(Exception):
api_requestor._validate_headers({"test": 1})
supplied_headers = {"test": "test"}
assert api_requestor._validate_headers(supplied_headers) == supplied_headers
api_requestor.organization = "test"
api_requestor.api_version = "test123"
api_requestor.api_type = ApiType.OPEN_AI
request_id = "test123"
headers = api_requestor.request_headers(method="post", extra={}, request_id=request_id)
assert headers["LLM-Organization"] == api_requestor.organization
assert headers["LLM-Version"] == api_requestor.api_version
assert headers["X-Request-Id"] == request_id
def test_api_requestor(mocker):
mocker.patch("metagpt.provider.general_api_base.APIRequestor._interpret_response", mock_interpret_response)
resp, _, _ = api_requestor.request(method="get", url="/s?wd=baidu")
resp, _, _ = api_requestor.request(method="post", url="/s?wd=baidu")
@pytest.mark.asyncio
async def test_async_api_requestor(mocker):
mocker.patch(
"metagpt.provider.general_api_base.APIRequestor._interpret_async_response", mock_interpret_async_response
)
resp, _, _ = await api_requestor.arequest(method="get", url="/s?wd=baidu")
resp, _, _ = await api_requestor.arequest(method="post", url="/s?wd=baidu")
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/provider/test_ollama_api.py | tests/metagpt/provider/test_ollama_api.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : the unittest of ollama api
import json
from typing import Any, AsyncGenerator, Tuple
import pytest
from metagpt.provider.ollama_api import OllamaLLM, OpenAIResponse
from tests.metagpt.provider.mock_llm_config import mock_llm_config
from tests.metagpt.provider.req_resp_const import (
llm_general_chat_funcs_test,
messages,
prompt,
resp_cont_tmpl,
)
resp_cont = resp_cont_tmpl.format(name="ollama")
default_resp = {"message": {"role": "assistant", "content": resp_cont}}
async def mock_ollama_arequest(self, stream: bool = False, **kwargs) -> Tuple[Any, Any, bool]:
if stream:
async def async_event_generator() -> AsyncGenerator[Any, None]:
events = [
b'{"message": {"role": "assistant", "content": "I\'m ollama"}, "done": false}',
b'{"prompt_eval_count": 20, "eval_count": 20, "done": true}',
]
for event in events:
yield OpenAIResponse(event, {})
return async_event_generator(), None, None
else:
raw_default_resp = default_resp.copy()
raw_default_resp.update({"prompt_eval_count": 20, "eval_count": 20})
return OpenAIResponse(json.dumps(raw_default_resp).encode(), {}), None, None
@pytest.mark.asyncio
async def test_gemini_acompletion(mocker):
mocker.patch("metagpt.provider.general_api_requestor.GeneralAPIRequestor.arequest", mock_ollama_arequest)
ollama_llm = OllamaLLM(mock_llm_config)
resp = await ollama_llm.acompletion(messages)
assert resp["message"]["content"] == default_resp["message"]["content"]
resp = await ollama_llm.aask(prompt, stream=False)
assert resp == resp_cont
await llm_general_chat_funcs_test(ollama_llm, prompt, messages, resp_cont)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/provider/__init__.py | tests/metagpt/provider/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/6 17:32
@Author : alexanderwu
@File : __init__.py
"""
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/provider/test_base_llm.py | tests/metagpt/provider/test_base_llm.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/7 17:40
@Author : alexanderwu
@File : test_base_llm.py
"""
import pytest
from metagpt.configs.compress_msg_config import CompressType
from metagpt.configs.llm_config import LLMConfig
from metagpt.const import IMAGES
from metagpt.provider.base_llm import BaseLLM
from metagpt.schema import AIMessage, Message, UserMessage
from tests.metagpt.provider.mock_llm_config import mock_llm_config
from tests.metagpt.provider.req_resp_const import (
default_resp_cont,
get_part_chat_completion,
prompt,
)
name = "GPT"
class MockBaseLLM(BaseLLM):
def __init__(self, config: LLMConfig = None):
self.config = config or mock_llm_config
def completion(self, messages: list[dict], timeout=3):
return get_part_chat_completion(name)
async def _achat_completion(self, messages: list[dict], timeout=3):
pass
async def acompletion(self, messages: list[dict], timeout=3):
return get_part_chat_completion(name)
async def _achat_completion_stream(self, messages: list[dict], timeout: int = 3) -> str:
pass
async def acompletion_text(self, messages: list[dict], stream=False, timeout=3) -> str:
return default_resp_cont
def test_base_llm():
message = Message(role="user", content="hello")
assert "role" in message.to_dict()
assert "user" in str(message)
base_llm = MockBaseLLM()
openai_funccall_resp = {
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "test",
"tool_calls": [
{
"id": "call_Y5r6Ddr2Qc2ZrqgfwzPX5l72",
"type": "function",
"function": {
"name": "execute",
"arguments": '{\n "language": "python",\n "code": "print(\'Hello, World!\')"\n}',
},
}
],
},
"finish_reason": "stop",
}
]
}
func: dict = base_llm.get_choice_function(openai_funccall_resp)
assert func == {
"name": "execute",
"arguments": '{\n "language": "python",\n "code": "print(\'Hello, World!\')"\n}',
}
func_args: dict = base_llm.get_choice_function_arguments(openai_funccall_resp)
assert func_args == {"language": "python", "code": "print('Hello, World!')"}
choice_text = base_llm.get_choice_text(openai_funccall_resp)
assert choice_text == openai_funccall_resp["choices"][0]["message"]["content"]
# resp = base_llm.ask(prompt)
# assert resp == default_resp_cont
# resp = base_llm.ask_batch([prompt])
# assert resp == default_resp_cont
# resp = base_llm.ask_code([prompt])
# assert resp == default_resp_cont
@pytest.mark.asyncio
async def test_async_base_llm():
base_llm = MockBaseLLM()
resp = await base_llm.aask(prompt)
assert resp == default_resp_cont
resp = await base_llm.aask_batch([prompt])
assert resp == default_resp_cont
# resp = await base_llm.aask_code([prompt])
# assert resp == default_resp_cont
@pytest.mark.parametrize("compress_type", list(CompressType))
def test_compress_messages_no_effect(compress_type):
base_llm = MockBaseLLM()
messages = [
{"role": "system", "content": "first system msg"},
{"role": "system", "content": "second system msg"},
]
for i in range(5):
messages.append({"role": "user", "content": f"u{i}"})
messages.append({"role": "assistant", "content": f"a{i}"})
compressed = base_llm.compress_messages(messages, compress_type=compress_type)
# should take no effect for short context
assert compressed == messages
@pytest.mark.parametrize("compress_type", CompressType.cut_types())
def test_compress_messages_long(compress_type):
base_llm = MockBaseLLM()
base_llm.config.model = "test_llm"
max_token_limit = 100
messages = [
{"role": "system", "content": "first system msg"},
{"role": "system", "content": "second system msg"},
]
for i in range(100):
messages.append({"role": "user", "content": f"u{i}" * 10}) # ~2x10x0.5 = 10 tokens
messages.append({"role": "assistant", "content": f"a{i}" * 10})
compressed = base_llm.compress_messages(messages, compress_type=compress_type, max_token=max_token_limit)
print(compressed)
print(len(compressed))
assert 3 <= len(compressed) < len(messages)
assert compressed[0]["role"] == "system" and compressed[1]["role"] == "system"
assert compressed[2]["role"] != "system"
def test_long_messages_no_compress():
base_llm = MockBaseLLM()
messages = [{"role": "user", "content": "1" * 10000}] * 10000
compressed = base_llm.compress_messages(messages)
assert len(compressed) == len(messages)
@pytest.mark.parametrize("compress_type", CompressType.cut_types())
def test_compress_messages_long_no_sys_msg(compress_type):
base_llm = MockBaseLLM()
base_llm.config.model = "test_llm"
max_token_limit = 100
messages = [{"role": "user", "content": "1" * 10000}]
compressed = base_llm.compress_messages(messages, compress_type=compress_type, max_token=max_token_limit)
print(compressed)
assert compressed
assert len(compressed[0]["content"]) < len(messages[0]["content"])
def test_format_msg(mocker):
base_llm = MockBaseLLM()
messages = [UserMessage(content="req"), AIMessage(content="rsp")]
formatted_msgs = base_llm.format_msg(messages)
assert formatted_msgs == [{"role": "user", "content": "req"}, {"role": "assistant", "content": "rsp"}]
def test_format_msg_w_images(mocker):
base_llm = MockBaseLLM()
base_llm.model = "gpt-4o"
msg_w_images = UserMessage(content="req1")
msg_w_images.add_metadata(IMAGES, ["base64 string 1", "base64 string 2"])
msg_w_empty_images = UserMessage(content="req2")
msg_w_empty_images.add_metadata(IMAGES, [])
messages = [
msg_w_images, # should be converted
AIMessage(content="rsp"),
msg_w_empty_images, # should not be converted
]
formatted_msgs = base_llm.format_msg(messages)
assert formatted_msgs == [
{
"role": "user",
"content": [
{"type": "text", "text": "req1"},
{"type": "image_url", "image_url": {"url": "data:image/jpeg;base64,base64 string 1"}},
{"type": "image_url", "image_url": {"url": "data:image/jpeg;base64,base64 string 2"}},
],
},
{"role": "assistant", "content": "rsp"},
{"role": "user", "content": "req2"},
]
if name == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/provider/test_metagpt_llm.py | tests/metagpt/provider/test_metagpt_llm.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/8/30
@Author : mashenquan
@File : test_metagpt_llm.py
"""
from metagpt.provider.metagpt_api import MetaGPTLLM
from tests.metagpt.provider.mock_llm_config import mock_llm_config
def test_metagpt():
llm = MetaGPTLLM(mock_llm_config)
assert llm
if __name__ == "__main__":
test_metagpt()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/provider/test_qianfan_api.py | tests/metagpt/provider/test_qianfan_api.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : the unittest of qianfan api
from typing import AsyncIterator, Union
import pytest
from qianfan.resources.typing import JsonBody, QfResponse
from metagpt.provider.qianfan_api import QianFanLLM
from tests.metagpt.provider.mock_llm_config import mock_llm_config_qianfan
from tests.metagpt.provider.req_resp_const import (
get_qianfan_response,
llm_general_chat_funcs_test,
messages,
prompt,
resp_cont_tmpl,
)
name = "ERNIE-Bot-turbo"
resp_cont = resp_cont_tmpl.format(name=name)
def mock_qianfan_do(self, messages: list[dict], model: str, stream: bool = False, system: str = None) -> QfResponse:
return get_qianfan_response(name=name)
async def mock_qianfan_ado(
self, messages: list[dict], model: str, stream: bool = True, system: str = None
) -> Union[QfResponse, AsyncIterator[QfResponse]]:
resps = [get_qianfan_response(name=name)]
if stream:
async def aresp_iterator(resps: list[JsonBody]):
for resp in resps:
yield resp
return aresp_iterator(resps)
else:
return resps[0]
@pytest.mark.asyncio
async def test_qianfan_acompletion(mocker):
mocker.patch("qianfan.resources.llm.chat_completion.ChatCompletion.do", mock_qianfan_do)
mocker.patch("qianfan.resources.llm.chat_completion.ChatCompletion.ado", mock_qianfan_ado)
qianfan_llm = QianFanLLM(mock_llm_config_qianfan)
resp = qianfan_llm.completion(messages)
assert resp.get("result") == resp_cont
resp = await qianfan_llm.acompletion(messages)
assert resp.get("result") == resp_cont
await llm_general_chat_funcs_test(qianfan_llm, prompt, messages, resp_cont)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/provider/mock_llm_config.py | tests/metagpt/provider/mock_llm_config.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/1/8 17:03
@Author : alexanderwu
@File : mock_llm_config.py
"""
from metagpt.configs.llm_config import LLMConfig
mock_llm_config = LLMConfig(
llm_type="mock",
api_key="mock_api_key",
base_url="mock_base_url",
app_id="mock_app_id",
api_secret="mock_api_secret",
domain="mock_domain",
model="mock_model",
)
mock_llm_config_proxy = LLMConfig(
llm_type="mock",
api_key="mock_api_key",
base_url="mock_base_url",
proxy="http://localhost:8080",
)
mock_llm_config_azure = LLMConfig(
llm_type="azure",
api_version="2023-09-01-preview",
api_key="mock_api_key",
base_url="mock_base_url",
proxy="http://localhost:8080",
)
mock_llm_config_zhipu = LLMConfig(
llm_type="zhipu",
api_key="mock_api_key.zhipu",
base_url="mock_base_url",
model="mock_zhipu_model",
proxy="http://localhost:8080",
)
mock_llm_config_spark = LLMConfig(
api_type="spark",
app_id="xxx",
api_key="xxx",
api_secret="xxx",
domain="generalv2",
base_url="wss://spark-api.xf-yun.com/v3.1/chat",
)
mock_llm_config_qianfan = LLMConfig(api_type="qianfan", access_key="xxx", secret_key="xxx", model="ERNIE-Bot-turbo")
mock_llm_config_dashscope = LLMConfig(api_type="dashscope", api_key="xxx", model="qwen-max")
mock_llm_config_anthropic = LLMConfig(
api_type="anthropic", api_key="xxx", base_url="https://api.anthropic.com", model="claude-3-opus-20240229"
)
mock_llm_config_bedrock = LLMConfig(
api_type="bedrock",
model="gpt-100",
region_name="somewhere",
access_key="123abc",
secret_key="123abc",
max_token=10000,
)
mock_llm_config_ark = LLMConfig(api_type="ark", api_key="eyxxx", base_url="xxx", model="ep-xxx")
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/provider/test_spark_api.py | tests/metagpt/provider/test_spark_api.py | """
用于讯飞星火SDK的测试用例
文档:https://www.xfyun.cn/doc/spark/Web.html
"""
from typing import AsyncIterator, List
import pytest
from sparkai.core.messages.ai import AIMessage, AIMessageChunk
from sparkai.core.outputs.chat_generation import ChatGeneration
from sparkai.core.outputs.llm_result import LLMResult
from metagpt.provider.spark_api import SparkLLM
from tests.metagpt.provider.mock_llm_config import mock_llm_config_spark
from tests.metagpt.provider.req_resp_const import (
llm_general_chat_funcs_test,
messages,
prompt,
resp_cont_tmpl,
)
resp_cont = resp_cont_tmpl.format(name="Spark")
USAGE = {
"token_usage": {"question_tokens": 1000, "prompt_tokens": 1000, "completion_tokens": 1000, "total_tokens": 2000}
}
spark_agenerate_result = LLMResult(
generations=[[ChatGeneration(text=resp_cont, message=AIMessage(content=resp_cont, additional_kwargs=USAGE))]]
)
chunks = [AIMessageChunk(content=resp_cont), AIMessageChunk(content="", additional_kwargs=USAGE)]
async def chunk_iterator(chunks: List[AIMessageChunk]) -> AsyncIterator[AIMessageChunk]:
for chunk in chunks:
yield chunk
async def mock_spark_acreate(self, messages, stream):
if stream:
return chunk_iterator(chunks)
else:
return spark_agenerate_result
@pytest.mark.asyncio
async def test_spark_acompletion(mocker):
mocker.patch("metagpt.provider.spark_api.SparkLLM.acreate", mock_spark_acreate)
spark_llm = SparkLLM(mock_llm_config_spark)
resp = await spark_llm.acompletion([messages])
assert spark_llm.get_choice_text(resp) == resp_cont
await llm_general_chat_funcs_test(spark_llm, prompt, messages, resp_cont)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/provider/test_human_provider.py | tests/metagpt/provider/test_human_provider.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : the unittest of HumanProvider
import pytest
from metagpt.provider.human_provider import HumanProvider
from tests.metagpt.provider.mock_llm_config import mock_llm_config
resp_content = "test"
resp_exit = "exit"
@pytest.mark.asyncio
async def test_async_human_provider(mocker):
mocker.patch("builtins.input", lambda _: resp_content)
human_provider = HumanProvider(mock_llm_config)
resp = human_provider.ask(resp_content)
assert resp == resp_content
resp = await human_provider.aask(None)
assert resp_content == resp
mocker.patch("builtins.input", lambda _: resp_exit)
with pytest.raises(SystemExit):
human_provider.ask(resp_exit)
resp = await human_provider.acompletion([])
assert not resp
resp = await human_provider.acompletion_text([])
assert resp == ""
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/provider/postprocess/test_base_postprocess_plugin.py | tests/metagpt/provider/postprocess/test_base_postprocess_plugin.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
from metagpt.provider.postprocess.base_postprocess_plugin import BasePostProcessPlugin
raw_output = """
[CONTENT]
{
"Original Requirements": "xxx"
}
[/CONTENT]
"""
raw_schema = {
"title": "prd",
"type": "object",
"properties": {
"Original Requirements": {"title": "Original Requirements", "type": "string"},
},
"required": [
"Original Requirements",
],
}
def test_llm_post_process_plugin():
post_process_plugin = BasePostProcessPlugin()
output = post_process_plugin.run(output=raw_output, schema=raw_schema)
assert "Original Requirements" in output
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/provider/postprocess/__init__.py | tests/metagpt/provider/postprocess/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/provider/postprocess/test_llm_output_postprocess.py | tests/metagpt/provider/postprocess/test_llm_output_postprocess.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
from metagpt.provider.postprocess.llm_output_postprocess import llm_output_postprocess
from tests.metagpt.provider.postprocess.test_base_postprocess_plugin import (
raw_output,
raw_schema,
)
def test_llm_output_postprocess():
output = llm_output_postprocess(output=raw_output, schema=raw_schema)
assert "Original Requirements" in output
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/provider/zhipuai/test_zhipu_model_api.py | tests/metagpt/provider/zhipuai/test_zhipu_model_api.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
from typing import Any, Tuple
import pytest
import zhipuai
from metagpt.provider.zhipuai.zhipu_model_api import ZhiPuModelAPI
api_key = "xxx.xxx"
zhipuai.api_key = api_key
default_resp = b'{"choices": [{"finish_reason": "stop", "index": 0, "message": {"content": "test response", "role": "assistant"}}]}'
async def mock_requestor_arequest(self, **kwargs) -> Tuple[Any, Any, str]:
return default_resp, None, None
@pytest.mark.asyncio
async def test_zhipu_model_api(mocker):
url_prefix, url_suffix = ZhiPuModelAPI(api_key=api_key).split_zhipu_api_url()
assert url_prefix == "https://open.bigmodel.cn/api"
assert url_suffix == "/paas/v4/chat/completions"
mocker.patch("metagpt.provider.general_api_requestor.GeneralAPIRequestor.arequest", mock_requestor_arequest)
result = await ZhiPuModelAPI(api_key=api_key).arequest(
stream=False, method="get", headers={}, kwargs={"model": "glm-3-turbo"}
)
assert result == default_resp
result = await ZhiPuModelAPI(api_key=api_key).acreate()
assert result["choices"][0]["message"]["content"] == "test response"
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/provider/zhipuai/test_async_sse_client.py | tests/metagpt/provider/zhipuai/test_async_sse_client.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
import pytest
from metagpt.provider.zhipuai.async_sse_client import AsyncSSEClient
@pytest.mark.asyncio
async def test_async_sse_client():
class Iterator(object):
async def __aiter__(self):
yield b'data: {"test_key": "test_value"}'
async_sse_client = AsyncSSEClient(event_source=Iterator())
async for chunk in async_sse_client.stream():
assert "test_value" in chunk.values()
class InvalidIterator(object):
async def __aiter__(self):
yield b"invalid: test_value"
async_sse_client = AsyncSSEClient(event_source=InvalidIterator())
async for chunk in async_sse_client.stream():
assert not chunk
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.