repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/document_store/test_faiss_store.py | tests/metagpt/document_store/test_faiss_store.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/27 20:20
@Author : alexanderwu
@File : test_faiss_store.py
"""
import numpy as np
import pytest
from metagpt.const import EXAMPLE_PATH
from metagpt.document_store import FaissStore
from metagpt.logs import logger
from metagpt.roles import Sales
def mock_openai_embed_documents(self, texts: list[str], show_progress: bool = False) -> list[list[float]]:
num = len(texts)
embeds = np.random.randint(1, 100, size=(num, 1536)) # 1536: openai embedding dim
embeds = (embeds - embeds.mean(axis=0)) / embeds.std(axis=0)
return embeds.tolist()
def mock_openai_embed_document(self, text: str) -> list[float]:
embeds = mock_openai_embed_documents(self, [text])
return embeds[0]
@pytest.mark.asyncio
async def test_search_json(mocker):
mocker.patch("llama_index.embeddings.openai.base.OpenAIEmbedding._get_text_embeddings", mock_openai_embed_documents)
mocker.patch("llama_index.embeddings.openai.base.OpenAIEmbedding._get_text_embedding", mock_openai_embed_document)
store = FaissStore(EXAMPLE_PATH / "data/search_kb/example.json")
role = Sales(profile="Sales", store=store)
query = "Which facial cleanser is good for oily skin?"
result = await role.run(query)
logger.info(result)
@pytest.mark.asyncio
async def test_search_xlsx(mocker):
mocker.patch("llama_index.embeddings.openai.base.OpenAIEmbedding._get_text_embeddings", mock_openai_embed_documents)
mocker.patch("llama_index.embeddings.openai.base.OpenAIEmbedding._get_text_embedding", mock_openai_embed_document)
store = FaissStore(EXAMPLE_PATH / "data/search_kb/example.xlsx", meta_col="Answer", content_col="Question")
role = Sales(profile="Sales", store=store)
query = "Which facial cleanser is good for oily skin?"
result = await role.run(query)
logger.info(result)
@pytest.mark.asyncio
async def test_write(mocker):
mocker.patch("llama_index.embeddings.openai.base.OpenAIEmbedding._get_text_embeddings", mock_openai_embed_documents)
mocker.patch("llama_index.embeddings.openai.base.OpenAIEmbedding._get_text_embedding", mock_openai_embed_document)
store = FaissStore(EXAMPLE_PATH / "data/search_kb/example.xlsx", meta_col="Answer", content_col="Question")
_faiss_store = store.write()
assert _faiss_store.storage_context.docstore
assert _faiss_store.storage_context.vector_store.client
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/document_store/test_qdrant_store.py | tests/metagpt/document_store/test_qdrant_store.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/6/11 21:08
@Author : hezhaozhao
@File : test_qdrant_store.py
"""
import random
from qdrant_client.models import (
Distance,
FieldCondition,
Filter,
PointStruct,
Range,
VectorParams,
)
from metagpt.document_store.qdrant_store import QdrantConnection, QdrantStore
seed_value = 42
random.seed(seed_value)
vectors = [[random.random() for _ in range(2)] for _ in range(10)]
points = [
PointStruct(id=idx, vector=vector, payload={"color": "red", "rand_number": idx % 10})
for idx, vector in enumerate(vectors)
]
def assert_almost_equal(actual, expected):
delta = 1e-10
if isinstance(expected, list):
assert len(actual) == len(expected)
for ac, exp in zip(actual, expected):
assert abs(ac - exp) <= delta, f"{ac} is not within {delta} of {exp}"
else:
assert abs(actual - expected) <= delta, f"{actual} is not within {delta} of {expected}"
def test_qdrant_store():
qdrant_connection = QdrantConnection(memory=True)
vectors_config = VectorParams(size=2, distance=Distance.COSINE)
qdrant_store = QdrantStore(qdrant_connection)
qdrant_store.create_collection("Book", vectors_config, force_recreate=True)
assert qdrant_store.has_collection("Book") is True
qdrant_store.delete_collection("Book")
assert qdrant_store.has_collection("Book") is False
qdrant_store.create_collection("Book", vectors_config)
assert qdrant_store.has_collection("Book") is True
qdrant_store.add("Book", points)
results = qdrant_store.search("Book", query=[1.0, 1.0])
assert results[0]["id"] == 2
assert_almost_equal(results[0]["score"], 0.999106722578389)
assert results[1]["id"] == 7
assert_almost_equal(results[1]["score"], 0.9961650411397226)
results = qdrant_store.search("Book", query=[1.0, 1.0], return_vector=True)
assert results[0]["id"] == 2
assert_almost_equal(results[0]["score"], 0.999106722578389)
assert_almost_equal(results[0]["vector"], [0.7363563179969788, 0.6765939593315125])
assert results[1]["id"] == 7
assert_almost_equal(results[1]["score"], 0.9961650411397226)
assert_almost_equal(results[1]["vector"], [0.7662628889083862, 0.6425272226333618])
results = qdrant_store.search(
"Book",
query=[1.0, 1.0],
query_filter=Filter(must=[FieldCondition(key="rand_number", range=Range(gte=8))]),
)
assert results[0]["id"] == 8
assert_almost_equal(results[0]["score"], 0.9100373450784073)
assert results[1]["id"] == 9
assert_almost_equal(results[1]["score"], 0.7127610621127889)
results = qdrant_store.search(
"Book",
query=[1.0, 1.0],
query_filter=Filter(must=[FieldCondition(key="rand_number", range=Range(gte=8))]),
return_vector=True,
)
assert_almost_equal(results[0]["vector"], [0.35037919878959656, 0.9366079568862915])
assert_almost_equal(results[1]["vector"], [0.9999677538871765, 0.00802854634821415])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/document_store/__init__.py | tests/metagpt/document_store/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/27 20:19
@Author : alexanderwu
@File : __init__.py
"""
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/document_store/test_milvus_store.py | tests/metagpt/document_store/test_milvus_store.py | import random
seed_value = 42
random.seed(seed_value)
vectors = [[random.random() for _ in range(8)] for _ in range(10)]
ids = [f"doc_{i}" for i in range(10)]
metadata = [{"color": "red", "rand_number": i % 10} for i in range(10)]
def assert_almost_equal(actual, expected):
delta = 1e-10
if isinstance(expected, list):
assert len(actual) == len(expected)
for ac, exp in zip(actual, expected):
assert abs(ac - exp) <= delta, f"{ac} is not within {delta} of {exp}"
else:
assert abs(actual - expected) <= delta, f"{actual} is not within {delta} of {expected}"
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/document_store/test_document.py | tests/metagpt/document_store/test_document.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/6/11 19:46
@Author : alexanderwu
@File : test_document.py
"""
import pytest
from metagpt.const import METAGPT_ROOT
from metagpt.document import IndexableDocument
CASES = [
("requirements.txt", None, None, 0),
# ("cases/faq.csv", "Question", "Answer", 1),
# ("cases/faq.json", "Question", "Answer", 1),
# ("docx/faq.docx", None, None, 1),
# ("cases/faq.pdf", None, None, 0), # 这是因为pdf默认没有分割段落
# ("cases/faq.txt", None, None, 0), # 这是因为txt按照256分割段落
]
@pytest.mark.parametrize("relative_path, content_col, meta_col, threshold", CASES)
def test_document(relative_path, content_col, meta_col, threshold):
doc = IndexableDocument.from_path(METAGPT_ROOT / relative_path, content_col, meta_col)
rsp = doc.get_docs_and_metadatas()
assert len(rsp[0]) > threshold
assert len(rsp[1]) > threshold
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/document_store/test_lancedb_store.py | tests/metagpt/document_store/test_lancedb_store.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/8/9 15:42
@Author : unkn-wn (Leon Yee)
@File : test_lancedb_store.py
"""
import random
from metagpt.document_store.lancedb_store import LanceStore
def test_lance_store():
# This simply establishes the connection to the database, so we can drop the table if it exists
store = LanceStore("test")
store.drop("test")
store.write(
data=[[random.random() for _ in range(100)] for _ in range(2)],
metadatas=[{"source": "google-docs"}, {"source": "notion"}],
ids=["doc1", "doc2"],
)
store.add(data=[random.random() for _ in range(100)], metadata={"source": "notion"}, _id="doc3")
result = store.search([random.random() for _ in range(100)], n_results=3)
assert len(result) == 3
store.delete("doc2")
result = store.search(
[random.random() for _ in range(100)], n_results=3, where="source = 'notion'", metric="cosine"
)
assert len(result) == 1
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/rag/test_large_pdf.py | tests/metagpt/rag/test_large_pdf.py | import pytest
from metagpt.config2 import Config
from metagpt.const import TEST_DATA_PATH
from metagpt.rag.engines import SimpleEngine
from metagpt.rag.factories.embedding import RAGEmbeddingFactory
from metagpt.utils.common import aread
@pytest.mark.skip
@pytest.mark.parametrize(
("knowledge_filename", "query_filename", "answer_filename"),
[
(
TEST_DATA_PATH / "embedding/2.knowledge.md",
TEST_DATA_PATH / "embedding/2.query.md",
TEST_DATA_PATH / "embedding/2.answer.md",
),
(
TEST_DATA_PATH / "embedding/3.knowledge.md",
TEST_DATA_PATH / "embedding/3.query.md",
TEST_DATA_PATH / "embedding/3.answer.md",
),
],
)
@pytest.mark.asyncio
async def test_large_pdf(knowledge_filename, query_filename, answer_filename):
Config.default(reload=True) # `config.embedding.model = "text-embedding-ada-002"` changes the cache.
engine = SimpleEngine.from_docs(
input_files=[knowledge_filename],
)
query = await aread(filename=query_filename)
rsp = await engine.aretrieve(query)
assert rsp
config = Config.default()
config.embedding.model = "text-embedding-ada-002"
factory = RAGEmbeddingFactory(config)
embedding = factory.get_rag_embedding()
answer = await aread(filename=answer_filename)
answer_embedding = await embedding.aget_text_embedding(answer)
similarity = 0
for i in rsp:
rsp_embedding = await embedding.aget_query_embedding(i.text)
v = embedding.similarity(answer_embedding, rsp_embedding)
similarity = max(similarity, v)
print(similarity)
assert similarity > 0.9
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/rag/__init__.py | tests/metagpt/rag/__init__.py | python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false | |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/rag/factories/test_ranker.py | tests/metagpt/rag/factories/test_ranker.py | import contextlib
import pytest
from llama_index.core.llms import MockLLM
from llama_index.core.postprocessor import LLMRerank
from metagpt.rag.factories.ranker import RankerFactory
from metagpt.rag.schema import ColbertRerankConfig, LLMRankerConfig, ObjectRankerConfig
class TestRankerFactory:
@pytest.fixture(autouse=True)
def ranker_factory(self):
self.ranker_factory: RankerFactory = RankerFactory()
@pytest.fixture
def mock_llm(self):
return MockLLM()
def test_get_rankers_with_no_configs(self, mock_llm, mocker):
mocker.patch.object(self.ranker_factory, "_extract_llm", return_value=mock_llm)
default_rankers = self.ranker_factory.get_rankers()
assert len(default_rankers) == 0
def test_get_rankers_with_configs(self, mock_llm):
mock_config = LLMRankerConfig(llm=mock_llm)
rankers = self.ranker_factory.get_rankers(configs=[mock_config])
assert len(rankers) == 1
assert isinstance(rankers[0], LLMRerank)
def test_extract_llm_from_config(self, mock_llm):
mock_config = LLMRankerConfig(llm=mock_llm)
extracted_llm = self.ranker_factory._extract_llm(config=mock_config)
assert extracted_llm == mock_llm
def test_extract_llm_from_kwargs(self, mock_llm):
extracted_llm = self.ranker_factory._extract_llm(llm=mock_llm)
assert extracted_llm == mock_llm
def test_create_llm_ranker(self, mock_llm):
mock_config = LLMRankerConfig(llm=mock_llm)
ranker = self.ranker_factory._create_llm_ranker(mock_config)
assert isinstance(ranker, LLMRerank)
def test_create_colbert_ranker(self, mocker, mock_llm):
with contextlib.suppress(ImportError):
mocker.patch("llama_index.postprocessor.colbert_rerank.ColbertRerank", return_value="colbert")
mock_config = ColbertRerankConfig(llm=mock_llm)
ranker = self.ranker_factory._create_colbert_ranker(mock_config)
assert ranker == "colbert"
def test_create_object_ranker(self, mocker, mock_llm):
mocker.patch("metagpt.rag.factories.ranker.ObjectSortPostprocessor", return_value="object")
mock_config = ObjectRankerConfig(field_name="fake", llm=mock_llm)
ranker = self.ranker_factory._create_object_ranker(mock_config)
assert ranker == "object"
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/rag/factories/test_llm.py | tests/metagpt/rag/factories/test_llm.py | from typing import Optional, Union
import pytest
from llama_index.core.llms import LLMMetadata
from metagpt.configs.llm_config import LLMConfig
from metagpt.const import USE_CONFIG_TIMEOUT
from metagpt.provider.base_llm import BaseLLM
from metagpt.rag.factories.llm import RAGLLM, get_rag_llm
class MockLLM(BaseLLM):
def __init__(self, config: LLMConfig):
...
async def _achat_completion(self, messages: list[dict], timeout=USE_CONFIG_TIMEOUT):
"""_achat_completion implemented by inherited class"""
async def acompletion(self, messages: list[dict], timeout=USE_CONFIG_TIMEOUT):
return "ok"
def completion(self, messages: list[dict], timeout=USE_CONFIG_TIMEOUT):
return "ok"
async def _achat_completion_stream(self, messages: list[dict], timeout: int = USE_CONFIG_TIMEOUT) -> str:
"""_achat_completion_stream implemented by inherited class"""
async def aask(
self,
msg: Union[str, list[dict[str, str]]],
system_msgs: Optional[list[str]] = None,
format_msgs: Optional[list[dict[str, str]]] = None,
images: Optional[Union[str, list[str]]] = None,
timeout=USE_CONFIG_TIMEOUT,
stream=True,
) -> str:
return "ok"
class TestRAGLLM:
@pytest.fixture
def mock_model_infer(self):
return MockLLM(config=LLMConfig())
@pytest.fixture
def rag_llm(self, mock_model_infer):
return RAGLLM(model_infer=mock_model_infer)
def test_metadata(self, rag_llm):
metadata = rag_llm.metadata
assert isinstance(metadata, LLMMetadata)
assert metadata.context_window == rag_llm.context_window
assert metadata.num_output == rag_llm.num_output
assert metadata.model_name == rag_llm.model_name
@pytest.mark.asyncio
async def test_acomplete(self, rag_llm, mock_model_infer):
response = await rag_llm.acomplete("question")
assert response.text == "ok"
def test_complete(self, rag_llm, mock_model_infer):
response = rag_llm.complete("question")
assert response.text == "ok"
def test_stream_complete(self, rag_llm, mock_model_infer):
rag_llm.stream_complete("question")
def test_get_rag_llm():
result = get_rag_llm(MockLLM(config=LLMConfig()))
assert isinstance(result, RAGLLM)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/rag/factories/test_index.py | tests/metagpt/rag/factories/test_index.py | import pytest
from llama_index.core.embeddings import MockEmbedding
from metagpt.rag.factories.index import RAGIndexFactory
from metagpt.rag.schema import (
BM25IndexConfig,
ChromaIndexConfig,
ElasticsearchIndexConfig,
ElasticsearchStoreConfig,
FAISSIndexConfig,
)
class TestRAGIndexFactory:
@pytest.fixture(autouse=True)
def setup(self):
self.index_factory = RAGIndexFactory()
@pytest.fixture
def faiss_config(self):
return FAISSIndexConfig(persist_path="")
@pytest.fixture
def chroma_config(self):
return ChromaIndexConfig(persist_path="", collection_name="")
@pytest.fixture
def bm25_config(self):
return BM25IndexConfig(persist_path="")
@pytest.fixture
def es_config(self, mocker):
return ElasticsearchIndexConfig(store_config=ElasticsearchStoreConfig())
@pytest.fixture
def mock_storage_context(self, mocker):
return mocker.patch("metagpt.rag.factories.index.StorageContext.from_defaults")
@pytest.fixture
def mock_load_index_from_storage(self, mocker):
return mocker.patch("metagpt.rag.factories.index.load_index_from_storage")
@pytest.fixture
def mock_from_vector_store(self, mocker):
return mocker.patch("metagpt.rag.factories.index.VectorStoreIndex.from_vector_store")
@pytest.fixture
def mock_embedding(self):
return MockEmbedding(embed_dim=1)
def test_create_faiss_index(
self, mocker, faiss_config, mock_storage_context, mock_load_index_from_storage, mock_embedding
):
# Mock
mock_faiss_store = mocker.patch("metagpt.rag.factories.index.FaissVectorStore.from_persist_dir")
# Exec
self.index_factory.get_index(faiss_config, embed_model=mock_embedding)
# Assert
mock_faiss_store.assert_called_once()
def test_create_bm25_index(
self, mocker, bm25_config, mock_storage_context, mock_load_index_from_storage, mock_embedding
):
self.index_factory.get_index(bm25_config, embed_model=mock_embedding)
def test_create_chroma_index(self, mocker, chroma_config, mock_from_vector_store, mock_embedding):
# Mock
mock_chroma_db = mocker.patch("metagpt.rag.factories.index.chromadb.PersistentClient")
mock_chroma_db.get_or_create_collection.return_value = mocker.MagicMock()
mock_chroma_store = mocker.patch("metagpt.rag.factories.index.ChromaVectorStore")
# Exec
self.index_factory.get_index(chroma_config, embed_model=mock_embedding)
# Assert
mock_chroma_store.assert_called_once()
def test_create_es_index(self, mocker, es_config, mock_from_vector_store, mock_embedding):
# Mock
mock_es_store = mocker.patch("metagpt.rag.factories.index.ElasticsearchStore")
# Exec
self.index_factory.get_index(es_config, embed_model=mock_embedding)
# Assert
mock_es_store.assert_called_once()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/rag/factories/test_retriever.py | tests/metagpt/rag/factories/test_retriever.py | import faiss
import pytest
from llama_index.core import VectorStoreIndex
from llama_index.core.embeddings import MockEmbedding
from llama_index.core.schema import TextNode
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.vector_stores.elasticsearch import ElasticsearchStore
from metagpt.rag.factories.retriever import RetrieverFactory
from metagpt.rag.retrievers.bm25_retriever import DynamicBM25Retriever
from metagpt.rag.retrievers.chroma_retriever import ChromaRetriever
from metagpt.rag.retrievers.es_retriever import ElasticsearchRetriever
from metagpt.rag.retrievers.faiss_retriever import FAISSRetriever
from metagpt.rag.retrievers.hybrid_retriever import SimpleHybridRetriever
from metagpt.rag.schema import (
BM25RetrieverConfig,
ChromaRetrieverConfig,
ElasticsearchRetrieverConfig,
ElasticsearchStoreConfig,
FAISSRetrieverConfig,
)
class TestRetrieverFactory:
@pytest.fixture(autouse=True)
def retriever_factory(self):
self.retriever_factory: RetrieverFactory = RetrieverFactory()
@pytest.fixture
def mock_faiss_index(self, mocker):
return mocker.MagicMock(spec=faiss.IndexFlatL2)
@pytest.fixture
def mock_vector_store_index(self, mocker):
mock = mocker.MagicMock(spec=VectorStoreIndex)
mock._embed_model = mocker.MagicMock()
mock.docstore.docs.values.return_value = []
return mock
@pytest.fixture
def mock_chroma_vector_store(self, mocker):
return mocker.MagicMock(spec=ChromaVectorStore)
@pytest.fixture
def mock_es_vector_store(self, mocker):
return mocker.MagicMock(spec=ElasticsearchStore)
@pytest.fixture
def mock_nodes(self, mocker):
return [TextNode(text="msg")]
@pytest.fixture
def mock_embedding(self):
return MockEmbedding(embed_dim=1)
def test_get_retriever_with_faiss_config(self, mock_faiss_index, mocker, mock_vector_store_index):
mock_config = FAISSRetrieverConfig(dimensions=128)
mocker.patch("faiss.IndexFlatL2", return_value=mock_faiss_index)
mocker.patch.object(self.retriever_factory, "_extract_index", return_value=mock_vector_store_index)
retriever = self.retriever_factory.get_retriever(configs=[mock_config])
assert isinstance(retriever, FAISSRetriever)
def test_get_retriever_with_bm25_config(self, mocker, mock_nodes):
mock_config = BM25RetrieverConfig()
mocker.patch("rank_bm25.BM25Okapi.__init__", return_value=None)
retriever = self.retriever_factory.get_retriever(configs=[mock_config], nodes=mock_nodes)
assert isinstance(retriever, DynamicBM25Retriever)
def test_get_retriever_with_multiple_configs_returns_hybrid(self, mocker, mock_nodes, mock_embedding):
mock_faiss_config = FAISSRetrieverConfig(dimensions=1)
mock_bm25_config = BM25RetrieverConfig()
mocker.patch("rank_bm25.BM25Okapi.__init__", return_value=None)
retriever = self.retriever_factory.get_retriever(
configs=[mock_faiss_config, mock_bm25_config], nodes=mock_nodes, embed_model=mock_embedding
)
assert isinstance(retriever, SimpleHybridRetriever)
def test_get_retriever_with_chroma_config(self, mocker, mock_chroma_vector_store, mock_embedding):
mock_config = ChromaRetrieverConfig(persist_path="/path/to/chroma", collection_name="test_collection")
mock_chromadb = mocker.patch("metagpt.rag.factories.retriever.chromadb.PersistentClient")
mock_chromadb.get_or_create_collection.return_value = mocker.MagicMock()
mocker.patch("metagpt.rag.factories.retriever.ChromaVectorStore", return_value=mock_chroma_vector_store)
retriever = self.retriever_factory.get_retriever(configs=[mock_config], nodes=[], embed_model=mock_embedding)
assert isinstance(retriever, ChromaRetriever)
def test_get_retriever_with_es_config(self, mocker, mock_es_vector_store, mock_embedding):
mock_config = ElasticsearchRetrieverConfig(store_config=ElasticsearchStoreConfig())
mocker.patch("metagpt.rag.factories.retriever.ElasticsearchStore", return_value=mock_es_vector_store)
retriever = self.retriever_factory.get_retriever(configs=[mock_config], nodes=[], embed_model=mock_embedding)
assert isinstance(retriever, ElasticsearchRetriever)
def test_create_default_retriever(self, mocker, mock_vector_store_index):
mocker.patch.object(self.retriever_factory, "_extract_index", return_value=mock_vector_store_index)
mock_vector_store_index.as_retriever = mocker.MagicMock()
retriever = self.retriever_factory.get_retriever()
mock_vector_store_index.as_retriever.assert_called_once()
assert retriever is mock_vector_store_index.as_retriever.return_value
def test_extract_index_from_config(self, mock_vector_store_index):
mock_config = FAISSRetrieverConfig(index=mock_vector_store_index)
extracted_index = self.retriever_factory._extract_index(config=mock_config)
assert extracted_index == mock_vector_store_index
def test_extract_index_from_kwargs(self, mock_vector_store_index):
extracted_index = self.retriever_factory._extract_index(index=mock_vector_store_index)
assert extracted_index == mock_vector_store_index
def test_get_or_build_when_get(self, mocker):
want = "existing_index"
mocker.patch.object(self.retriever_factory, "_extract_index", return_value=want)
got = self.retriever_factory._build_es_index(None)
assert got == want
def test_get_or_build_when_build(self, mocker):
want = "call_build_es_index"
mocker.patch.object(self.retriever_factory, "_build_es_index", return_value=want)
got = self.retriever_factory._build_es_index(None)
assert got == want
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/rag/factories/test_embedding.py | tests/metagpt/rag/factories/test_embedding.py | import pytest
from metagpt.config2 import Config
from metagpt.configs.embedding_config import EmbeddingType
from metagpt.configs.llm_config import LLMType
from metagpt.rag.factories.embedding import RAGEmbeddingFactory
class TestRAGEmbeddingFactory:
@pytest.fixture(autouse=True)
def mock_embedding_factory(self):
self.embedding_factory = RAGEmbeddingFactory()
@pytest.fixture
def mock_config(self, mocker):
config = Config.default().model_copy(deep=True)
default = mocker.patch("metagpt.config2.Config.default")
default.return_value = config
return config
@staticmethod
def mock_openai_embedding(mocker):
return mocker.patch("metagpt.rag.factories.embedding.OpenAIEmbedding")
@staticmethod
def mock_azure_embedding(mocker):
return mocker.patch("metagpt.rag.factories.embedding.AzureOpenAIEmbedding")
@staticmethod
def mock_gemini_embedding(mocker):
return mocker.patch("metagpt.rag.factories.embedding.GeminiEmbedding")
@staticmethod
def mock_ollama_embedding(mocker):
return mocker.patch("metagpt.rag.factories.embedding.OllamaEmbedding")
@pytest.mark.parametrize(
("mock_func", "embedding_type"),
[
(mock_openai_embedding, LLMType.OPENAI),
(mock_azure_embedding, LLMType.AZURE),
(mock_openai_embedding, EmbeddingType.OPENAI),
(mock_azure_embedding, EmbeddingType.AZURE),
(mock_gemini_embedding, EmbeddingType.GEMINI),
(mock_ollama_embedding, EmbeddingType.OLLAMA),
],
)
def test_get_rag_embedding(self, mock_func, embedding_type, mocker):
# Mock
mock = mock_func(mocker)
# Exec
self.embedding_factory.get_rag_embedding(embedding_type)
# Assert
mock.assert_called_once()
def test_get_rag_embedding_default(self, mocker, mock_config):
# Mock
mock_openai_embedding = self.mock_openai_embedding(mocker)
mock_config.embedding.api_type = None
mock_config.llm.api_type = LLMType.OPENAI
# Exec
self.embedding_factory.get_rag_embedding()
# Assert
mock_openai_embedding.assert_called_once()
@pytest.mark.parametrize(
"model, embed_batch_size, expected_params",
[("test_model", 100, {"model_name": "test_model", "embed_batch_size": 100}), (None, None, {})],
)
def test_try_set_model_and_batch_size(self, mock_config, model, embed_batch_size, expected_params):
# Mock
mock_config.embedding.model = model
mock_config.embedding.embed_batch_size = embed_batch_size
# Setup
test_params = {}
# Exec
self.embedding_factory._try_set_model_and_batch_size(test_params)
# Assert
assert test_params == expected_params
def test_resolve_embedding_type(self, mock_config):
# Mock
mock_config.embedding.api_type = EmbeddingType.OPENAI
# Exec
embedding_type = self.embedding_factory._resolve_embedding_type()
# Assert
assert embedding_type == EmbeddingType.OPENAI
def test_resolve_embedding_type_exception(self, mock_config):
# Mock
mock_config.embedding.api_type = None
mock_config.llm.api_type = LLMType.GEMINI
# Assert
with pytest.raises(TypeError):
self.embedding_factory._resolve_embedding_type()
def test_raise_for_key(self):
with pytest.raises(ValueError):
self.embedding_factory._raise_for_key("key")
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/rag/factories/test_base.py | tests/metagpt/rag/factories/test_base.py | import pytest
from metagpt.rag.factories.base import ConfigBasedFactory, GenericFactory
class TestGenericFactory:
@pytest.fixture
def creators(self):
return {
"type1": lambda name: f"Instance of type1 with {name}",
"type2": lambda name: f"Instance of type2 with {name}",
}
@pytest.fixture
def factory(self, creators):
return GenericFactory(creators=creators)
def test_get_instance_success(self, factory):
# Test successful retrieval of an instance
key = "type1"
instance = factory.get_instance(key, name="TestName")
assert instance == "Instance of type1 with TestName"
def test_get_instance_failure(self, factory):
# Test failure to retrieve an instance due to unregistered key
with pytest.raises(ValueError) as exc_info:
factory.get_instance("unknown_key")
assert "Creator not registered for key: unknown_key" in str(exc_info.value)
def test_get_instances_success(self, factory):
# Test successful retrieval of multiple instances
keys = ["type1", "type2"]
instances = factory.get_instances(keys, name="TestName")
expected = ["Instance of type1 with TestName", "Instance of type2 with TestName"]
assert instances == expected
@pytest.mark.parametrize(
"keys,expected_exception_message",
[
(["unknown_key"], "Creator not registered for key: unknown_key"),
(["type1", "unknown_key"], "Creator not registered for key: unknown_key"),
],
)
def test_get_instances_with_failure(self, factory, keys, expected_exception_message):
# Test failure to retrieve instances due to at least one unregistered key
with pytest.raises(ValueError) as exc_info:
factory.get_instances(keys, name="TestName")
assert expected_exception_message in str(exc_info.value)
class DummyConfig:
"""A dummy config class for testing."""
def __init__(self, name):
self.name = name
class TestConfigBasedFactory:
@pytest.fixture
def config_creators(self):
return {
DummyConfig: lambda config, **kwargs: f"Processed {config.name} with {kwargs.get('extra', 'no extra')}",
}
@pytest.fixture
def config_factory(self, config_creators):
return ConfigBasedFactory(creators=config_creators)
def test_get_instance_success(self, config_factory):
# Test successful retrieval of an instance
config = DummyConfig(name="TestConfig")
instance = config_factory.get_instance(config, extra="additional data")
assert instance == "Processed TestConfig with additional data"
def test_get_instance_failure(self, config_factory):
# Test failure to retrieve an instance due to unknown config type
class UnknownConfig:
pass
config = UnknownConfig()
with pytest.raises(ValueError) as exc_info:
config_factory.get_instance(config)
assert "Unknown config:" in str(exc_info.value)
def test_val_from_config_or_kwargs_priority(self):
# Test that the value from the config object has priority over kwargs
config = DummyConfig(name="ConfigName")
result = ConfigBasedFactory._val_from_config_or_kwargs("name", config, name="KwargsName")
assert result == "ConfigName"
def test_val_from_config_or_kwargs_fallback_to_kwargs(self):
# Test fallback to kwargs when config object does not have the value
config = DummyConfig(name=None)
result = ConfigBasedFactory._val_from_config_or_kwargs("name", config, name="KwargsName")
assert result == "KwargsName"
def test_val_from_config_or_kwargs_key_error(self):
# Test KeyError when the key is not found in both config object and kwargs
config = DummyConfig(name=None)
val = ConfigBasedFactory._val_from_config_or_kwargs("missing_key", config)
assert val is None
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/rag/rankers/test_base_ranker.py | tests/metagpt/rag/rankers/test_base_ranker.py | import pytest
from llama_index.core.schema import NodeWithScore, QueryBundle, TextNode
from metagpt.rag.rankers.base import RAGRanker
class SimpleRAGRanker(RAGRanker):
    """Minimal concrete RAGRanker: bumps every node's score by one."""

    def _postprocess_nodes(self, nodes, query_bundle=None):
        reranked = []
        for item in nodes:
            reranked.append(NodeWithScore(node=item.node, score=item.score + 1))
        return reranked
class TestSimpleRAGRanker:
    """Checks that SimpleRAGRanker increments each node's score during post-processing."""

    @pytest.fixture
    def ranker(self):
        return SimpleRAGRanker()

    def test_postprocess_nodes_increases_scores(self, ranker):
        originals = [
            NodeWithScore(node=TextNode(text="a"), score=10),
            NodeWithScore(node=TextNode(text="b"), score=20),
        ]
        processed = ranker._postprocess_nodes(originals, QueryBundle(query_str="test query"))
        for before, after in zip(originals, processed):
            assert after.score == before.score + 1
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/rag/rankers/test_object_ranker.py | tests/metagpt/rag/rankers/test_object_ranker.py | import json
import pytest
from llama_index.core.schema import NodeWithScore, QueryBundle
from pydantic import BaseModel
from metagpt.rag.rankers.object_ranker import ObjectSortPostprocessor
from metagpt.rag.schema import ObjectNode
class Record(BaseModel):
    # Minimal payload model; `score` is the field ObjectSortPostprocessor sorts on.
    score: int
class TestObjectSortPostprocessor:
    """Tests ObjectSortPostprocessor: sorting NodeWithScore lists by a field of the embedded object JSON."""

    @pytest.fixture
    def mock_nodes_with_scores(self):
        # Three nodes whose serialized Record payloads carry scores 10, 20 and 5.
        nodes = [
            NodeWithScore(node=ObjectNode(metadata={"obj_json": Record(score=10).model_dump_json()}), score=10),
            NodeWithScore(node=ObjectNode(metadata={"obj_json": Record(score=20).model_dump_json()}), score=20),
            NodeWithScore(node=ObjectNode(metadata={"obj_json": Record(score=5).model_dump_json()}), score=5),
        ]
        return nodes

    @pytest.fixture
    def mock_query_bundle(self, mocker):
        return mocker.MagicMock(spec=QueryBundle)

    def test_sort_descending(self, mock_nodes_with_scores, mock_query_bundle):
        # order="desc" puts the highest score first.
        postprocessor = ObjectSortPostprocessor(field_name="score", order="desc")
        sorted_nodes = postprocessor._postprocess_nodes(mock_nodes_with_scores, mock_query_bundle)
        assert [node.score for node in sorted_nodes] == [20, 10, 5]

    def test_sort_ascending(self, mock_nodes_with_scores, mock_query_bundle):
        # order="asc" puts the lowest score first.
        postprocessor = ObjectSortPostprocessor(field_name="score", order="asc")
        sorted_nodes = postprocessor._postprocess_nodes(mock_nodes_with_scores, mock_query_bundle)
        assert [node.score for node in sorted_nodes] == [5, 10, 20]

    def test_top_n_limit(self, mock_nodes_with_scores, mock_query_bundle):
        # top_n truncates the sorted result.
        postprocessor = ObjectSortPostprocessor(field_name="score", order="desc", top_n=2)
        sorted_nodes = postprocessor._postprocess_nodes(mock_nodes_with_scores, mock_query_bundle)
        assert len(sorted_nodes) == 2
        assert [node.score for node in sorted_nodes] == [20, 10]

    def test_invalid_json_metadata(self, mock_query_bundle):
        # Unparseable obj_json surfaces as ValueError.
        nodes = [NodeWithScore(node=ObjectNode(metadata={"obj_json": "invalid_json"}), score=10)]
        postprocessor = ObjectSortPostprocessor(field_name="score", order="desc")
        with pytest.raises(ValueError):
            postprocessor._postprocess_nodes(nodes, mock_query_bundle)

    def test_missing_query_bundle(self, mock_nodes_with_scores):
        # A query bundle is required; passing None raises ValueError.
        postprocessor = ObjectSortPostprocessor(field_name="score", order="desc")
        with pytest.raises(ValueError):
            postprocessor._postprocess_nodes(mock_nodes_with_scores, query_bundle=None)

    def test_field_not_found_in_object(self, mock_query_bundle):
        # Sorting on a field absent from the deserialized object raises ValueError.
        nodes = [NodeWithScore(node=ObjectNode(metadata={"obj_json": json.dumps({"not_score": 10})}), score=10)]
        postprocessor = ObjectSortPostprocessor(field_name="score", order="desc")
        with pytest.raises(ValueError):
            postprocessor._postprocess_nodes(nodes, query_bundle=mock_query_bundle)

    def test_not_nodes(self, mock_query_bundle):
        # An empty node list passes through unchanged.
        nodes = []
        postprocessor = ObjectSortPostprocessor(field_name="score", order="desc")
        result = postprocessor._postprocess_nodes(nodes, mock_query_bundle)
        assert result == []

    def test_class_name(self):
        assert ObjectSortPostprocessor.class_name() == "ObjectSortPostprocessor"
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/rag/engines/test_simple.py | tests/metagpt/rag/engines/test_simple.py | import json
import pytest
from llama_index.core import VectorStoreIndex
from llama_index.core.embeddings import MockEmbedding
from llama_index.core.llms import MockLLM
from llama_index.core.schema import Document, NodeWithScore, TextNode
from metagpt.rag.engines import SimpleEngine
from metagpt.rag.parsers import OmniParse
from metagpt.rag.retrievers import SimpleHybridRetriever
from metagpt.rag.retrievers.base import ModifiableRAGRetriever, PersistableRAGRetriever
from metagpt.rag.schema import BM25RetrieverConfig, ObjectNode
class TestSimpleEngine:
    """Unit tests for SimpleEngine construction (from docs/objs/index), search, retrieval,
    node addition, persistence, and file-extractor selection.

    All collaborators (directory reader, retriever/ranker/synthesizer factories) are patched
    at their import site in ``metagpt.rag.engines.simple``, so no real index or LLM is used.
    """

    @pytest.fixture
    def mock_llm(self):
        return MockLLM()

    @pytest.fixture
    def mock_embedding(self):
        # embed_dim=1 keeps the mock vectors as small as possible.
        return MockEmbedding(embed_dim=1)

    @pytest.fixture
    def mock_simple_directory_reader(self, mocker):
        return mocker.patch("metagpt.rag.engines.simple.SimpleDirectoryReader")

    @pytest.fixture
    def mock_get_retriever(self, mocker):
        return mocker.patch("metagpt.rag.engines.simple.get_retriever")

    @pytest.fixture
    def mock_get_rankers(self, mocker):
        return mocker.patch("metagpt.rag.engines.simple.get_rankers")

    @pytest.fixture
    def mock_get_response_synthesizer(self, mocker):
        return mocker.patch("metagpt.rag.engines.simple.get_response_synthesizer")

    @pytest.fixture
    def mock_get_file_extractor(self, mocker):
        return mocker.patch("metagpt.rag.engines.simple.SimpleEngine._get_file_extractor")

    def test_from_docs(
        self,
        mocker,
        mock_simple_directory_reader,
        mock_get_retriever,
        mock_get_rankers,
        mock_get_response_synthesizer,
        mock_get_file_extractor,
    ):
        # from_docs should read documents, then build retriever/rankers/synthesizer via the factories.
        # Mock
        mock_simple_directory_reader.return_value.load_data.return_value = [
            Document(text="document1"),
            Document(text="document2"),
        ]
        mock_get_retriever.return_value = mocker.MagicMock()
        mock_get_rankers.return_value = [mocker.MagicMock()]
        mock_get_response_synthesizer.return_value = mocker.MagicMock()
        file_extractor = mocker.MagicMock()
        mock_get_file_extractor.return_value = file_extractor
        # Setup
        input_dir = "test_dir"
        input_files = ["test_file1", "test_file2"]
        transformations = [mocker.MagicMock()]
        embed_model = mocker.MagicMock()
        llm = mocker.MagicMock()
        retriever_configs = [mocker.MagicMock()]
        ranker_configs = [mocker.MagicMock()]
        # Exec
        engine = SimpleEngine.from_docs(
            input_dir=input_dir,
            input_files=input_files,
            transformations=transformations,
            embed_model=embed_model,
            llm=llm,
            retriever_configs=retriever_configs,
            ranker_configs=ranker_configs,
        )
        # Assert
        mock_simple_directory_reader.assert_called_once_with(
            input_dir=input_dir, input_files=input_files, file_extractor=file_extractor, fs=None
        )
        mock_get_retriever.assert_called_once()
        mock_get_rankers.assert_called_once()
        mock_get_response_synthesizer.assert_called_once_with(llm=llm)
        assert isinstance(engine, SimpleEngine)

    def test_from_docs_without_file(self):
        # Neither input_dir nor input_files given -> ValueError.
        with pytest.raises(ValueError):
            SimpleEngine.from_docs()

    def test_from_objs(self, mock_llm, mock_embedding):
        # from_objs accepts objects exposing rag_key()/model_dump_json().
        # Mock
        class MockRAGObject:
            def rag_key(self):
                return "key"

            def model_dump_json(self):
                return "{}"

        objs = [MockRAGObject()]
        # Setup
        retriever_configs = []
        ranker_configs = []
        # Exec
        engine = SimpleEngine.from_objs(
            objs=objs,
            llm=mock_llm,
            embed_model=mock_embedding,
            retriever_configs=retriever_configs,
            ranker_configs=ranker_configs,
        )
        # Assert
        assert isinstance(engine, SimpleEngine)
        assert engine._transformations is not None

    def test_from_objs_with_bm25_config(self):
        # BM25 retriever config combined with an empty obj list is rejected.
        # Setup
        retriever_configs = [BM25RetrieverConfig()]
        # Exec
        with pytest.raises(ValueError):
            SimpleEngine.from_objs(
                objs=[],
                llm=MockLLM(),
                retriever_configs=retriever_configs,
                ranker_configs=[],
            )

    def test_from_index(self, mocker, mock_llm, mock_embedding):
        # from_index builds the engine's retriever from an existing index via get_index.
        # Mock
        mock_index = mocker.MagicMock(spec=VectorStoreIndex)
        mock_index.as_retriever.return_value = "retriever"
        mock_get_index = mocker.patch("metagpt.rag.engines.simple.get_index")
        mock_get_index.return_value = mock_index
        # Exec
        engine = SimpleEngine.from_index(
            index_config=mock_index,
            embed_model=mock_embedding,
            llm=mock_llm,
        )
        # Assert
        assert isinstance(engine, SimpleEngine)
        assert engine._retriever == "retriever"

    @pytest.mark.asyncio
    async def test_asearch(self, mocker):
        # asearch should delegate to aquery with the raw query string.
        # Mock
        test_query = "test query"
        expected_result = "expected result"
        mock_aquery = mocker.AsyncMock(return_value=expected_result)
        # Setup
        engine = SimpleEngine(retriever=mocker.MagicMock())
        engine.aquery = mock_aquery
        # Exec
        result = await engine.asearch(test_query)
        # Assert
        mock_aquery.assert_called_once_with(test_query)
        assert result == expected_result

    @pytest.mark.asyncio
    async def test_aretrieve(self, mocker):
        # aretrieve wraps the query in a QueryBundle before delegating to the parent class.
        # Mock
        mock_query_bundle = mocker.patch("metagpt.rag.engines.simple.QueryBundle", return_value="query_bundle")
        mock_super_aretrieve = mocker.patch(
            "metagpt.rag.engines.simple.RetrieverQueryEngine.aretrieve", new_callable=mocker.AsyncMock
        )
        mock_super_aretrieve.return_value = [TextNode(text="node_with_score", metadata={"is_obj": False})]
        # Setup
        engine = SimpleEngine(retriever=mocker.MagicMock())
        test_query = "test query"
        # Exec
        result = await engine.aretrieve(test_query)
        # Assert
        mock_query_bundle.assert_called_once_with(test_query)
        mock_super_aretrieve.assert_called_once_with("query_bundle")
        assert result[0].text == "node_with_score"

    def test_add_docs(self, mocker):
        # add_docs loads files, runs transformations, and feeds nodes to the retriever.
        # Mock
        mock_simple_directory_reader = mocker.patch("metagpt.rag.engines.simple.SimpleDirectoryReader")
        mock_simple_directory_reader.return_value.load_data.return_value = [
            Document(text="document1"),
            Document(text="document2"),
        ]
        mock_retriever = mocker.MagicMock(spec=ModifiableRAGRetriever)
        mock_run_transformations = mocker.patch("metagpt.rag.engines.simple.run_transformations")
        mock_run_transformations.return_value = ["node1", "node2"]
        # Setup
        engine = SimpleEngine(retriever=mock_retriever)
        input_files = ["test_file1", "test_file2"]
        # Exec
        engine.add_docs(input_files=input_files)
        # Assert
        mock_simple_directory_reader.assert_called_once_with(input_files=input_files)
        mock_retriever.add_nodes.assert_called_once_with(["node1", "node2"])

    def test_add_objs(self, mocker):
        # add_objs converts objects to nodes tagged with is_obj metadata.
        # Mock
        mock_retriever = mocker.MagicMock(spec=ModifiableRAGRetriever)

        # Setup
        class CustomTextNode(TextNode):
            def rag_key(self):
                return ""

            def model_dump_json(self):
                return ""

        objs = [CustomTextNode(text=f"text_{i}", metadata={"obj": f"obj_{i}"}) for i in range(2)]
        engine = SimpleEngine(retriever=mock_retriever)
        # Exec
        engine.add_objs(objs=objs)
        # Assert
        assert mock_retriever.add_nodes.call_count == 1
        for node in mock_retriever.add_nodes.call_args[0][0]:
            assert isinstance(node, TextNode)
            assert "is_obj" in node.metadata

    def test_persist_successfully(self, mocker):
        # persist delegates to a PersistableRAGRetriever without raising.
        # Mock
        mock_retriever = mocker.MagicMock(spec=PersistableRAGRetriever)
        mock_retriever.persist.return_value = mocker.MagicMock()
        # Setup
        engine = SimpleEngine(retriever=mock_retriever)
        # Exec
        engine.persist(persist_dir="")

    def test_ensure_retriever_of_type(self, mocker):
        # Hybrid retrievers pass the type check if any inner retriever matches; plain mismatches raise TypeError.
        # Mock
        class MyRetriever:
            def add_nodes(self):
                ...

        mock_retriever = mocker.MagicMock(spec=SimpleHybridRetriever)
        mock_retriever.retrievers = [MyRetriever()]
        # Setup
        engine = SimpleEngine(retriever=mock_retriever)
        # Assert
        engine._ensure_retriever_of_type(ModifiableRAGRetriever)
        with pytest.raises(TypeError):
            engine._ensure_retriever_of_type(PersistableRAGRetriever)
        with pytest.raises(TypeError):
            other_engine = SimpleEngine(retriever=mocker.MagicMock(spec=ModifiableRAGRetriever))
            other_engine._ensure_retriever_of_type(PersistableRAGRetriever)

    def test_with_obj_metadata(self, mocker):
        # _try_reconstruct_obj rebuilds the original object from obj_json and stores it under metadata["obj"].
        # Mock
        node = NodeWithScore(
            node=ObjectNode(
                text="example",
                metadata={
                    "is_obj": True,
                    "obj_cls_name": "ExampleObject",
                    "obj_mod_name": "__main__",
                    "obj_json": json.dumps({"key": "test_key", "value": "test_value"}),
                },
            )
        )

        class ExampleObject:
            def __init__(self, key, value):
                self.key = key
                self.value = value

            def __eq__(self, other):
                return self.key == other.key and self.value == other.value

        mock_import_class = mocker.patch("metagpt.rag.engines.simple.import_class")
        mock_import_class.return_value = ExampleObject
        # Setup
        SimpleEngine._try_reconstruct_obj([node])
        # Exec
        expected_obj = ExampleObject(key="test_key", value="test_value")
        # Assert
        assert "obj" in node.node.metadata
        assert node.node.metadata["obj"] == expected_obj

    def test_get_file_extractor(self, mocker):
        # Without an omniparse base_url there is no extractor; with one, PDFs map to OmniParse.
        # mock no omniparse config
        mock_omniparse_config = mocker.patch("metagpt.rag.engines.simple.config.omniparse", autospec=True)
        mock_omniparse_config.base_url = ""
        file_extractor = SimpleEngine._get_file_extractor()
        assert file_extractor == {}
        # mock have omniparse config
        mock_omniparse_config.base_url = "http://localhost:8000"
        file_extractor = SimpleEngine._get_file_extractor()
        assert ".pdf" in file_extractor
        assert isinstance(file_extractor[".pdf"], OmniParse)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/rag/retrievers/test_bm25_retriever.py | tests/metagpt/rag/retrievers/test_bm25_retriever.py | import pytest
from llama_index.core import VectorStoreIndex
from llama_index.core.schema import Node
from metagpt.rag.retrievers.bm25_retriever import DynamicBM25Retriever
class TestDynamicBM25Retriever:
    """Tests DynamicBM25Retriever node addition (corpus rebuild) and persistence."""

    @pytest.fixture(autouse=True)
    def setup(self, mocker):
        def make_doc(content):
            doc = mocker.MagicMock(spec=Node)
            doc.get_content.return_value = content
            return doc

        self.doc1 = make_doc("Document content 1")
        self.doc2 = make_doc("Document content 2")
        self.mock_nodes = [self.doc1, self.doc2]

        index = mocker.MagicMock(spec=VectorStoreIndex)
        index.storage_context.persist.return_value = "ok"

        # Patch BM25Okapi so the retriever can be built without a real corpus.
        self.mock_bm25okapi = mocker.patch("rank_bm25.BM25Okapi.__init__", return_value=None)
        self.retriever = DynamicBM25Retriever(nodes=[], tokenizer=mocker.MagicMock(), index=index)

    def test_add_docs_updates_nodes_and_corpus(self):
        self.retriever.add_nodes(self.mock_nodes)

        # Both the node list and the tokenized corpus grow; BM25 is rebuilt.
        assert len(self.retriever._nodes) == len(self.mock_nodes)
        assert len(self.retriever._corpus) == len(self.mock_nodes)
        self.retriever._tokenizer.assert_called()
        self.mock_bm25okapi.assert_called()

    def test_persist(self):
        self.retriever.persist("")
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/rag/retrievers/test_es_retriever.py | tests/metagpt/rag/retrievers/test_es_retriever.py | import pytest
from llama_index.core.schema import Node
from metagpt.rag.retrievers.es_retriever import ElasticsearchRetriever
class TestElasticsearchRetriever:
    """Verifies ElasticsearchRetriever forwards added nodes to the wrapped index."""

    @pytest.fixture(autouse=True)
    def setup(self, mocker):
        self.mock_nodes = [mocker.MagicMock(spec=Node) for _ in range(2)]
        self.doc1, self.doc2 = self.mock_nodes
        self.mock_index = mocker.MagicMock()
        self.retriever = ElasticsearchRetriever(self.mock_index)

    def test_add_nodes(self):
        self.retriever.add_nodes(self.mock_nodes)
        self.mock_index.insert_nodes.assert_called()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/rag/retrievers/test_hybrid_retriever.py | tests/metagpt/rag/retrievers/test_hybrid_retriever.py | import pytest
from llama_index.core.schema import NodeWithScore, TextNode
from metagpt.rag.retrievers import SimpleHybridRetriever
class TestSimpleHybridRetriever:
    """Tests SimpleHybridRetriever result merging, node addition, and persistence."""

    @pytest.fixture
    def mock_retriever(self, mocker):
        return mocker.MagicMock()

    @pytest.fixture
    def mock_hybrid_retriever(self, mock_retriever) -> SimpleHybridRetriever:
        return SimpleHybridRetriever(mock_retriever)

    @pytest.fixture
    def mock_node(self):
        return NodeWithScore(node=TextNode(id_="2"), score=0.95)

    @pytest.mark.asyncio
    async def test_aretrieve(self, mocker):
        question = "test query"

        def make_retriever(scored):
            retriever = mocker.AsyncMock()
            retriever.aretrieve.return_value = [
                NodeWithScore(node=TextNode(id_=node_id), score=score) for node_id, score in scored
            ]
            return retriever

        # Two retrievers whose result sets overlap on node "2".
        first = make_retriever([("1", 1.0), ("2", 0.95)])
        second = make_retriever([("2", 0.95), ("3", 0.8)])
        hybrid = SimpleHybridRetriever(first, second)

        results = await hybrid._aretrieve(question)

        # Duplicates across retrievers are merged into unique nodes.
        assert len(results) == 3
        assert {node.node.node_id for node in results} == {"1", "2", "3"}

        # The duplicated node keeps its score.
        scores = {node.node.node_id: node.score for node in results}
        assert scores["2"] == 0.95

    def test_add_nodes(self, mock_hybrid_retriever: SimpleHybridRetriever, mock_node):
        mock_hybrid_retriever.add_nodes([mock_node])
        mock_hybrid_retriever.retrievers[0].add_nodes.assert_called_once()

    def test_persist(self, mock_hybrid_retriever: SimpleHybridRetriever):
        mock_hybrid_retriever.persist("")
        mock_hybrid_retriever.retrievers[0].persist.assert_called_once()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/rag/retrievers/test_base_retriever.py | tests/metagpt/rag/retrievers/test_base_retriever.py | from metagpt.rag.retrievers.base import ModifiableRAGRetriever, PersistableRAGRetriever
class SubModifiableRAGRetriever(ModifiableRAGRetriever):
    # Concrete subclass used only to exercise ModifiableRAGRetriever.__subclasshook__.
    ...
class SubPersistableRAGRetriever(PersistableRAGRetriever):
    # Concrete subclass used only to exercise PersistableRAGRetriever.__subclasshook__.
    ...
class TestModifiableRAGRetriever:
    def test_subclasshook(self):
        # Calling the hook directly on the subclass yields NotImplemented.
        outcome = SubModifiableRAGRetriever.__subclasshook__(SubModifiableRAGRetriever)
        assert outcome is NotImplemented
class TestPersistableRAGRetriever:
    def test_subclasshook(self):
        # Calling the hook directly on the subclass yields NotImplemented.
        outcome = SubPersistableRAGRetriever.__subclasshook__(SubPersistableRAGRetriever)
        assert outcome is NotImplemented
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/rag/retrievers/test_faiss_retriever.py | tests/metagpt/rag/retrievers/test_faiss_retriever.py | import pytest
from llama_index.core.schema import Node
from metagpt.rag.retrievers.faiss_retriever import FAISSRetriever
class TestFAISSRetriever:
    """Tests FAISSRetriever node insertion and storage-context persistence."""

    @pytest.fixture(autouse=True)
    def setup(self, mocker):
        self.mock_nodes = [mocker.MagicMock(spec=Node) for _ in range(2)]
        self.doc1, self.doc2 = self.mock_nodes
        self.mock_index = mocker.MagicMock()
        self.retriever = FAISSRetriever(self.mock_index)

    def test_add_docs_calls_insert_for_each_document(self):
        self.retriever.add_nodes(self.mock_nodes)
        self.mock_index.insert_nodes.assert_called()

    def test_persist(self):
        self.retriever.persist("")
        self.mock_index.storage_context.persist.assert_called()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/rag/retrievers/test_chroma_retriever.py | tests/metagpt/rag/retrievers/test_chroma_retriever.py | import pytest
from llama_index.core.schema import Node
from metagpt.rag.retrievers.chroma_retriever import ChromaRetriever
class TestChromaRetriever:
    """Verifies ChromaRetriever forwards added nodes to the wrapped index."""

    @pytest.fixture(autouse=True)
    def setup(self, mocker):
        self.mock_nodes = [mocker.MagicMock(spec=Node) for _ in range(2)]
        self.doc1, self.doc2 = self.mock_nodes
        self.mock_index = mocker.MagicMock()
        self.retriever = ChromaRetriever(self.mock_index)

    def test_add_nodes(self):
        self.retriever.add_nodes(self.mock_nodes)
        self.mock_index.insert_nodes.assert_called()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/rag/parser/test_omniparse.py | tests/metagpt/rag/parser/test_omniparse.py | import pytest
from llama_index.core import Document
from metagpt.const import EXAMPLE_DATA_PATH
from metagpt.rag.parsers import OmniParse
from metagpt.rag.schema import (
OmniParsedResult,
OmniParseOptions,
OmniParseType,
ParseResultType,
)
from metagpt.utils.omniparse_client import OmniParseClient
# test data
TEST_DOCX = EXAMPLE_DATA_PATH / "omniparse/test01.docx"  # document-parsing fixture
TEST_PDF = EXAMPLE_DATA_PATH / "omniparse/test02.pdf"  # pdf-parsing fixture
TEST_VIDEO = EXAMPLE_DATA_PATH / "omniparse/test03.mp4"  # video-parsing fixture
TEST_AUDIO = EXAMPLE_DATA_PATH / "omniparse/test04.mp3"  # audio-parsing fixture
class TestOmniParseClient:
    """Unit tests for OmniParseClient; all HTTP traffic goes through the patched _request_parse."""

    # Shared client instance; safe because the network call is mocked in every test.
    parse_client = OmniParseClient()

    @pytest.fixture
    def mock_request_parse(self, mocker):
        return mocker.patch("metagpt.rag.parsers.omniparse.OmniParseClient._request_parse")

    @pytest.mark.asyncio
    async def test_parse_pdf(self, mock_request_parse):
        # parse_pdf returns the server payload as an OmniParsedResult.
        mock_content = "#test title\ntest content"
        mock_parsed_ret = OmniParsedResult(text=mock_content, markdown=mock_content)
        mock_request_parse.return_value = mock_parsed_ret.model_dump()
        parse_ret = await self.parse_client.parse_pdf(TEST_PDF)
        assert parse_ret == mock_parsed_ret

    @pytest.mark.asyncio
    async def test_parse_document(self, mock_request_parse):
        # Raw bytes require bytes_filename so the server can infer the format.
        mock_content = "#test title\ntest_parse_document"
        mock_parsed_ret = OmniParsedResult(text=mock_content, markdown=mock_content)
        mock_request_parse.return_value = mock_parsed_ret.model_dump()
        with open(TEST_DOCX, "rb") as f:
            file_bytes = f.read()
        with pytest.raises(ValueError):
            # bytes data must provide bytes_filename
            await self.parse_client.parse_document(file_bytes)
        parse_ret = await self.parse_client.parse_document(file_bytes, bytes_filename="test.docx")
        assert parse_ret == mock_parsed_ret

    @pytest.mark.asyncio
    async def test_parse_video(self, mock_request_parse):
        # Non-video extensions are rejected; valid ones return the raw payload dict.
        mock_content = "#test title\ntest_parse_video"
        mock_request_parse.return_value = {
            "text": mock_content,
            "metadata": {},
        }
        with pytest.raises(ValueError):
            # Wrong file extension test
            await self.parse_client.parse_video(TEST_DOCX)
        parse_ret = await self.parse_client.parse_video(TEST_VIDEO)
        assert "text" in parse_ret and "metadata" in parse_ret
        assert parse_ret["text"] == mock_content

    @pytest.mark.asyncio
    async def test_parse_audio(self, mock_request_parse):
        # parse_audio returns the raw payload dict.
        mock_content = "#test title\ntest_parse_audio"
        mock_request_parse.return_value = {
            "text": mock_content,
            "metadata": {},
        }
        parse_ret = await self.parse_client.parse_audio(TEST_AUDIO)
        assert "text" in parse_ret and "metadata" in parse_ret
        assert parse_ret["text"] == mock_content
class TestOmniParse:
    """Tests the OmniParse llama-index reader wrapper (sync and async load paths)."""

    @pytest.fixture
    def mock_omniparse(self):
        parser = OmniParse(
            parse_options=OmniParseOptions(
                parse_type=OmniParseType.PDF,
                result_type=ParseResultType.MD,
                max_timeout=120,
                num_workers=3,
            )
        )
        return parser

    @pytest.fixture
    def mock_request_parse(self, mocker):
        return mocker.patch("metagpt.rag.parsers.omniparse.OmniParseClient._request_parse")

    @pytest.mark.asyncio
    async def test_load_data(self, mock_omniparse, mock_request_parse):
        # mock
        mock_content = "#test title\ntest content"
        mock_parsed_ret = OmniParsedResult(text=mock_content, markdown=mock_content)
        mock_request_parse.return_value = mock_parsed_ret.model_dump()
        # single file: load_data wraps the parsed markdown in one Document
        documents = mock_omniparse.load_data(file_path=TEST_PDF)
        doc = documents[0]
        assert isinstance(doc, Document)
        assert doc.text == mock_parsed_ret.text == mock_parsed_ret.markdown
        # multi files: aload_data yields one Document per input path
        file_paths = [TEST_DOCX, TEST_PDF]
        mock_omniparse.parse_type = OmniParseType.DOCUMENT
        documents = await mock_omniparse.aload_data(file_path=file_paths)
        doc = documents[0]
        # assert
        assert isinstance(doc, Document)
        assert len(documents) == len(file_paths)
        assert doc.text == mock_parsed_ret.text == mock_parsed_ret.markdown
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_rebuild_sequence_view.py | tests/metagpt/actions/test_rebuild_sequence_view.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/1/4
@Author : mashenquan
@File : test_rebuild_sequence_view.py
@Desc : Unit tests for reconstructing the sequence diagram from a source code project.
"""
from pathlib import Path
import pytest
from metagpt.actions.rebuild_sequence_view import RebuildSequenceView
from metagpt.const import GRAPH_REPO_FILE_REPO
from metagpt.llm import LLM
from metagpt.utils.common import aread
from metagpt.utils.git_repository import ChangeType
from metagpt.utils.graph_repository import SPO
@pytest.mark.skip
@pytest.mark.asyncio
async def test_rebuild(context, mocker):
    """Rebuild the sequence view from a saved class-view graph (skipped: constructs a real LLM).

    Seeds the repo's graph DB with a canned networkx class view, pins the main-entry
    search to a known SPO, then runs RebuildSequenceView and checks the graph changed.
    """
    # Mock: persist the canned class-view graph into the repo and commit it.
    data = await aread(filename=Path(__file__).parent / "../../data/graph_db/networkx.class_view.json")
    graph_db_filename = Path(context.repo.workdir.name).with_suffix(".json")
    await context.repo.docs.graph_repo.save(filename=str(graph_db_filename), content=data)
    context.git_repo.add_change({f"{GRAPH_REPO_FILE_REPO}/{graph_db_filename}": ChangeType.UNTRACTED})
    context.git_repo.commit("commit1")
    # mock_spo = SPO(
    #     subject="metagpt/startup.py:__name__:__main__",
    #     predicate="has_page_info",
    #     object_='{"lineno":78,"end_lineno":79,"type_name":"ast.If","tokens":["__name__","__main__"],"properties":{}}',
    # )
    mock_spo = SPO(
        subject="metagpt/management/skill_manager.py:__name__:__main__",
        predicate="has_page_info",
        object_='{"lineno":113,"end_lineno":116,"type_name":"ast.If","tokens":["__name__","__main__"],"properties":{}}',
    )
    # Pin the entry-point search so the action analyzes skill_manager.py's __main__ block.
    mocker.patch.object(RebuildSequenceView, "_search_main_entry", return_value=[mock_spo])
    action = RebuildSequenceView(
        name="RedBean",
        i_context=str(
            Path(__file__).parent.parent.parent.parent / "metagpt/management/skill_manager.py:__name__:__main__"
        ),
        llm=LLM(),
        context=context,
    )
    await action.run()
    rows = await action.graph_db.select()
    assert rows
    assert context.repo.docs.graph_repo.changed_files
@pytest.mark.parametrize(
    ("root", "pathname", "want"),
    [
        (Path(__file__).parent.parent.parent, "/".join(__file__.split("/")[-2:]), Path(__file__)),
        (Path(__file__).parent.parent.parent, "f/g.txt", None),
    ],
)
def test_get_full_filename(root, pathname, want):
    """get_full_filename resolves a root-relative pathname to a full path, or None when absent.

    NOTE(review): the first parametrize row splits __file__ on "/", which assumes
    POSIX-style paths — confirm behavior on Windows runners.
    """
    res = RebuildSequenceView.get_full_filename(root=root, pathname=pathname)
    assert res == want
# Allow running this test module directly, with output capture disabled (-s).
if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_prepare_interview.py | tests/metagpt/actions/test_prepare_interview.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/9/13 00:26
@Author : fisherdeng
@File : test_generate_questions.py
"""
import pytest
from metagpt.actions.prepare_interview import PrepareInterview
from metagpt.logs import logger
@pytest.mark.asyncio
async def test_prepare_interview(context):
    """PrepareInterview should yield a numbered list of interview questions for the profile."""
    rsp = await PrepareInterview(context=context).run("I just graduated and hope to find a job as a Python engineer")
    logger.info(f"{rsp.content=}")
    # The reply must contain a "Questions" section with an enumerated list.
    assert "Questions" in rsp.content
    assert "1." in rsp.content
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_prepare_documents.py | tests/metagpt/actions/test_prepare_documents.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/12/6
@Author : mashenquan
@File : test_prepare_documents.py
@Desc: Unit test for prepare_documents.py
"""
import pytest
from metagpt.actions.prepare_documents import PrepareDocuments
from metagpt.const import REQUIREMENT_FILENAME
from metagpt.context import Context
from metagpt.schema import Message
@pytest.mark.asyncio
async def test_prepare_documents():
    """PrepareDocuments should initialize the repo and persist the requirement document."""
    msg = Message(content="New user requirements balabala...")
    ctx = Context()

    await PrepareDocuments(context=ctx).run(with_messages=[msg])

    # The git repo and project repo must exist after the action runs.
    assert ctx.git_repo
    assert ctx.repo

    # The requirement doc is stored verbatim under the canonical filename.
    saved = await ctx.repo.docs.get(filename=REQUIREMENT_FILENAME)
    assert saved
    assert saved.content == msg.content
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_design_api_an.py | tests/metagpt/actions/test_design_api_an.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/01/03
@Author : mannaandpoem
@File : test_design_api_an.py
"""
import pytest
from openai._models import BaseModel
from metagpt.actions.action_node import ActionNode, dict_to_markdown
from metagpt.actions.design_api import NEW_REQ_TEMPLATE
from metagpt.actions.design_api_an import REFINED_DESIGN_NODE
from metagpt.llm import LLM
from tests.data.incremental_dev_project.mock import (
DESIGN_SAMPLE,
REFINED_DESIGN_JSON,
REFINED_PRD_JSON,
)
@pytest.fixture()
def llm():
    # LLM instance fixture; the fill call that would use it is mocked in the tests below.
    return LLM()
def mock_refined_design_json():
    # Replacement for instruct_content.model_dump: returns the canned refined-design dict.
    return REFINED_DESIGN_JSON
@pytest.mark.asyncio
async def test_write_design_an(mocker, llm):
    """REFINED_DESIGN_NODE.fill (mocked) returns a node whose dump contains every refined section.

    Bug fix: the test previously did not request the ``llm`` fixture, so the bare
    name ``llm`` inside the function body resolved to the module-level fixture
    *function* object, which was then passed as ``llm=llm``. Requesting the
    fixture makes pytest inject the actual LLM instance.
    """
    # Build a minimal ActionNode whose instruct_content dumps the canned refined design.
    root = ActionNode.from_children(
        "RefinedDesignAPI", [ActionNode(key="", expected_type=str, instruction="", example="")]
    )
    root.instruct_content = BaseModel()
    root.instruct_content.model_dump = mock_refined_design_json
    # Patch fill so no real LLM call happens; the canned node is returned instead.
    mocker.patch("metagpt.actions.design_api_an.REFINED_DESIGN_NODE.fill", return_value=root)

    prompt = NEW_REQ_TEMPLATE.format(old_design=DESIGN_SAMPLE, context=dict_to_markdown(REFINED_PRD_JSON))
    node = await REFINED_DESIGN_NODE.fill(req=prompt, llm=llm)

    dump = node.instruct_content.model_dump()
    assert "Refined Implementation Approach" in dump
    assert "Refined File list" in dump
    assert "Refined Data structures and interfaces" in dump
    assert "Refined Program call flow" in dump
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/mock_json.py | tests/metagpt/actions/mock_json.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/12/24 20:32
@Author : alexanderwu
@File : mock_json.py
"""
PRD = {
"Language": "zh_cn",
"Programming Language": "Python",
"Original Requirements": "写一个简单的cli贪吃蛇",
"Project Name": "cli_snake",
"Product Goals": ["创建一个简单易用的贪吃蛇游戏", "提供良好的用户体验", "支持不同难度级别"],
"User Stories": [
"作为玩家,我希望能够选择不同的难度级别",
"作为玩家,我希望在每局游戏结束后能够看到我的得分",
"作为玩家,我希望在输掉游戏后能够重新开始",
"作为玩家,我希望看到简洁美观的界面",
"作为玩家,我希望能够在手机上玩游戏",
],
"Competitive Analysis": ["贪吃蛇游戏A:界面简单,缺乏响应式特性", "贪吃蛇游戏B:美观且响应式的界面,显示最高得分", "贪吃蛇游戏C:响应式界面,显示最高得分,但有很多广告"],
"Competitive Quadrant Chart": 'quadrantChart\n title "Reach and engagement of campaigns"\n x-axis "Low Reach" --> "High Reach"\n y-axis "Low Engagement" --> "High Engagement"\n quadrant-1 "We should expand"\n quadrant-2 "Need to promote"\n quadrant-3 "Re-evaluate"\n quadrant-4 "May be improved"\n "Game A": [0.3, 0.6]\n "Game B": [0.45, 0.23]\n "Game C": [0.57, 0.69]\n "Game D": [0.78, 0.34]\n "Game E": [0.40, 0.34]\n "Game F": [0.35, 0.78]\n "Our Target Product": [0.5, 0.6]',
"Requirement Analysis": "",
"Requirement Pool": [["P0", "主要代码..."], ["P0", "游戏算法..."]],
"UI Design draft": "基本功能描述,简单的风格和布局。",
"Anything UNCLEAR": "",
}
DESIGN = {
"Implementation approach": "我们将使用Python编程语言,并选择合适的开源框架来实现贪吃蛇游戏。我们将分析需求中的难点,并选择合适的开源框架来简化开发流程。",
"File list": ["main.py", "game.py"],
"Data structures and interfaces": "\nclassDiagram\n class Game {\n -int width\n -int height\n -int score\n -int speed\n -List<Point> snake\n -Point food\n +__init__(width: int, height: int, speed: int)\n +start_game()\n +change_direction(direction: str)\n +game_over()\n +update_snake()\n +update_food()\n +check_collision()\n }\n class Point {\n -int x\n -int y\n +__init__(x: int, y: int)\n }\n Game --> Point\n",
"Program call flow": "\nsequenceDiagram\n participant M as Main\n participant G as Game\n M->>G: start_game()\n M->>G: change_direction(direction)\n G->>G: update_snake()\n G->>G: update_food()\n G->>G: check_collision()\n G-->>G: game_over()\n",
"Anything UNCLEAR": "",
}
TASK = {
"Required packages": ["pygame==2.0.1"],
"Required Other language third-party packages": ["No third-party dependencies required"],
"Logic Analysis": [
["game.py", "Contains Game class and related functions for game logic"],
["main.py", "Contains the main function, imports Game class from game.py"],
],
"Task list": ["game.py", "main.py"],
"Full API spec": "",
"Shared Knowledge": "'game.py' contains functions shared across the project.",
"Anything UNCLEAR": "",
}
FILE_GAME = """## game.py
import pygame
import random
class Point:
def __init__(self, x: int, y: int):
self.x = x
self.y = y
class Game:
def __init__(self, width: int, height: int, speed: int):
self.width = width
self.height = height
self.score = 0
self.speed = speed
self.snake = [Point(width // 2, height // 2)]
self.food = self._create_food()
def start_game(self):
pygame.init()
self._display = pygame.display.set_mode((self.width, self.height))
pygame.display.set_caption('Snake Game')
self._clock = pygame.time.Clock()
self._running = True
while self._running:
self._handle_events()
self._update_snake()
self._update_food()
self._check_collision()
self._draw_screen()
self._clock.tick(self.speed)
def change_direction(self, direction: str):
# Update the direction of the snake based on user input
pass
def game_over(self):
# Display game over message and handle game over logic
pass
def _create_food(self) -> Point:
# Create and return a new food Point
return Point(random.randint(0, self.width - 1), random.randint(0, self.height - 1))
def _handle_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self._running = False
def _update_snake(self):
# Update the position of the snake based on its direction
pass
def _update_food(self):
# Update the position of the food if the snake eats it
pass
def _check_collision(self):
# Check for collision between the snake and the walls or itself
pass
def _draw_screen(self):
self._display.fill((0, 0, 0)) # Clear the screen
# Draw the snake and food on the screen
pygame.display.update()
if __name__ == "__main__":
game = Game(800, 600, 15)
game.start_game()
"""
FILE_GAME_CR_1 = """## Code Review: game.py
1. Yes, the code is implemented as per the requirements. It initializes the game with the specified width, height, and speed, and starts the game loop.
2. No, the logic for handling events and updating the snake, food, and collision is not implemented. To correct this, we need to implement the logic for handling events, updating the snake and food positions, and checking for collisions.
3. Yes, the existing code follows the "Data structures and interfaces" by defining the Game and Point classes with the specified attributes and methods.
4. No, several functions such as change_direction, game_over, _update_snake, _update_food, and _check_collision are not implemented. These functions need to be implemented to complete the game logic.
5. Yes, all necessary pre-dependencies have been imported. The required pygame package is imported at the beginning of the file.
6. No, methods from other files are not being reused as there are no other files being imported or referenced in the current code.
## Actions
1. Implement the logic for handling events, updating the snake and food positions, and checking for collisions within the Game class.
2. Implement the change_direction and game_over methods to handle user input and game over logic.
3. Implement the _update_snake method to update the position of the snake based on its direction.
4. Implement the _update_food method to update the position of the food if the snake eats it.
5. Implement the _check_collision method to check for collision between the snake and the walls or itself.
## Code Review Result
LBTM"""
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_write_prd_review.py | tests/metagpt/actions/test_write_prd_review.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/11 17:45
@Author : alexanderwu
@File : test_write_prd_review.py
"""
import pytest
from metagpt.actions.write_prd_review import WritePRDReview
@pytest.mark.asyncio
async def test_write_prd_review(context):
prd = """
Introduction: This is a new feature for our product.
Goals: The goal is to improve user engagement.
User Scenarios: The expected user group is millennials who like to use social media.
Requirements: The feature needs to be interactive and user-friendly.
Constraints: The feature needs to be implemented within 2 months.
Mockups: There will be a new button on the homepage that users can click to access the feature.
Metrics: We will measure the success of the feature by user engagement metrics.
Timeline: The feature should be ready for testing in 1.5 months.
"""
write_prd_review = WritePRDReview(name="write_prd_review", context=context)
prd_review = await write_prd_review.run(prd)
# We cannot exactly predict the generated PRD review, but we can check if it is a string and if it is not empty
assert isinstance(prd_review, str)
assert len(prd_review) > 0
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_generate_questions.py | tests/metagpt/actions/test_generate_questions.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/9/13 00:26
@Author : fisherdeng
@File : test_generate_questions.py
"""
import pytest
from metagpt.actions.generate_questions import GenerateQuestions
from metagpt.logs import logger
msg = """
## topic
如何做一个生日蛋糕
## record
我认为应该先准备好材料,然后再开始做蛋糕。
"""
@pytest.mark.asyncio
async def test_generate_questions(context):
action = GenerateQuestions(context=context)
rsp = await action.run(msg)
logger.info(f"{rsp.content=}")
assert "Questions" in rsp.content
assert "1." in rsp.content
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_write_tutorial.py | tests/metagpt/actions/test_write_tutorial.py | #!/usr/bin/env python3
# _*_ coding: utf-8 _*_
"""
@Time : 2023/9/6 21:41:34
@Author : Stitch-z
@File : test_write_tutorial.py
"""
from typing import Dict
import pytest
from metagpt.actions.write_tutorial import WriteContent, WriteDirectory
@pytest.mark.asyncio
@pytest.mark.parametrize(("language", "topic"), [("English", "Write a tutorial about Python")])
async def test_write_directory(language: str, topic: str, context):
ret = await WriteDirectory(language=language, context=context).run(topic=topic)
assert isinstance(ret, dict)
assert "title" in ret
assert "directory" in ret
assert isinstance(ret["directory"], list)
assert len(ret["directory"])
assert isinstance(ret["directory"][0], dict)
@pytest.mark.asyncio
@pytest.mark.parametrize(
("language", "topic", "directory"),
[("English", "Write a tutorial about Python", {"Introduction": ["What is Python?", "Why learn Python?"]})],
)
async def test_write_content(language: str, topic: str, directory: Dict, context):
ret = await WriteContent(language=language, directory=directory, context=context).run(topic=topic)
assert isinstance(ret, str)
assert list(directory.keys())[0] in ret
for value in list(directory.values())[0]:
assert value in ret
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_summarize_code.py | tests/metagpt/actions/test_summarize_code.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/11 17:46
@Author : mashenquan
@File : test_summarize_code.py
@Modifiled By: mashenquan, 2023-12-6. Unit test for summarize_code.py
"""
import pytest
from metagpt.actions.summarize_code import SummarizeCode
from metagpt.logs import logger
from metagpt.schema import CodeSummarizeContext
from tests.mock.mock_llm import MockLLM
DESIGN_CONTENT = """
{"Implementation approach": "To develop this snake game, we will use the Python language and choose the Pygame library. Pygame is an open-source Python module collection specifically designed for writing video games. It provides functionalities such as displaying images and playing sounds, making it suitable for creating intuitive and responsive user interfaces. We will ensure efficient game logic to prevent any delays during gameplay. The scoring system will be simple, with the snake gaining points for each food it eats. We will use Pygame's event handling system to implement pause and resume functionality, as well as high-score tracking. The difficulty will increase by speeding up the snake's movement. In the initial version, we will focus on single-player mode and consider adding multiplayer mode and customizable skins in future updates. Based on the new requirement, we will also add a moving obstacle that appears randomly. If the snake eats this obstacle, the game will end. If the snake does not eat the obstacle, it will disappear after 5 seconds. 
For this, we need to add mechanisms for obstacle generation, movement, and disappearance in the game logic.", "Project_name": "snake_game", "File list": ["main.py", "game.py", "snake.py", "food.py", "obstacle.py", "scoreboard.py", "constants.py", "assets/styles.css", "assets/index.html"], "Data structures and interfaces": "```mermaid\n classDiagram\n class Game{\n +int score\n +int speed\n +bool game_over\n +bool paused\n +Snake snake\n +Food food\n +Obstacle obstacle\n +Scoreboard scoreboard\n +start_game() void\n +pause_game() void\n +resume_game() void\n +end_game() void\n +increase_difficulty() void\n +update() void\n +render() void\n Game()\n }\n class Snake{\n +list body_parts\n +str direction\n +bool grow\n +move() void\n +grow() void\n +check_collision() bool\n Snake()\n }\n class Food{\n +tuple position\n +spawn() void\n Food()\n }\n class Obstacle{\n +tuple position\n +int lifetime\n +bool active\n +spawn() void\n +move() void\n +check_collision() bool\n +disappear() void\n Obstacle()\n }\n class Scoreboard{\n +int high_score\n +update_score(int) void\n +reset_score() void\n +load_high_score() void\n +save_high_score() void\n Scoreboard()\n }\n class Constants{\n }\n Game \"1\" -- \"1\" Snake: has\n Game \"1\" -- \"1\" Food: has\n Game \"1\" -- \"1\" Obstacle: has\n Game \"1\" -- \"1\" Scoreboard: has\n ```", "Program call flow": "```sequenceDiagram\n participant M as Main\n participant G as Game\n participant S as Snake\n participant F as Food\n participant O as Obstacle\n participant SB as Scoreboard\n M->>G: start_game()\n loop game loop\n G->>S: move()\n G->>S: check_collision()\n G->>F: spawn()\n G->>O: spawn()\n G->>O: move()\n G->>O: check_collision()\n G->>O: disappear()\n G->>SB: update_score(score)\n G->>G: update()\n G->>G: render()\n alt if paused\n M->>G: pause_game()\n M->>G: resume_game()\n end\n alt if game_over\n G->>M: end_game()\n end\n end\n```", "Anything UNCLEAR": "There is no need for further clarification as the requirements are 
already clear."}
"""
TASK_CONTENT = """
{"Required Python third-party packages": ["pygame==2.0.1"], "Required Other language third-party packages": ["No third-party packages required for other languages."], "Full API spec": "\n openapi: 3.0.0\n info:\n title: Snake Game API\n version: \"1.0.0\"\n paths:\n /start:\n get:\n summary: Start the game\n responses:\n '200':\n description: Game started successfully\n /pause:\n get:\n summary: Pause the game\n responses:\n '200':\n description: Game paused successfully\n /resume:\n get:\n summary: Resume the game\n responses:\n '200':\n description: Game resumed successfully\n /end:\n get:\n summary: End the game\n responses:\n '200':\n description: Game ended successfully\n /score:\n get:\n summary: Get the current score\n responses:\n '200':\n description: Current score retrieved successfully\n /highscore:\n get:\n summary: Get the high score\n responses:\n '200':\n description: High score retrieved successfully\n components: {}\n ", "Logic Analysis": [["constants.py", "Contains all the constant values like screen size, colors, game speeds, etc. This should be implemented first as it provides the base values for other components."], ["snake.py", "Contains the Snake class with methods for movement, growth, and collision detection. It is dependent on constants.py for configuration values."], ["food.py", "Contains the Food class responsible for spawning food items on the screen. It is dependent on constants.py for configuration values."], ["obstacle.py", "Contains the Obstacle class with methods for spawning, moving, and disappearing of obstacles, as well as collision detection with the snake. It is dependent on constants.py for configuration values."], ["scoreboard.py", "Contains the Scoreboard class for updating, resetting, loading, and saving high scores. 
It may use constants.py for configuration values and depends on the game's scoring logic."], ["game.py", "Contains the main Game class which includes the game loop and methods for starting, pausing, resuming, and ending the game. It is dependent on snake.py, food.py, obstacle.py, and scoreboard.py."], ["main.py", "The entry point of the game that initializes the game and starts the game loop. It is dependent on game.py."]], "Task list": ["constants.py", "snake.py", "food.py", "obstacle.py", "scoreboard.py", "game.py", "main.py"], "Shared Knowledge": "\n 'constants.py' should contain all the necessary configurations for the game, such as screen dimensions, color definitions, and speed settings. These constants will be used across multiple files, ensuring consistency and ease of updates. Ensure that the Pygame library is initialized correctly in 'main.py' before starting the game loop. Also, make sure that the game's state is managed properly when pausing and resuming the game.\n ", "Anything UNCLEAR": "The interaction between the 'obstacle.py' and the game loop needs to be clearly defined to ensure obstacles appear and disappear correctly. The lifetime of the obstacle and its random movement should be implemented in a way that does not interfere with the game's performance."}
"""
FOOD_PY = """
## food.py
import random
class Food:
def __init__(self):
self.position = (0, 0)
def generate(self):
x = random.randint(0, 9)
y = random.randint(0, 9)
self.position = (x, y)
def get_position(self):
return self.position
"""
GAME_PY = """
## game.py
import pygame
from snake import Snake
from food import Food
class Game:
def __init__(self):
self.score = 0
self.level = 1
self.snake = Snake()
self.food = Food()
def start_game(self):
pygame.init()
self.initialize_game()
self.game_loop()
def initialize_game(self):
self.score = 0
self.level = 1
self.snake.reset()
self.food.generate()
def game_loop(self):
game_over = False
while not game_over:
self.update()
self.draw()
self.handle_events()
self.check_collision()
self.increase_score()
self.increase_level()
if self.snake.is_collision():
game_over = True
self.game_over()
def update(self):
self.snake.move()
def draw(self):
self.snake.draw()
self.food.draw()
def handle_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_UP:
self.snake.change_direction("UP")
elif event.key == pygame.K_DOWN:
self.snake.change_direction("DOWN")
elif event.key == pygame.K_LEFT:
self.snake.change_direction("LEFT")
elif event.key == pygame.K_RIGHT:
self.snake.change_direction("RIGHT")
def check_collision(self):
if self.snake.get_head() == self.food.get_position():
self.snake.grow()
self.food.generate()
def increase_score(self):
self.score += 1
def increase_level(self):
if self.score % 10 == 0:
self.level += 1
def game_over(self):
print("Game Over")
self.initialize_game()
"""
MAIN_PY = """
## main.py
import pygame
from game import Game
def main():
pygame.init()
game = Game()
game.start_game()
if __name__ == "__main__":
main()
"""
SNAKE_PY = """
## snake.py
import pygame
class Snake:
def __init__(self):
self.body = [(0, 0)]
self.direction = (1, 0)
def move(self):
head = self.body[0]
dx, dy = self.direction
new_head = (head[0] + dx, head[1] + dy)
self.body.insert(0, new_head)
self.body.pop()
def change_direction(self, direction):
if direction == "UP":
self.direction = (0, -1)
elif direction == "DOWN":
self.direction = (0, 1)
elif direction == "LEFT":
self.direction = (-1, 0)
elif direction == "RIGHT":
self.direction = (1, 0)
def grow(self):
tail = self.body[-1]
dx, dy = self.direction
new_tail = (tail[0] - dx, tail[1] - dy)
self.body.append(new_tail)
def get_head(self):
return self.body[0]
def get_body(self):
return self.body[1:]
"""
mock_rsp = """
```mermaid
classDiagram
class Game{
+int score
+int level
+Snake snake
+Food food
+start_game() void
+initialize_game() void
+game_loop() void
+update() void
+draw() void
+handle_events() void
+check_collision() void
+increase_score() void
+increase_level() void
+game_over() void
Game()
}
class Snake{
+list body
+tuple direction
+move() void
+change_direction(direction: str) void
+grow() void
+get_head() tuple
+get_body() list
Snake()
}
class Food{
+tuple position
+generate() void
+get_position() tuple
Food()
}
Game "1" -- "1" Snake: has
Game "1" -- "1" Food: has
```
```sequenceDiagram
participant M as Main
participant G as Game
participant S as Snake
participant F as Food
M->>G: start_game()
G->>G: initialize_game()
G->>G: game_loop()
G->>S: move()
G->>S: change_direction()
G->>S: grow()
G->>F: generate()
S->>S: move()
S->>S: change_direction()
S->>S: grow()
F->>F: generate()
```
## Summary
The code consists of the main game logic, including the Game, Snake, and Food classes. The game loop is responsible for updating and drawing the game elements, handling events, checking collisions, and managing the game state. The Snake class handles the movement, growth, and direction changes of the snake, while the Food class is responsible for generating and tracking the position of food items.
## TODOs
- Modify 'game.py' to add the implementation of obstacle handling and interaction with the game loop.
- Implement 'obstacle.py' to include the methods for spawning, moving, and disappearing of obstacles, as well as collision detection with the snake.
- Update 'main.py' to initialize the obstacle and incorporate it into the game loop.
- Update the mermaid call flow diagram to include the interaction with the obstacle.
```python
{
"files_to_modify": {
"game.py": "Add obstacle handling and interaction with the game loop",
"obstacle.py": "Implement obstacle class with necessary methods",
"main.py": "Initialize the obstacle and incorporate it into the game loop"
}
}
```
"""
@pytest.mark.asyncio
async def test_summarize_code(context, mocker):
context.src_workspace = context.git_repo.workdir / "src"
await context.repo.docs.system_design.save(filename="1.json", content=DESIGN_CONTENT)
await context.repo.docs.task.save(filename="1.json", content=TASK_CONTENT)
await context.repo.with_src_path(context.src_workspace).srcs.save(filename="food.py", content=FOOD_PY)
assert context.repo.srcs.workdir == context.src_workspace
await context.repo.srcs.save(filename="game.py", content=GAME_PY)
await context.repo.srcs.save(filename="main.py", content=MAIN_PY)
await context.repo.srcs.save(filename="snake.py", content=SNAKE_PY)
mocker.patch.object(MockLLM, "_mock_rsp", return_value=mock_rsp)
all_files = context.repo.srcs.all_files
summarization_context = CodeSummarizeContext(
design_filename="1.json", task_filename="1.json", codes_filenames=all_files
)
action = SummarizeCode(context=context, i_context=summarization_context)
rsp = await action.run()
assert rsp
logger.info(rsp)
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_invoice_ocr.py | tests/metagpt/actions/test_invoice_ocr.py | #!/usr/bin/env python3
# _*_ coding: utf-8 _*_
"""
@Time : 2023/10/09 18:40:34
@Author : Stitch-z
@File : test_invoice_ocr.py
"""
from pathlib import Path
import pytest
from metagpt.actions.invoice_ocr import GenerateTable, InvoiceOCR, ReplyQuestion
from metagpt.const import TEST_DATA_PATH
@pytest.mark.asyncio
@pytest.mark.parametrize(
"invoice_path",
[
Path("invoices/invoice-3.jpg"),
Path("invoices/invoice-4.zip"),
],
)
async def test_invoice_ocr(invoice_path: Path, context):
invoice_path = TEST_DATA_PATH / invoice_path
resp = await InvoiceOCR(context=context).run(file_path=Path(invoice_path))
assert isinstance(resp, list)
@pytest.mark.asyncio
@pytest.mark.parametrize(
("invoice_path", "expected_result"),
[
(Path("invoices/invoice-1.pdf"), {"收款人": "小明", "城市": "深圳", "总费用/元": 412.00, "开票日期": "2023年02月03日"}),
],
)
async def test_generate_table(invoice_path: Path, expected_result: dict):
invoice_path = TEST_DATA_PATH / invoice_path
filename = invoice_path.name
ocr_result = await InvoiceOCR().run(file_path=Path(invoice_path))
table_data = await GenerateTable().run(ocr_results=ocr_result, filename=filename)
assert isinstance(table_data, list)
table_data = table_data[0]
assert expected_result["收款人"] == table_data["收款人"]
assert expected_result["城市"] in table_data["城市"]
assert float(expected_result["总费用/元"]) == float(table_data["总费用/元"])
assert expected_result["开票日期"] == table_data["开票日期"]
@pytest.mark.asyncio
@pytest.mark.parametrize(
("invoice_path", "query", "expected_result"),
[(Path("invoices/invoice-1.pdf"), "Invoicing date", "2023年02月03日")],
)
async def test_reply_question(invoice_path: Path, query: dict, expected_result: str):
invoice_path = TEST_DATA_PATH / invoice_path
ocr_result = await InvoiceOCR().run(file_path=Path(invoice_path))
result = await ReplyQuestion().run(query=query, ocr_result=ocr_result)
assert expected_result in result
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_write_review.py | tests/metagpt/actions/test_write_review.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/12/20 15:01
@Author : alexanderwu
@File : test_write_review.py
"""
import pytest
from metagpt.actions.write_review import WriteReview
TEMPLATE_CONTEXT = """
{
"Language": "zh_cn",
"Programming Language": "Python",
"Original Requirements": "写一个简单的2048",
"Project Name": "game_2048",
"Product Goals": [
"创建一个引人入胜的用户体验",
"确保高性能",
"提供可定制的功能"
],
"User Stories": [
"作为用户,我希望能够选择不同的难度级别",
"作为玩家,我希望在每局游戏结束后能看到我的得分"
],
"Competitive Analysis": [
"Python Snake Game: 界面简单,缺乏高级功能"
],
"Competitive Quadrant Chart": "quadrantChart\n title \"Reach and engagement of campaigns\"\n x-axis \"Low Reach\" --> \"High Reach\"\n y-axis \"Low Engagement\" --> \"High Engagement\"\n quadrant-1 \"我们应该扩展\"\n quadrant-2 \"需要推广\"\n quadrant-3 \"重新评估\"\n quadrant-4 \"可能需要改进\"\n \"Campaign A\": [0.3, 0.6]\n \"Campaign B\": [0.45, 0.23]\n \"Campaign C\": [0.57, 0.69]\n \"Campaign D\": [0.78, 0.34]\n \"Campaign E\": [0.40, 0.34]\n \"Campaign F\": [0.35, 0.78]\n \"Our Target Product\": [0.5, 0.6]",
"Requirement Analysis": "产品应该用户友好。",
"Requirement Pool": [
[
"P0",
"主要代码..."
],
[
"P0",
"游戏算法..."
]
],
"UI Design draft": "基本功能描述,简单的风格和布局。",
"Anything UNCLEAR": "..."
}
"""
@pytest.mark.asyncio
async def test_write_review(context):
write_review = WriteReview(context=context)
review = await write_review.run(TEMPLATE_CONTEXT)
assert review.instruct_content
assert review.get("LGTM") in ["LGTM", "LBTM"]
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_design_api_review.py | tests/metagpt/actions/test_design_api_review.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/11 19:31
@Author : alexanderwu
@File : test_design_api_review.py
"""
import pytest
from metagpt.actions.design_api_review import DesignReview
@pytest.mark.asyncio
async def test_design_api_review(context):
prd = "我们需要一个音乐播放器,它应该有播放、暂停、上一曲、下一曲等功能。"
api_design = """
数据结构:
1. Song: 包含歌曲信息,如标题、艺术家等。
2. Playlist: 包含一系列歌曲。
API列表:
1. play(song: Song): 开始播放指定的歌曲。
2. pause(): 暂停当前播放的歌曲。
3. next(): 跳到播放列表的下一首歌曲。
4. previous(): 跳到播放列表的上一首歌曲。
"""
_ = "API设计看起来非常合理,满足了PRD中的所有需求。"
design_api_review = DesignReview(context=context)
result = await design_api_review.run(prd, api_design)
_ = f"以下是产品需求文档(PRD):\n\n{prd}\n\n以下是基于这个PRD设计的API列表:\n\n{api_design}\n\n请审查这个API设计是否满足PRD的需求,以及是否符合良好的设计实践。"
# mock_llm.ask.assert_called_once_with(prompt)
assert len(result) > 0
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_debug_error.py | tests/metagpt/actions/test_debug_error.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/11 17:46
@Author : alexanderwu
@File : test_debug_error.py
@Modifiled By: mashenquan, 2023-12-6. According to RFC 135
"""
import uuid
import pytest
from metagpt.actions.debug_error import DebugError
from metagpt.schema import RunCodeContext, RunCodeResult
CODE_CONTENT = '''
from typing import List
from deck import Deck
from card import Card
class Player:
"""
A class representing a player in the Black Jack game.
"""
def __init__(self, name: str):
"""
Initialize a Player object.
Args:
name (str): The name of the player.
"""
self.name = name
self.hand: List[Card] = []
self.score = 0
def draw(self, deck: Deck):
"""
Draw a card from the deck and add it to the player's hand.
Args:
deck (Deck): The deck of cards.
"""
card = deck.draw_card()
self.hand.append(card)
self.calculate_score()
def calculate_score(self) -> int:
"""
Calculate the score of the player's hand.
Returns:
int: The score of the player's hand.
"""
self.score = sum(card.value for card in self.hand)
# Handle the case where Ace is counted as 11 and causes the score to exceed 21
if self.score > 21 and any(card.rank == 'A' for card in self.hand):
self.score -= 10
return self.score
'''
TEST_CONTENT = """
import unittest
from blackjack_game.player import Player
from blackjack_game.deck import Deck
from blackjack_game.card import Card
class TestPlayer(unittest.TestCase):
## Test the Player's initialization
def test_player_initialization(self):
player = Player("Test Player")
self.assertEqual(player.name, "Test Player")
self.assertEqual(player.hand, [])
self.assertEqual(player.score, 0)
## Test the Player's draw method
def test_player_draw(self):
deck = Deck()
player = Player("Test Player")
player.draw(deck)
self.assertEqual(len(player.hand), 1)
self.assertEqual(player.score, player.hand[0].value)
## Test the Player's calculate_score method
def test_player_calculate_score(self):
deck = Deck()
player = Player("Test Player")
player.draw(deck)
player.draw(deck)
self.assertEqual(player.score, sum(card.value for card in player.hand))
## Test the Player's calculate_score method with Ace card
def test_player_calculate_score_with_ace(self):
deck = Deck()
player = Player("Test Player")
player.hand.append(Card('A', 'Hearts', 11))
player.hand.append(Card('K', 'Hearts', 10))
player.calculate_score()
self.assertEqual(player.score, 21)
## Test the Player's calculate_score method with multiple Aces
def test_player_calculate_score_with_multiple_aces(self):
deck = Deck()
player = Player("Test Player")
player.hand.append(Card('A', 'Hearts', 11))
player.hand.append(Card('A', 'Diamonds', 11))
player.calculate_score()
self.assertEqual(player.score, 12)
if __name__ == '__main__':
unittest.main()
"""
@pytest.mark.asyncio
async def test_debug_error(context):
context.src_workspace = context.git_repo.workdir / uuid.uuid4().hex
ctx = RunCodeContext(
code_filename="player.py",
test_filename="test_player.py",
command=["python", "tests/test_player.py"],
output_filename="output.log",
)
await context.repo.with_src_path(context.src_workspace).srcs.save(filename=ctx.code_filename, content=CODE_CONTENT)
await context.repo.tests.save(filename=ctx.test_filename, content=TEST_CONTENT)
output_data = RunCodeResult(
stdout=";",
stderr="",
summary="======================================================================\n"
"FAIL: test_player_calculate_score_with_multiple_aces (__main__.TestPlayer)\n"
"----------------------------------------------------------------------\n"
"Traceback (most recent call last):\n"
' File "tests/test_player.py", line 46, in test_player_calculate_score_'
"with_multiple_aces\n"
" self.assertEqual(player.score, 12)\nAssertionError: 22 != 12\n\n"
"----------------------------------------------------------------------\n"
"Ran 5 tests in 0.007s\n\nFAILED (failures=1)\n;\n",
)
await context.repo.test_outputs.save(filename=ctx.output_filename, content=output_data.model_dump_json())
debug_error = DebugError(i_context=ctx, context=context)
rsp = await debug_error.run()
assert "class Player" in rsp # rewrite the same class
# a key logic to rewrite to (original one is "if self.score > 12")
assert "self.score" in rsp
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_fix_bug.py | tests/metagpt/actions/test_fix_bug.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/12/25 22:38
@Author : alexanderwu
@File : test_fix_bug.py
"""
import pytest
from metagpt.actions.fix_bug import FixBug
@pytest.mark.asyncio
async def test_fix_bug(context):
fix_bug = FixBug(context=context)
assert fix_bug.name == "FixBug"
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_research.py | tests/metagpt/actions/test_research.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/12/28
@Author : mashenquan
@File : test_research.py
"""
import pytest
from metagpt.actions import research
from metagpt.tools import SearchEngineType
from metagpt.tools.search_engine import SearchEngine
@pytest.mark.asyncio
async def test_collect_links(mocker, search_engine_mocker, context):
async def mock_llm_ask(self, prompt: str, system_msgs):
if "Please provide up to 2 necessary keywords" in prompt:
return '["metagpt", "llm"]'
elif "Provide up to 4 queries related to your research topic" in prompt:
return (
'["MetaGPT use cases", "The roadmap of MetaGPT", '
'"The function of MetaGPT", "What llm MetaGPT support"]'
)
elif "sort the remaining search results" in prompt:
return "[1,2]"
mocker.patch("metagpt.provider.base_llm.BaseLLM.aask", mock_llm_ask)
resp = await research.CollectLinks(
search_engine=SearchEngine(engine=SearchEngineType.DUCK_DUCK_GO), context=context
).run("The application of MetaGPT")
for i in ["MetaGPT use cases", "The roadmap of MetaGPT", "The function of MetaGPT", "What llm MetaGPT support"]:
assert i in resp
@pytest.mark.asyncio
async def test_collect_links_with_rank_func(mocker, search_engine_mocker, context):
rank_before = []
rank_after = []
url_per_query = 4
def rank_func(results):
results = results[:url_per_query]
rank_before.append(results)
results = results[::-1]
rank_after.append(results)
return results
mocker.patch("metagpt.provider.base_llm.BaseLLM.aask", mock_collect_links_llm_ask)
resp = await research.CollectLinks(
search_engine=SearchEngine(engine=SearchEngineType.DUCK_DUCK_GO),
rank_func=rank_func,
context=context,
).run("The application of MetaGPT")
for x, y, z in zip(rank_before, rank_after, resp.values()):
assert x[::-1] == y
assert [i["link"] for i in y] == z
@pytest.mark.asyncio
async def test_web_browse_and_summarize(mocker, context):
    """WebBrowseAndSummarize maps each URL to its summary, or None when irrelevant."""

    async def mock_llm_ask(*args, **kwargs):
        return "metagpt"

    mocker.patch("metagpt.provider.base_llm.BaseLLM.aask", mock_llm_ask)
    url = "https://github.com/geekan/MetaGPT"
    url2 = "https://github.com/trending"
    query = "What's new in metagpt"
    resp = await research.WebBrowseAndSummarize(context=context).run(url, query=query)
    assert len(resp) == 1
    assert url in resp
    assert resp[url] == "metagpt"

    # Multiple URLs yield one entry each.
    resp = await research.WebBrowseAndSummarize(context=context).run(url, url2, query=query)
    assert len(resp) == 2

    # When the LLM deems a page irrelevant, its summary is recorded as None.
    async def mock_llm_ask(*args, **kwargs):
        return "Not relevant."

    mocker.patch("metagpt.provider.base_llm.BaseLLM.aask", mock_llm_ask)
    resp = await research.WebBrowseAndSummarize(context=context).run(url, query=query)
    assert len(resp) == 1
    assert url in resp
    assert resp[url] is None
@pytest.mark.asyncio
async def test_conduct_research(mocker, context):
    """ConductResearch should return exactly what the (mocked) LLM produced."""
    captured = None

    async def fake_aask(*args, **kwargs):
        # Record the generated report so the assertion can compare against it.
        nonlocal captured
        captured = f"# Research Report\n## Introduction\n{args} {kwargs}"
        return captured

    mocker.patch("metagpt.provider.base_llm.BaseLLM.aask", fake_aask)
    content = (
        "MetaGPT takes a one line requirement as input and "
        "outputs user stories / competitive analysis / requirements / data structures / APIs / documents, etc."
    )
    report = await research.ConductResearch(context=context).run("The application of MetaGPT", content)
    assert report == captured
async def mock_collect_links_llm_ask(self, prompt: str, system_msgs):
    """Return canned LLM replies for the CollectLinks prompts used in these tests.

    Falls back to an empty string for any unrecognized prompt.
    """
    canned_replies = [
        ("Please provide up to 2 necessary keywords", '["metagpt", "llm"]'),
        (
            "Provide up to 4 queries related to your research topic",
            '["MetaGPT use cases", "The roadmap of MetaGPT", '
            '"The function of MetaGPT", "What llm MetaGPT support"]',
        ),
        ("sort the remaining search results", "[1,2]"),
    ]
    for marker, reply in canned_replies:
        if marker in prompt:
            return reply
    return ""
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_action.py | tests/metagpt/actions/test_action.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/11 14:43
@Author : alexanderwu
@File : test_action.py
"""
import pytest
from metagpt.actions import Action, ActionType, WritePRD, WriteTest
def test_action_repr():
    """str() of a list of actions should surface each action's class name."""
    sample_actions = [Action(), WriteTest(), WritePRD()]
    assert "WriteTest" in str(sample_actions)
def test_action_type():
    """ActionType members map enum names to their corresponding action classes."""
    expected = {"WRITE_PRD": WritePRD, "WRITE_TEST": WriteTest}
    for member_name, action_cls in expected.items():
        member = ActionType[member_name]
        assert member.value == action_cls
        assert member.name == member_name
def test_simple_action():
    """Constructing an Action with an instruction builds a node carrying it."""
    instruction = "Express your opinion with emotion and don't repeat it"
    say_action = Action(name="AlexSay", instruction=instruction)
    assert say_action.name == "AlexSay"
    assert say_action.node.instruction == instruction
def test_empty_action():
    """A bare Action defaults its name to the class name and has no node."""
    empty = Action()
    assert empty.name == "Action"
    assert not empty.node
@pytest.mark.asyncio
async def test_empty_action_exception():
    """Running an Action that has no node must raise NotImplementedError."""
    empty = Action()
    with pytest.raises(NotImplementedError):
        await empty.run()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_write_test.py | tests/metagpt/actions/test_write_test.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/11 17:45
@Author : alexanderwu
@File : test_write_test.py
"""
import pytest
from metagpt.actions.write_test import WriteTest
from metagpt.logs import logger
from metagpt.schema import Document, TestingContext
@pytest.mark.asyncio
async def test_write_test(context):
    """WriteTest should produce a unittest module for the given source file.

    Fix: the original reassigned the `context` pytest fixture parameter with the
    action's result, shadowing the fixture; the result now uses its own name.
    The generated content is non-deterministic, so only structural markers
    (import line, test class, test method) are asserted.
    """
    code = """
import random
from typing import Tuple

class Food:
    def __init__(self, position: Tuple[int, int]):
        self.position = position

    def generate(self, max_y: int, max_x: int):
        self.position = (random.randint(1, max_y - 1), random.randint(1, max_x - 1))
"""
    testing_context = TestingContext(filename="food.py", code_doc=Document(filename="food.py", content=code))
    write_test = WriteTest(i_context=testing_context, context=context)

    result_context = await write_test.run()
    logger.info(result_context.model_dump_json())

    # We cannot exactly predict the generated test cases, but we can check
    # that the output is a non-empty string with the expected structure.
    assert isinstance(result_context.test_doc.content, str)
    assert "from food import Food" in result_context.test_doc.content
    assert "class TestFood(unittest.TestCase)" in result_context.test_doc.content
    assert "def test_generate" in result_context.test_doc.content
@pytest.mark.asyncio
async def test_write_code_invalid_code(mocker, context):
    """write_code should pass through whatever the LLM returns, even non-code."""
    bogus_reply = "Invalid Code String"
    # Force the LLM layer to return a string that is not valid code.
    mocker.patch.object(WriteTest, "_aask", return_value=bogus_reply)
    writer = WriteTest(context=context)
    assert await writer.write_code("Some prompt:") == bogus_reply
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_action_node.py | tests/metagpt/actions/test_action_node.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/12/23 15:49
@Author : alexanderwu
@File : test_action_node.py
"""
from pathlib import Path
from typing import List, Optional, Tuple
import pytest
from pydantic import BaseModel, Field, ValidationError
from metagpt.actions import Action
from metagpt.actions.action_node import ActionNode, ReviewMode, ReviseMode
from metagpt.environment import Environment
from metagpt.llm import LLM
from metagpt.roles import Role
from metagpt.schema import Message
from metagpt.team import Team
from metagpt.utils.common import encode_image
@pytest.mark.asyncio
async def test_debate_two_roles():
    """Two roles watching each other's action should exchange messages in a team run."""
    action1 = Action(name="AlexSay", instruction="Express your opinion with emotion and don't repeat it")
    action2 = Action(name="BobSay", instruction="Express your opinion with emotion and don't repeat it")
    alex = Role(
        name="Alex", profile="Democratic candidate", goal="Win the election", actions=[action1], watch=[action2]
    )
    bob = Role(name="Bob", profile="Republican candidate", goal="Win the election", actions=[action2], watch=[action1])
    env = Environment(desc="US election live broadcast")
    team = Team(investment=10.0, env=env, roles=[alex, bob])
    # Kick off the debate by sending the idea to Alex; run for 3 rounds.
    history = await team.run(idea="Topic: climate change. Under 80 words per message.", send_to="Alex", n_round=3)
    assert "Alex" in history
@pytest.mark.asyncio
async def test_debate_one_role_in_env():
    """A single role inside an Environment/Team should still produce history."""
    debate = Action(name="Debate", instruction="Express your opinion with emotion and don't repeat it")
    candidate = Role(name="Alex", profile="Democratic candidate", goal="Win the election", actions=[debate])
    broadcast = Environment(desc="US election live broadcast")
    election = Team(investment=10.0, env=broadcast, roles=[candidate])
    transcript = await election.run(idea="Topic: climate change. Under 80 words per message.", send_to="Alex", n_round=3)
    assert "Alex" in transcript
@pytest.mark.asyncio
async def test_debate_one_role():
    """Running a role directly returns a Message attributed to the Role class."""
    debate = Action(name="Debate", instruction="Express your opinion with emotion and don't repeat it")
    candidate = Role(name="Alex", profile="Democratic candidate", goal="Win the election", actions=[debate])
    reply: Message = await candidate.run("Topic: climate change. Under 80 words per message.")
    assert len(reply.content) > 10
    assert reply.sent_from == "metagpt.roles.role.Role"
@pytest.mark.asyncio
async def test_action_node_one_layer():
    """compile() must embed the context and schema scaffolding for every schema."""
    node = ActionNode(key="key-a", expected_type=str, instruction="instruction-b", example="example-c")

    templates = {
        schema: node.compile(context="123", schema=schema, mode="auto") for schema in ("raw", "json", "markdown")
    }
    node_dict = node.to_dict()

    # The raw template carries the context and the instruction verbatim.
    assert "123" in templates["raw"]
    assert "instruction" in templates["raw"]

    # The json template wraps everything in tagged sections.
    for token in ("123", "format example", "constraint", "action", "[/"):
        assert token in templates["json"]

    # The markdown template names the key directly.
    assert "123" in templates["markdown"]
    assert "key-a" in templates["markdown"]

    assert node_dict["key-a"] == "instruction-b"
    assert "key-a" in repr(node)
@pytest.mark.asyncio
async def test_action_node_two_layer():
    """A parent node built from children should fill under both simple and complex strategies."""
    node_a = ActionNode(key="reasoning", expected_type=str, instruction="reasoning step by step", example="")
    node_b = ActionNode(key="answer", expected_type=str, instruction="the final answer", example="")
    root = ActionNode.from_children(key="detail answer", nodes=[node_a, node_b])
    assert "reasoning" in root.children
    assert node_b in root.children.values()

    # FIXME: ADD MARKDOWN SUPPORT. NEED TO TUNE MARKDOWN SYMBOL FIRST.
    answer1 = await root.fill(req="what's the answer to 123+456?", schema="json", strgy="simple", llm=LLM())
    assert "579" in answer1.content

    answer2 = await root.fill(req="what's the answer to 123+456?", schema="json", strgy="complex", llm=LLM())
    assert "579" in answer2.content
@pytest.mark.asyncio
async def test_action_node_review():
    """review() requires a prior fill() and flags content violating the instruction."""
    key = "Project Name"
    node_a = ActionNode(
        key=key,
        expected_type=str,
        instruction='According to the content of "Original Requirements," name the project using snake case style '
        "with underline, like 'game_2048' or 'simple_crm.",
        example="game_2048",
    )

    # Reviewing before fill() is a usage error.
    with pytest.raises(RuntimeError):
        _ = await node_a.review()

    _ = await node_a.fill(req=None, llm=LLM())
    setattr(node_a.instruct_content, key, "game snake")  # wrong content to review

    review_comments = await node_a.review(review_mode=ReviewMode.AUTO)
    assert len(review_comments) == 1
    assert list(review_comments.keys())[0] == key

    # NOTE(review): the complex strategy yields no comments on a leaf node here.
    review_comments = await node_a.review(strgy="complex", review_mode=ReviewMode.AUTO)
    assert len(review_comments) == 0

    # Same checks through a parent node containing the child.
    node = ActionNode.from_children(key="WritePRD", nodes=[node_a])
    with pytest.raises(RuntimeError):
        _ = await node.review()

    _ = await node.fill(req=None, llm=LLM())

    review_comments = await node.review(review_mode=ReviewMode.AUTO)
    assert len(review_comments) == 1
    assert list(review_comments.keys())[0] == key

    review_comments = await node.review(strgy="complex", review_mode=ReviewMode.AUTO)
    assert len(review_comments) == 1
    assert list(review_comments.keys())[0] == key
@pytest.mark.asyncio
async def test_action_node_revise():
    """revise() rewrites non-conforming content (e.g. 'game snake' -> 'game_snake')."""
    key = "Project Name"
    node_a = ActionNode(
        key=key,
        expected_type=str,
        instruction='According to the content of "Original Requirements," name the project using snake case style '
        "with underline, like 'game_2048' or 'simple_crm.",
        example="game_2048",
    )

    # Reviewing/revising before fill() is a usage error.
    with pytest.raises(RuntimeError):
        _ = await node_a.review()

    _ = await node_a.fill(req=None, llm=LLM())
    setattr(node_a.instruct_content, key, "game snake")  # wrong content to revise

    revise_contents = await node_a.revise(revise_mode=ReviseMode.AUTO)
    assert len(revise_contents) == 1
    assert "game_snake" in getattr(node_a.instruct_content, key)

    # After the fix is applied, a second revise pass finds nothing to change.
    revise_contents = await node_a.revise(strgy="complex", revise_mode=ReviseMode.AUTO)
    assert len(revise_contents) == 0

    # Same flow through a parent node containing the child.
    node = ActionNode.from_children(key="WritePRD", nodes=[node_a])
    with pytest.raises(RuntimeError):
        _ = await node.revise()

    _ = await node.fill(req=None, llm=LLM())
    setattr(node.instruct_content, key, "game snake")

    revise_contents = await node.revise(revise_mode=ReviseMode.AUTO)
    assert len(revise_contents) == 1
    assert "game_snake" in getattr(node.instruct_content, key)

    revise_contents = await node.revise(strgy="complex", revise_mode=ReviseMode.AUTO)
    assert len(revise_contents) == 1
    assert "game_snake" in getattr(node.instruct_content, key)
# Sample WriteTasks output payload exercised by the create_model_class tests below.
t_dict = {
    "Required Python third-party packages": '"""\nflask==1.1.2\npygame==2.0.1\n"""\n',
    "Required Other language third-party packages": '"""\nNo third-party packages required for other languages.\n"""\n',
    "Full API spec": '"""\nopenapi: 3.0.0\ninfo:\n title: Web Snake Game API\n version: 1.0.0\npaths:\n /game:\n get:\n summary: Get the current game state\n responses:\n \'200\':\n description: A JSON object of the game state\n post:\n summary: Send a command to the game\n requestBody:\n required: true\n content:\n application/json:\n schema:\n type: object\n properties:\n command:\n type: string\n responses:\n \'200\':\n description: A JSON object of the updated game state\n"""\n',
    "Logic Analysis": [
        ["app.py", "Main entry point for the Flask application. Handles HTTP requests and responses."],
        ["game.py", "Contains the Game and Snake classes. Handles the game logic."],
        ["static/js/script.js", "Handles user interactions and updates the game UI."],
        ["static/css/styles.css", "Defines the styles for the game UI."],
        ["templates/index.html", "The main page of the web application. Displays the game UI."],
    ],
    "Task list": ["game.py", "app.py", "static/css/styles.css", "static/js/script.js", "templates/index.html"],
    "Shared Knowledge": "\"\"\"\n'game.py' contains the Game and Snake classes which are responsible for the game logic. The Game class uses an instance of the Snake class.\n\n'app.py' is the main entry point for the Flask application. It creates an instance of the Game class and handles HTTP requests and responses.\n\n'static/js/script.js' is responsible for handling user interactions and updating the game UI based on the game state returned by 'app.py'.\n\n'static/css/styles.css' defines the styles for the game UI.\n\n'templates/index.html' is the main page of the web application. It displays the game UI and loads 'static/js/script.js' and 'static/css/styles.css'.\n\"\"\"\n",
    "Anything UNCLEAR": "We need clarification on how the high score should be stored. Should it persist across sessions (stored in a database or a file) or should it reset every time the game is restarted? Also, should the game speed increase as the snake grows, or should it remain constant throughout the game?",
}

# Deliberately incomplete payload: only one of the required fields.
t_dict_min = {
    "Required Python third-party packages": '"""\nflask==1.1.2\npygame==2.0.1\n"""\n',
}

# Field mapping (name -> (type, default)) mirroring WriteTasks' full output schema.
WRITE_TASKS_OUTPUT_MAPPING = {
    "Required Python third-party packages": (str, ...),
    "Required Other language third-party packages": (str, ...),
    "Full API spec": (str, ...),
    "Logic Analysis": (List[Tuple[str, str]], ...),
    "Task list": (List[str], ...),
    "Shared Knowledge": (str, ...),
    "Anything UNCLEAR": (str, ...),
}

# Single-field mapping used to test behavior with unrecognized extra fields.
WRITE_TASKS_OUTPUT_MAPPING_MISSING = {
    "Required Python third-party packages": (str, ...),
}
def test_create_model_class():
    """create_model_class should build a pydantic model exposing the mapped fields."""
    model_cls = ActionNode.create_model_class("test_class", WRITE_TASKS_OUTPUT_MAPPING)
    assert model_cls.__name__ == "test_class"

    instance = model_cls(**t_dict)
    schema = instance.model_json_schema()
    print(schema)
    assert schema["title"] == "test_class"
    assert schema["type"] == "object"
    assert schema["properties"]["Full API spec"]
def test_create_model_class_with_fields_unrecognized():
    """Extra keys not present in the mapping are tolerated (warning only, no raise)."""
    model_cls = ActionNode.create_model_class("test_class", WRITE_TASKS_OUTPUT_MAPPING_MISSING)
    assert model_cls.__name__ == "test_class"
    model_cls(**t_dict)  # unrecognized fields only trigger a warning
def test_create_model_class_with_fields_missing():
    """Missing required fields must fail pydantic validation."""
    model_cls = ActionNode.create_model_class("test_class", WRITE_TASKS_OUTPUT_MAPPING)
    assert model_cls.__name__ == "test_class"
    with pytest.raises(ValidationError):
        model_cls(**t_dict_min)
def test_create_model_class_with_mapping():
    """Values survive a round-trip through the generated model's model_dump()."""
    model_cls = ActionNode.create_model_class("test_class_1", WRITE_TASKS_OUTPUT_MAPPING)
    dumped = model_cls(**t_dict).model_dump()
    assert dumped["Task list"] == [
        "game.py",
        "app.py",
        "static/css/styles.css",
        "static/js/script.js",
        "templates/index.html",
    ]
@pytest.mark.asyncio
async def test_action_node_with_image(mocker):
    """fill() with an image should classify the invoice picture as an invoice."""

    # Patch _cons_kwargs to force the vision model, since the global MockLLM
    # used across the unit tests would otherwise select a non-vision model.
    def _cons_kwargs(self, messages: list[dict], timeout=3, **extra_kwargs) -> dict:
        kwargs = {"messages": messages, "temperature": 0.3, "model": "gpt-4-vision-preview"}
        return kwargs

    invoice = ActionNode(
        key="invoice", expected_type=bool, instruction="if it's a invoice file, return True else False", example="False"
    )
    invoice_path = Path(__file__).parent.joinpath("..", "..", "data", "invoices", "invoice-2.png")
    img_base64 = encode_image(invoice_path)
    mocker.patch("metagpt.provider.openai_api.OpenAILLM._cons_kwargs", _cons_kwargs)
    node = await invoice.fill(req="", llm=LLM(), images=[img_base64])
    assert node.instruct_content.invoice
class ToolDef(BaseModel):
    # Minimal tool description used to exercise ActionNode.from_pydantic below.
    tool_name: str = Field(default="a", description="tool name", examples=[])
    description: str = Field(default="b", description="tool description", examples=[])


class Task(BaseModel):
    # One unit of work; `tool` nests ToolDef to test nested-model handling.
    task_id: int = Field(default=1, description="task id", examples=[1, 2, 3])
    name: str = Field(default="Get data from ...", description="task name", examples=[])
    dependent_task_ids: List[int] = Field(default=[], description="dependent task ids", examples=[1, 2, 3])
    tool: ToolDef = Field(default=ToolDef(), description="tool use", examples=[])


class Tasks(BaseModel):
    # Container model holding a list of Task items.
    tasks: List[Task] = Field(default=[], description="tasks", examples=[])
def test_action_node_from_pydantic_and_print_everything():
    """Smoke-test from_pydantic: the node's compiled prompt must surface Task fields."""
    node = ActionNode.from_pydantic(Task)
    print("1. Tasks")
    print(Task().model_dump_json(indent=4))
    print(Tasks.model_json_schema())
    print("2. Task")
    print(Task.model_json_schema())
    print("3. ActionNode")
    print(node)
    print("4. node.compile prompt")
    prompt = node.compile(context="")
    assert "tool_name" in prompt, "tool_name should be in prompt"
    print(prompt)
    print("5. node.get_children_mapping")
    print(node._get_children_mapping())
    print("6. node.create_children_class")
    children_class = node._create_children_class()
    print(children_class)

    # Sanity-check that the Tasks source itself still declares the field.
    import inspect

    code = inspect.getsource(Tasks)
    print(code)
    assert "tasks" in code, "tasks should be in code"
def test_optional():
    """Optional fields may be omitted when instantiating the generated model."""
    mapping = {
        "Logic Analysis": (Optional[List[Tuple[str, str]]], Field(default=None)),
        "Task list": (Optional[List[str]], None),
        "Plan": (Optional[str], ""),
        "Anything UNCLEAR": (Optional[str], None),
    }
    model_cls = ActionNode.create_model_class("test_class_1", mapping)
    # Only one optional field supplied; the rest fall back to their defaults.
    instance = model_cls(**{"Anything UNCLEAR": "a"})
    assert instance
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_write_docstring.py | tests/metagpt/actions/test_write_docstring.py | import pytest
from metagpt.actions.write_docstring import WriteDocstring
code = """
def add_numbers(a: int, b: int):
return a + b
class Person:
def __init__(self, name: str, age: int):
self.name = name
self.age = age
def greet(self):
return f"Hello, my name is {self.name} and I am {self.age} years old."
"""
@pytest.mark.asyncio
@pytest.mark.parametrize(
    ("style", "part"),
    [
        ("google", "Args:"),
        ("numpy", "Parameters"),
        ("sphinx", ":param name:"),
    ],
    ids=["google", "numpy", "sphinx"],
)
async def test_write_docstring(style: str, part: str, context):
    """Each docstring style must leave its signature marker in the output."""
    documented = await WriteDocstring(context=context).run(code, style=style)
    assert part in documented
@pytest.mark.asyncio
async def test_write():
    """write_docstring on this very file should return non-empty code."""
    documented = await WriteDocstring.write_docstring(__file__)
    assert documented
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_write_teaching_plan.py | tests/metagpt/actions/test_write_teaching_plan.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/7/28 17:25
@Author : mashenquan
@File : test_write_teaching_plan.py
"""
import pytest
from metagpt.actions.write_teaching_plan import WriteTeachingPlanPart
@pytest.mark.asyncio
@pytest.mark.parametrize(
    ("topic", "content"),
    [("Title", "Lesson 1: Learn to draw an apple."), ("Teaching Content", "Lesson 1: Learn to draw an apple.")],
)
async def test_write_teaching_plan_part(topic, content, context):
    """Each teaching-plan topic should yield a non-empty response."""
    part_writer = WriteTeachingPlanPart(topic=topic, i_context=content, context=context)
    response = await part_writer.run()
    assert response
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_design_api.py | tests/metagpt/actions/test_design_api.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/11 19:26
@Author : alexanderwu
@File : test_design_api.py
@Modifiled By: mashenquan, 2023-12-6. According to RFC 135
"""
from pathlib import Path
import pytest
from metagpt.actions.design_api import WriteDesign
from metagpt.const import DEFAULT_WORKSPACE_ROOT, METAGPT_ROOT
from metagpt.logs import logger
from metagpt.schema import AIMessage, Message
from metagpt.utils.project_repo import ProjectRepo
from tests.data.incremental_dev_project.mock import DESIGN_SAMPLE, REFINED_PRD_JSON
@pytest.mark.asyncio
async def test_design(context):
    """WriteDesign run(): a fresh design from a PRD, then an incremental redesign."""
    # Mock new design env
    prd = "我们需要一个音乐播放器,它应该有播放、暂停、上一曲、下一曲等功能。"
    context.kwargs.project_path = context.config.project_path
    context.kwargs.inc = False
    filename = "prd.txt"
    repo = ProjectRepo(context.kwargs.project_path)
    await repo.docs.prd.save(filename=filename, content=prd)
    # Simulate the upstream WritePRD output message.
    kvs = {
        "project_path": str(context.kwargs.project_path),
        "changed_prd_filenames": [str(repo.docs.prd.workdir / filename)],
    }
    instruct_content = AIMessage.create_instruct_value(kvs=kvs, class_name="WritePRDOutput")
    design_api = WriteDesign(context=context)

    result = await design_api.run([Message(content=prd, instruct_content=instruct_content)])
    logger.info(result)
    assert result
    assert isinstance(result, AIMessage)
    assert result.instruct_content
    assert repo.docs.system_design.changed_files

    # Mock incremental design env
    context.kwargs.inc = True
    await repo.docs.prd.save(filename=filename, content=str(REFINED_PRD_JSON))
    await repo.docs.system_design.save(filename=filename, content=DESIGN_SAMPLE)

    result = await design_api.run([Message(content="", instruct_content=instruct_content)])
    logger.info(result)
    assert result
    assert isinstance(result, AIMessage)
    assert result.instruct_content
    assert repo.docs.system_design.changed_files
@pytest.mark.parametrize(
    ("user_requirement", "prd_filename", "legacy_design_filename"),
    [
        ("我们需要一个音乐播放器,它应该有播放、暂停、上一曲、下一曲等功能。", None, None),
        ("write 2048 game", str(METAGPT_ROOT / "tests/data/prd.json"), None),
        (
            "write 2048 game",
            str(METAGPT_ROOT / "tests/data/prd.json"),
            str(METAGPT_ROOT / "tests/data/system_design.json"),
        ),
    ],
)
@pytest.mark.asyncio
async def test_design_api(context, user_requirement, prd_filename, legacy_design_filename):
    """The standalone API writes into the default workspace when no output path is given."""
    action = WriteDesign()
    result = await action.run(
        user_requirement=user_requirement, prd_filename=prd_filename, legacy_design_filename=legacy_design_filename
    )
    assert isinstance(result, str)
    assert result
    # Without output_pathname the artifact lands under the default workspace.
    assert str(DEFAULT_WORKSPACE_ROOT) in result
@pytest.mark.parametrize(
    ("user_requirement", "prd_filename", "legacy_design_filename"),
    [
        ("我们需要一个音乐播放器,它应该有播放、暂停、上一曲、下一曲等功能。", None, None),
        ("write 2048 game", str(METAGPT_ROOT / "tests/data/prd.json"), None),
        (
            "write 2048 game",
            str(METAGPT_ROOT / "tests/data/prd.json"),
            str(METAGPT_ROOT / "tests/data/system_design.json"),
        ),
    ],
)
@pytest.mark.asyncio
async def test_design_api_dir(context, user_requirement, prd_filename, legacy_design_filename):
    """With an explicit output_pathname, the artifact lands in the project path."""
    action = WriteDesign()
    result = await action.run(
        user_requirement=user_requirement,
        prd_filename=prd_filename,
        legacy_design_filename=legacy_design_filename,
        output_pathname=str(Path(context.config.project_path) / "1.txt"),
    )
    assert isinstance(result, str)
    assert result
    assert str(context.config.project_path) in result
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_write_prd_an.py | tests/metagpt/actions/test_write_prd_an.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/01/03
@Author : mannaandpoem
@File : test_write_prd_an.py
"""
import pytest
from openai._models import BaseModel
from metagpt.actions.action_node import ActionNode
from metagpt.actions.write_prd import NEW_REQ_TEMPLATE
from metagpt.actions.write_prd_an import REFINED_PRD_NODE
from metagpt.llm import LLM
from tests.data.incremental_dev_project.mock import (
NEW_REQUIREMENT_SAMPLE,
PRD_SAMPLE,
REFINED_PRD_JSON,
)
@pytest.fixture()
def llm():
    # Real LLM instance; the test below mocks the node fill, so no network I/O occurs.
    return LLM()
def mock_refined_prd_json():
    # Stand-in for instruct_content.model_dump on the mocked ActionNode.
    return REFINED_PRD_JSON
@pytest.mark.asyncio
async def test_write_prd_an(mocker, llm):
    """REFINED_PRD_NODE.fill (mocked) should expose every refined PRD section.

    Fix: the original passed the module-level `llm` fixture *function* as the
    LLM (``llm=llm`` without requesting the fixture); the fixture is now
    injected via the test parameter so a real LLM instance is passed.
    """
    # Build a fake filled node whose model_dump yields the refined PRD sample.
    root = ActionNode.from_children("RefinedPRD", [ActionNode(key="", expected_type=str, instruction="", example="")])
    root.instruct_content = BaseModel()
    root.instruct_content.model_dump = mock_refined_prd_json
    mocker.patch("metagpt.actions.write_prd_an.REFINED_PRD_NODE.fill", return_value=root)

    prompt = NEW_REQ_TEMPLATE.format(
        requirements=NEW_REQUIREMENT_SAMPLE,
        old_prd=PRD_SAMPLE,
    )
    node = await REFINED_PRD_NODE.fill(req=prompt, llm=llm)

    dumped = node.instruct_content.model_dump()
    for section in (
        "Refined Requirements",
        "Refined Product Goals",
        "Refined User Stories",
        "Refined Requirement Analysis",
        "Refined Requirement Pool",
    ):
        assert section in dumped
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_write_code_plan_and_change_an.py | tests/metagpt/actions/test_write_code_plan_and_change_an.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/01/03
@Author : mannaandpoem
@File : test_write_code_plan_and_change_an.py
"""
import json
import pytest
from openai._models import BaseModel
from metagpt.actions.action_node import ActionNode
from metagpt.actions.write_code import WriteCode
from metagpt.actions.write_code_plan_and_change_an import (
REFINED_TEMPLATE,
WriteCodePlanAndChange,
)
from metagpt.logs import logger
from metagpt.schema import CodePlanAndChangeContext
from metagpt.utils.common import CodeParser
from tests.data.incremental_dev_project.mock import (
CODE_PLAN_AND_CHANGE_SAMPLE,
DESIGN_SAMPLE,
NEW_REQUIREMENT_SAMPLE,
REFINED_CODE_INPUT_SAMPLE,
REFINED_CODE_SAMPLE,
REFINED_DESIGN_JSON,
REFINED_PRD_JSON,
REFINED_TASK_JSON,
TASK_SAMPLE,
)
from tests.metagpt.actions.test_write_code import setup_inc_workdir
def mock_code_plan_and_change():
    # Stand-in for instruct_content.model_dump on the mocked ActionNode.
    return CODE_PLAN_AND_CHANGE_SAMPLE
@pytest.mark.asyncio
async def test_write_code_plan_and_change_an(mocker, context, git_dir):
    """With the node fill mocked, the action should surface plan and change sections."""
    context = setup_inc_workdir(context, inc=True)
    # Seed the incremental workspace with refined docs and the legacy source file.
    await context.repo.docs.prd.save(filename="2.json", content=json.dumps(REFINED_PRD_JSON))
    await context.repo.docs.system_design.save(filename="2.json", content=json.dumps(REFINED_DESIGN_JSON))
    await context.repo.docs.task.save(filename="2.json", content=json.dumps(REFINED_TASK_JSON))
    await context.repo.with_src_path(context.repo.old_workspace).srcs.save(
        filename="game.py", content=CodeParser.parse_code(text=REFINED_CODE_INPUT_SAMPLE)
    )

    # Fake a filled node whose model_dump yields the sample plan-and-change payload.
    root = ActionNode.from_children(
        "WriteCodePlanAndChange", [ActionNode(key="", expected_type=str, instruction="", example="")]
    )
    root.instruct_content = BaseModel()
    root.instruct_content.model_dump = mock_code_plan_and_change
    mocker.patch(
        "metagpt.actions.write_code_plan_and_change_an.WRITE_CODE_PLAN_AND_CHANGE_NODE.fill", return_value=root
    )

    code_plan_and_change_context = CodePlanAndChangeContext(
        requirement="New requirement",
        prd_filename="2.json",
        design_filename="2.json",
        task_filename="2.json",
    )
    node = await WriteCodePlanAndChange(i_context=code_plan_and_change_context, context=context).run()

    assert "Development Plan" in node.instruct_content.model_dump()
    assert "Incremental Change" in node.instruct_content.model_dump()
@pytest.mark.asyncio
async def test_refine_code(mocker):
    """With the LLM mocked, the refined-code prompt should yield code containing defs."""
    mocker.patch.object(WriteCode, "_aask", return_value=REFINED_CODE_SAMPLE)
    refine_prompt = REFINED_TEMPLATE.format(
        user_requirement=NEW_REQUIREMENT_SAMPLE,
        code_plan_and_change=CODE_PLAN_AND_CHANGE_SAMPLE,
        design=DESIGN_SAMPLE,
        task=TASK_SAMPLE,
        code=REFINED_CODE_INPUT_SAMPLE,
        logs="",
        feedback="",
        filename="game.py",
        summary_log="",
    )
    refined = await WriteCode().write_code(prompt=refine_prompt)
    assert "def" in refined
@pytest.mark.asyncio
async def test_get_old_code(context, git_dir):
    """get_old_codes() should read the legacy sources saved in the old workspace."""
    context = setup_inc_workdir(context, inc=True)
    await context.repo.with_src_path(context.repo.old_workspace).srcs.save(
        filename="game.py", content=REFINED_CODE_INPUT_SAMPLE
    )
    code_plan_and_change_context = CodePlanAndChangeContext(
        requirement="New requirement",
        prd_filename="1.json",
        design_filename="1.json",
        task_filename="1.json",
    )
    action = WriteCodePlanAndChange(context=context, i_context=code_plan_and_change_context)

    old_codes = await action.get_old_codes()
    logger.info(old_codes)
    # The sample input contains both function and class definitions.
    assert "def" in old_codes
    assert "class" in old_codes
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_rebuild_class_view.py | tests/metagpt/actions/test_rebuild_class_view.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/12/20
@Author : mashenquan
@File : test_rebuild_class_view.py
@Desc : Unit tests for rebuild_class_view.py
"""
from pathlib import Path
import pytest
from metagpt.actions.rebuild_class_view import RebuildClassView
from metagpt.llm import LLM
@pytest.mark.asyncio
async def test_rebuild(context):
    """Rebuilding class views over the metagpt package should populate the graph DB."""
    action = RebuildClassView(
        name="RedBean",
        i_context=str(Path(__file__).parent.parent.parent.parent / "metagpt"),
        llm=LLM(),
        context=context,
    )
    await action.run()
    rows = await action.graph_db.select()
    assert rows
    assert context.repo.docs.graph_repo.changed_files
@pytest.mark.parametrize(
    ("path", "direction", "diff", "want"),
    [
        ("metagpt/software_company.py", "=", ".", "metagpt/software_company.py"),
        ("metagpt/software_company.py", "+", "MetaGPT", "MetaGPT/metagpt/software_company.py"),
        ("metagpt/software_company.py", "-", "metagpt", "software_company.py"),
    ],
)
def test_align_path(path, direction, diff, want):
    """_align_root applies the '+', '-' or '=' adjustment to a relative path."""
    aligned = RebuildClassView._align_root(path=path, direction=direction, diff_path=diff)
    assert aligned == want
@pytest.mark.parametrize(
    ("path_root", "package_root", "want_direction", "want_diff"),
    [
        ("/Users/x/github/MetaGPT/metagpt", "/Users/x/github/MetaGPT/metagpt", "=", "."),
        ("/Users/x/github/MetaGPT", "/Users/x/github/MetaGPT/metagpt", "-", "metagpt"),
        ("/Users/x/github/MetaGPT/metagpt", "/Users/x/github/MetaGPT", "+", "metagpt"),
        (
            "/Users/x/github/MetaGPT-env/lib/python3.9/site-packages/moviepy",
            "/Users/x/github/MetaGPT-env/lib/python3.9/site-packages/",
            "+",
            "moviepy",
        ),
    ],
)
def test_diff_path(path_root, package_root, want_direction, want_diff):
    """_diff_path reports how path_root relates to package_root (direction + delta)."""
    got_direction, got_diff = RebuildClassView._diff_path(path_root=Path(path_root), package_root=Path(package_root))
    assert (got_direction, got_diff) == (want_direction, want_diff)
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_project_management.py | tests/metagpt/actions/test_project_management.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/11 19:12
@Author : alexanderwu
@File : test_project_management.py
"""
import json
import pytest
from metagpt.actions.project_management import WriteTasks
from metagpt.const import METAGPT_ROOT
from metagpt.logs import logger
from metagpt.schema import AIMessage, Message
from metagpt.utils.project_repo import ProjectRepo
from tests.data.incremental_dev_project.mock import (
REFINED_DESIGN_JSON,
REFINED_PRD_JSON,
TASK_SAMPLE,
)
from tests.metagpt.actions.mock_json import DESIGN, PRD
@pytest.mark.asyncio
async def test_task(context):
    """End-to-end WriteTasks run: first against a fresh project, then an incremental pass.

    Seeds the project repo with PRD and system-design docs, then checks that
    running the action reports changed task filenames in both modes.
    """
    # Mock write tasks env
    context.kwargs.project_path = context.config.project_path
    context.kwargs.inc = False
    repo = ProjectRepo(context.kwargs.project_path)
    filename = "1.txt"
    await repo.docs.prd.save(filename=filename, content=str(PRD))
    await repo.docs.system_design.save(filename=filename, content=str(DESIGN))
    # Simulate the upstream design action's output payload for the incoming message.
    kvs = {
        "project_path": context.kwargs.project_path,
        "changed_system_design_filenames": [str(repo.docs.system_design.workdir / filename)],
    }
    instruct_content = AIMessage.create_instruct_value(kvs=kvs, class_name="WriteDesignOutput")
    action = WriteTasks(context=context)
    result = await action.run([Message(content="", instruct_content=instruct_content)])
    logger.info(result)
    assert result
    assert result.instruct_content.changed_task_filenames
    # Mock incremental env: refined docs plus a pre-existing task file.
    context.kwargs.inc = True
    await repo.docs.prd.save(filename=filename, content=str(REFINED_PRD_JSON))
    await repo.docs.system_design.save(filename=filename, content=str(REFINED_DESIGN_JSON))
    await repo.docs.task.save(filename=filename, content=TASK_SAMPLE)
    result = await action.run([Message(content="", instruct_content=instruct_content)])
    logger.info(result)
    assert result
    assert result.instruct_content.changed_task_filenames
@pytest.mark.asyncio
async def test_task_api(context):
    """Drive WriteTasks directly from a design file and expect non-empty, valid JSON output."""
    action = WriteTasks()
    rsp = await action.run(design_filename=str(METAGPT_ROOT / "tests/data/system_design.json"))
    assert rsp
    assert rsp.content
    parsed = json.loads(rsp.content)
    assert parsed
# Allow running this test module directly (pytest with stdout capture disabled).
if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_run_code.py | tests/metagpt/actions/test_run_code.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/11 17:46
@Author : alexanderwu
@File : test_run_code.py
@Modifiled By: mashenquan, 2023-12-6. According to RFC 135
"""
import pytest
from metagpt.actions.run_code import RunCode
from metagpt.schema import RunCodeContext
@pytest.mark.asyncio
async def test_run_text():
    """RunCode.run_text evaluates a snippet and returns a (result, error-text) pair."""
    # A successful expression yields its value and no error output.
    value, error = await RunCode.run_text("result = 1 + 1")
    assert value == 2
    assert error == ""
    # A raising expression yields an empty result and the exception message.
    value, error = await RunCode.run_text("result = 1 / 0")
    assert value == ""
    assert "division by zero" in error
@pytest.mark.asyncio
async def test_run_script(context):
    """RunCode.run_script shells out and captures (stdout, stderr)."""
    # A successful command reports its stdout and an empty stderr.
    runner = RunCode(context=context)
    stdout, stderr = await runner.run_script(".", command=["echo", "Hello World"])
    assert stdout.strip() == "Hello World"
    assert stderr == ""
    # A failing command surfaces the traceback text on stderr.
    stdout, stderr = await RunCode(context=context).run_script(".", command=["python", "-c", "print(1/0)"])
    assert "ZeroDivisionError" in stderr
@pytest.mark.asyncio
async def test_run(context):
    """Table-driven check of RunCode.run: each run-context must yield the expected PASS/FAIL summary."""
    inputs = [
        # "text" mode evaluates the snippet in-process.
        (RunCodeContext(mode="text", code_filename="a.txt", code="result = 'helloworld'"), "PASS"),
        (
            # "script" mode shells out; echo succeeds, so the run passes.
            RunCodeContext(
                mode="script",
                code_filename="a.sh",
                code="echo 'Hello World'",
                command=["echo", "Hello World"],
                working_directory=".",
            ),
            "PASS",
        ),
        (
            # A ZeroDivisionError in the child process must be reported as a failure.
            RunCodeContext(
                mode="script",
                code_filename="a.py",
                code='python -c "print(1/0)"',
                command=["python", "-c", "print(1/0)"],
                working_directory=".",
            ),
            "FAIL",
        ),
    ]
    for ctx, result in inputs:
        rsp = await RunCode(i_context=ctx, context=context).run()
        assert result in rsp.summary
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_talk_action.py | tests/metagpt/actions/test_talk_action.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/12/28
@Author : mashenquan
@File : test_talk_action.py
"""
import pytest
from metagpt.actions.talk_action import TalkAction
from metagpt.schema import Message
@pytest.mark.asyncio
@pytest.mark.parametrize(
    ("agent_description", "language", "talk_context", "knowledge", "history_summary"),
    [
        (
            "mathematician",
            "English",
            "How old is Susie?",
            "Susie is a girl born in 2011/11/14. Today is 2023/12/3",
            "balabala... (useless words)",
        ),
        (
            "mathematician",
            "Chinese",
            "Does Susie have an apple?",
            "Susie is a girl born in 2011/11/14. Today is 2023/12/3",
            "Susie had an apple, and she ate it right now",
        ),
    ],
)
async def test_prompt(agent_description, language, talk_context, knowledge, history_summary, context):
    """TalkAction must fully render its prompt templates and answer with a Message."""
    # Prerequisites
    context.kwargs.agent_description = agent_description
    context.kwargs.language = language
    action = TalkAction(i_context=talk_context, knowledge=knowledge, history_summary=history_summary, context=context)
    # Both prompt variants must be fully formatted, with no unfilled template braces left.
    assert "{" not in action.prompt
    assert "{" not in action.prompt_gpt4
    rsp = await action.run()
    assert rsp
    assert isinstance(rsp, Message)
# Allow running this test module directly (pytest with stdout capture disabled).
if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_write_code_review.py | tests/metagpt/actions/test_write_code_review.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/11 17:45
@Author : alexanderwu
@File : test_write_code_review.py
"""
import pytest
from metagpt.actions.write_code_review import WriteCodeReview
from metagpt.schema import CodingContext, Document
@pytest.mark.asyncio
async def test_write_code_review(capfd, context):
    """Run WriteCodeReview on deliberately broken code and check a non-empty string comes back."""
    context.src_workspace = context.repo.workdir / "srcs"
    # Intentionally malformed snippet so the reviewer has something to fix.
    code = """
def add(a, b):
    return a +
"""
    coding_context = CodingContext(
        filename="math.py", design_doc=Document(content="编写一个从a加b的函数,返回a+b"), code_doc=Document(content=code)
    )
    await WriteCodeReview(i_context=coding_context, context=context).run()
    # We cannot predict the exact review output, but the rewritten code doc must be a non-empty string.
    assert isinstance(coding_context.code_doc.content, str)
    assert len(coding_context.code_doc.content) > 0
    captured = capfd.readouterr()
    print(f"输出内容: {captured.out}")
@pytest.mark.asyncio
async def test_write_code_review_inc(capfd, context):
    """Incremental mode: WriteCodeReview runs with a code-plan-and-change diff attached."""
    context.src_workspace = context.repo.workdir / "srcs"
    context.config.inc = True
    # Intentionally malformed snippet that the accompanying plan fixes.
    code = """
def add(a, b):
    return a +
"""
    # Unified-diff-style plan describing the intended fix.
    code_plan_and_change = """
def add(a, b):
-    return a +
+    return a + b
"""
    coding_context = CodingContext(
        filename="math.py",
        design_doc=Document(content="编写一个从a加b的函数,返回a+b"),
        code_doc=Document(content=code),
        code_plan_and_change_doc=Document(content=code_plan_and_change),
    )
    await WriteCodeReview(i_context=coding_context, context=context).run()
# Allow running this test module directly (pytest with stdout capture disabled).
if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_skill_action.py | tests/metagpt/actions/test_skill_action.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/9/19
@Author : mashenquan
@File : test_skill_action.py
@Desc : Unit tests.
"""
import pytest
from metagpt.actions.skill_action import ArgumentsParingAction, SkillAction
from metagpt.learn.skill_loader import Example, Parameter, Returns, Skill
class TestSkillAction:
    """Unit tests for ArgumentsParingAction and SkillAction, built around a text_to_image skill fixture."""

    # Shared Skill definition: a text-to-image capability with two string
    # parameters (text, size_type) and example invocations used for parsing.
    skill = Skill(
        name="text_to_image",
        description="Create a drawing based on the text.",
        id="text_to_image.text_to_image",
        x_prerequisite={
            "configurations": {
                "OPENAI_API_KEY": {
                    "type": "string",
                    "description": "OpenAI API key, For more details, checkout: `https://platform.openai.com/account/api-keys`",
                },
                "metagpt_tti_url": {"type": "string", "description": "Model url."},
            },
            "required": {"oneOf": ["OPENAI_API_KEY", "metagpt_tti_url"]},
        },
        parameters={
            "text": Parameter(type="string", description="The text used for image conversion."),
            "size_type": Parameter(type="string", description="size type"),
        },
        examples=[
            Example(ask="Draw a girl", answer='text_to_image(text="Draw a girl", size_type="512x512")'),
            Example(ask="Draw an apple", answer='text_to_image(text="Draw an apple", size_type="512x512")'),
        ],
        returns=Returns(type="string", format="base64"),
    )

    @pytest.mark.asyncio
    async def test_parser(self):
        """parse_arguments extracts keyword arguments from a backtick-quoted call string."""
        args = ArgumentsParingAction.parse_arguments(
            skill_name="text_to_image", txt='`text_to_image(text="Draw an apple", size_type="512x512")`'
        )
        assert args.get("text") == "Draw an apple"
        assert args.get("size_type") == "512x512"

    @pytest.mark.asyncio
    async def test_parser_action(self, mocker, context):
        """ArgumentsParingAction parses the ask, then SkillAction executes the parsed call."""
        # mock the actual image generation so no network call or API key is needed
        mocker.patch("metagpt.learn.text_to_image", return_value="https://mock.com/xxx")
        parser_action = ArgumentsParingAction(skill=self.skill, ask="Draw an apple", context=context)
        rsp = await parser_action.run()
        assert rsp
        assert parser_action.args
        assert parser_action.args.get("text") == "Draw an apple"
        assert parser_action.args.get("size_type") == "512x512"
        action = SkillAction(skill=self.skill, args=parser_action.args, context=context)
        rsp = await action.run()
        assert rsp
        # Either an inline base64 payload or a URL is acceptable output.
        assert "image/png;base64," in rsp.content or "http" in rsp.content

    @pytest.mark.parametrize(
        ("skill_name", "txt", "want"),
        [
            ("skill1", 'skill1(a="1", b="2")', {"a": "1", "b": "2"}),
            ("skill1", '(a="1", b="2")', None),
            ("skill1", 'skill1(a="1", b="2"', None),
        ],
    )
    def test_parse_arguments(self, skill_name, txt, want):
        """Malformed call strings (missing name or unbalanced parens) must yield None."""
        args = ArgumentsParingAction.parse_arguments(skill_name, txt)
        assert args == want

    @pytest.mark.asyncio
    async def test_find_and_call_function_error(self):
        """Looking up an unknown function name must raise ValueError."""
        with pytest.raises(ValueError):
            await SkillAction.find_and_call_function("dummy_call", {"a": 1})

    @pytest.mark.asyncio
    async def test_skill_action_error(self, context):
        """Running the skill with missing required args reports the error in the message content."""
        action = SkillAction(skill=self.skill, args={}, context=context)
        rsp = await action.run()
        assert "Error" in rsp.content
# Allow running this test module directly (pytest with stdout capture disabled).
if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/mock_markdown.py | tests/metagpt/actions/mock_markdown.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/18 23:51
@Author : alexanderwu
@File : mock_markdown.py
"""
PRD_SAMPLE = """## Original Requirements
The original requirement is to create a game similar to the classic text-based adventure game, Zork.
## Product Goals
```python
product_goals = [
"Create an engaging text-based adventure game",
"Ensure the game is easy to navigate and user-friendly",
"Incorporate compelling storytelling and puzzles"
]
```
## User Stories
```python
user_stories = [
"As a player, I want to be able to easily input commands so that I can interact with the game world",
"As a player, I want to explore various rooms and locations to uncover the game's story",
"As a player, I want to solve puzzles to progress in the game",
"As a player, I want to interact with various in-game objects to enhance my gameplay experience",
"As a player, I want a game that challenges my problem-solving skills and keeps me engaged"
]
```
## Competitive Analysis
```python
competitive_analysis = [
"Zork: The original text-based adventure game with complex puzzles and engaging storytelling",
"The Hitchhiker's Guide to the Galaxy: A text-based game with a unique sense of humor and challenging gameplay",
"Colossal Cave Adventure: The first text adventure game which set the standard for the genre",
"Quest: A platform that lets users create their own text adventure games",
"ChatGPT: An AI that can generate text-based adventure games",
"The Forest of Doom: A text-based game with a fantasy setting and multiple endings",
"Wizards Choice: A text-based game with RPG elements and a focus on player choice"
]
```
## Competitive Quadrant Chart
```mermaid
quadrantChart
title Reach and engagement of text-based adventure games
x-axis Low Reach --> High Reach
y-axis Low Engagement --> High Engagement
quadrant-1 High potential games
quadrant-2 Popular but less engaging games
quadrant-3 Less popular and less engaging games
quadrant-4 Popular and engaging games
"Zork": [0.9, 0.8]
"Hitchhiker's Guide": [0.7, 0.7]
"Colossal Cave Adventure": [0.8, 0.6]
"Quest": [0.4, 0.5]
"ChatGPT": [0.3, 0.6]
"Forest of Doom": [0.5, 0.4]
"Wizards Choice": [0.6, 0.5]
"Our Target Product": [0.5, 0.6]
```
## Requirement Analysis
The goal is to create a text-based adventure game similar to Zork. The game should be engaging, user-friendly, and feature compelling storytelling and puzzles. It should allow players to explore various rooms and locations, interact with in-game objects, and solve puzzles to progress. The game should also challenge players' problem-solving skills and keep them engaged.
## Requirement Pool
```python
requirement_pool = [
("Design an intuitive command input system for player interactions", "P0"),
("Create a variety of rooms and locations for players to explore", "P0"),
("Develop engaging puzzles that players need to solve to progress", "P0"),
("Incorporate a compelling story that unfolds as players explore the game world", "P1"),
("Ensure the game is user-friendly and easy to navigate", "P1")
]
```
## Anything UNCLEAR
The original requirement did not specify the platform for the game (web, mobile, desktop) or any specific features or themes for the game's story and puzzles. More information on these aspects could help in further refining the product requirements and design.
"""
DESIGN_LLM_KB_SEARCH_SAMPLE = """## Implementation approach:
The game will be developed as a console application in Python, which will allow it to be platform-independent. The game logic will be implemented using Object Oriented Programming principles.
The game will consist of different "rooms" or "locations" that the player can navigate. Each room will have different objects and puzzles that the player can interact with. The player's progress in the game will be determined by their ability to solve these puzzles.
Python's in-built data structures like lists and dictionaries will be used extensively to manage the game state, player inventory, room details, etc.
For testing, we can use the PyTest framework. This is a mature full-featured Python testing tool that helps you write better programs.
## Project Name:
```python
"adventure_game"
```
## File list:
```python
file_list = ["main.py", "room.py", "player.py", "game.py", "object.py", "puzzle.py", "test_game.py"]
```
## Data structures and interfaces:
```mermaid
classDiagram
class Room{
+__init__(self, description: str, objects: List[Object])
+get_description(self) -> str
+get_objects(self) -> List[Object]
}
class Player{
+__init__(self, current_room: Room, inventory: List[Object])
+move(self, direction: str) -> None
+get_current_room(self) -> Room
+get_inventory(self) -> List[Object]
}
class Object{
+__init__(self, name: str, description: str, is_usable: bool)
+get_name(self) -> str
+get_description(self) -> str
+is_usable(self) -> bool
}
class Puzzle{
+__init__(self, question: str, answer: str, reward: Object)
+ask_question(self) -> str
+check_answer(self, player_answer: str) -> bool
+get_reward(self) -> Object
}
class Game{
+__init__(self, player: Player)
+start(self) -> None
+end(self) -> None
}
Room "1" -- "*" Object
Player "1" -- "1" Room
Player "1" -- "*" Object
Puzzle "1" -- "1" Object
Game "1" -- "1" Player
```
## Program call flow:
```mermaid
sequenceDiagram
participant main as main.py
participant Game as Game
participant Player as Player
participant Room as Room
main->>Game: Game(player)
Game->>Player: Player(current_room, inventory)
Player->>Room: Room(description, objects)
Game->>Game: start()
Game->>Player: move(direction)
Player->>Room: get_description()
Game->>Player: get_inventory()
Game->>Game: end()
```
## Anything UNCLEAR:
The original requirements did not specify whether the game should have a save/load feature, multiplayer support, or any specific graphical user interface. More information on these aspects could help in further refining the product design and requirements.
"""
PROJECT_MANAGEMENT_SAMPLE = '''## Required Python third-party packages: Provided in requirements.txt format
```python
"pytest==6.2.5"
```
## Required Other language third-party packages: Provided in requirements.txt format
```python
```
## Full API spec: Use OpenAPI 3.0. Describe all APIs that may be used by both frontend and backend.
```python
"""
This project is a console-based application and doesn't require any API endpoints. All interactions will be done through the console interface.
"""
```
## Logic Analysis: Provided as a Python list[str, str]. the first is filename, the second is class/method/function should be implemented in this file. Analyze the dependencies between the files, which work should be done first
```python
[
("object.py", "Object"),
("room.py", "Room"),
("player.py", "Player"),
("puzzle.py", "Puzzle"),
("game.py", "Game"),
("main.py", "main"),
("test_game.py", "test_game")
]
```
## Task list: Provided as Python list[str]. Each str is a filename, the more at the beginning, the more it is a prerequisite dependency, should be done first
```python
[
"object.py",
"room.py",
"player.py",
"puzzle.py",
"game.py",
"main.py",
"test_game.py"
]
```
## Shared Knowledge: Anything that should be public like utils' functions, config's variables details that should make clear first.
```python
"""
Shared knowledge for this project includes understanding the basic principles of Object Oriented Programming, Python's built-in data structures like lists and dictionaries, and the PyTest framework for testing.
"""
```
## Anything UNCLEAR: Provide as Plain text. Try to clarify it. For example, don't forget a main entry. don't forget to init 3rd party libs.
```python
"""
The original requirements did not specify whether the game should have a save/load feature, multiplayer support, or any specific graphical user interface. More information on these aspects could help in further refining the product design and requirements.
"""
```
'''
WRITE_CODE_PROMPT_SAMPLE = """
你是一个工程师。下面是背景信息与你的当前任务,请为任务撰写代码。
撰写的代码应该符合PEP8,优雅,模块化,易于阅读与维护,代码本身应该有__main__入口来防止桩函数
## 用户编写程序所需的全部、详尽的文件路径列表(只需要相对路径,并不需要前缀,组织形式应该符合PEP规范)
- `main.py`: 主程序文件
- `search_engine.py`: 搜索引擎实现文件
- `knowledge_base.py`: 知识库管理文件
- `user_interface.py`: 用户界面文件
- `data_import.py`: 数据导入功能文件
- `data_export.py`: 数据导出功能文件
- `utils.py`: 工具函数文件
## 数据结构
- `KnowledgeBase`: 知识库类,用于管理私有知识库的内容、分类、标签和关键词。
- `SearchEngine`: 搜索引擎类,基于大语言模型,用于对用户输入的关键词或短语进行语义理解,并提供准确的搜索结果。
- `SearchResult`: 搜索结果类,包含与用户搜索意图相关的知识库内容的相关信息。
- `UserInterface`: 用户界面类,提供简洁、直观的用户界面,支持多种搜索方式和搜索结果的排序和过滤。
- `DataImporter`: 数据导入类,支持多种数据格式的导入功能,用于将外部数据导入到知识库中。
- `DataExporter`: 数据导出类,支持多种数据格式的导出功能,用于将知识库内容进行备份和分享。
## API接口
- `KnowledgeBase`类接口:
- `add_entry(entry: str, category: str, tags: List[str], keywords: List[str]) -> bool`: 添加知识库条目。
- `delete_entry(entry_id: str) -> bool`: 删除知识库条目。
- `update_entry(entry_id: str, entry: str, category: str, tags: List[str], keywords: List[str]) -> bool`: 更新知识库条目。
- `search_entries(query: str) -> List[str]`: 根据查询词搜索知识库条目。
- `SearchEngine`类接口:
- `search(query: str) -> SearchResult`: 根据用户查询词进行搜索,返回与查询意图相关的搜索结果。
- `UserInterface`类接口:
- `display_search_results(results: List[SearchResult]) -> None`: 显示搜索结果。
- `filter_results(results: List[SearchResult], filters: Dict[str, Any]) -> List[SearchResult]`: 根据过滤条件对搜索结果进行过滤。
- `sort_results(results: List[SearchResult], key: str, reverse: bool = False) -> List[SearchResult]`: 根据指定的键对搜索结果进行排序。
- `DataImporter`类接口:
- `import_data(file_path: str) -> bool`: 导入外部数据到知识库。
- `DataExporter`类接口:
- `export_data(file_path: str) -> bool`: 导出知识库数据到外部文件。
## 调用流程(以dot语言描述)
```dot
digraph call_flow {
rankdir=LR;
subgraph cluster_user_program {
label="User Program";
style=dotted;
main_py -> search_engine_py;
main_py -> knowledge_base_py;
main_py -> user_interface_py;
main_py -> data_import_py;
main_py -> data_export_py;
search_engine_py -> knowledge_base_py;
search_engine_py -> user_interface_py;
user_interface_py -> knowledge_base_py;
user_interface_py -> search_engine_py;
data_import_py -> knowledge_base_py;
data_import_py -> user_interface_py;
data_export_py -> knowledge_base_py;
data_export_py -> user_interface_py;
}
main_py [label="main.py"];
search_engine_py [label="search_engine.py"];
knowledge_base_py [label="knowledge_base.py"];
user_interface_py [label="user_interface.py"];
data_import_py [label="data_import.py"];
data_export_py [label="data_export.py"];
}
```
这是一个简化的调用流程图,展示了各个模块之间的调用关系。用户程序的`main.py`文件通过调用其他模块实现搜索引擎的功能。`search_engine.py`模块与`knowledge_base.py`和`user_interface.py`模块进行交互,实现搜索算法和搜索结果的展示。`data_import.py`和`data_export.py`模块与`knowledge_base.py`和`user_interface.py`模块进行交互,实现数据导入和导出的功能。用户界面模块`user_interface.py`与其他模块进行交互,提供简洁、直观的用户界面,并支持搜索方式、排序和过滤等操作。
## 当前任务
"""
TASKS = [
"添加数据API:接受用户输入的文档库,对文档库进行索引\n- 使用MeiliSearch连接并添加文档库",
"搜索API:接收用户输入的关键词,返回相关的搜索结果\n- 使用MeiliSearch连接并使用接口获得对应数据",
"多条件筛选API:接收用户选择的筛选条件,返回符合条件的搜索结果。\n- 使用MeiliSearch进行筛选并返回符合条件的搜索结果",
"智能推荐API:根据用户的搜索历史记录和搜索行为,推荐相关的搜索结果。",
]
TASKS_2 = ["完成main.py的功能"]
SEARCH_CODE_SAMPLE = """
import requests
class SearchAPI:
def __init__(self, elastic_search_url):
self.elastic_search_url = elastic_search_url
def search(self, keyword):
# 构建搜索请求的参数
params = {
'q': keyword,
'size': 10 # 返回结果数量
}
try:
# 发送搜索请求
response = requests.get(self.elastic_search_url, params=params)
if response.status_code == 200:
# 解析搜索结果
search_results = response.json()
formatted_results = self.format_results(search_results)
return formatted_results
else:
print('Error: Failed to retrieve search results.')
except requests.exceptions.RequestException as e:
print(f'Error: {e}')
def format_results(self, search_results):
formatted_results = []
hits = search_results.get('hits', {}).get('hits', [])
for hit in hits:
result = hit.get('_source', {})
title = result.get('title', '')
summary = result.get('summary', '')
url = result.get('url', '')
formatted_results.append({
'title': title,
'summary': summary,
'url': url
})
return formatted_results
if __name__ == '__main__':
# 使用示例
elastic_search_url = 'http://localhost:9200/search'
search_api = SearchAPI(elastic_search_url)
keyword = input('Enter search keyword: ')
results = search_api.search(keyword)
if results:
for result in results:
print(result)
else:
print('No results found.')
"""
REFINED_CODE = '''
import requests
class SearchAPI:
def __init__(self, elastic_search_url):
"""
初始化SearchAPI对象。
Args:
elastic_search_url (str): ElasticSearch的URL。
"""
self.elastic_search_url = elastic_search_url
def search(self, keyword, size=10):
"""
搜索关键词并返回相关的搜索结果。
Args:
keyword (str): 用户输入的搜索关键词。
size (int): 返回结果数量,默认为10。
Returns:
list: 包含搜索结果的列表,每个结果是一个字典,包含标题、摘要和URL等信息。如果没有搜索结果,返回一个空列表。
"""
# 构建搜索请求的参数
params = {
'q': keyword,
'size': size
}
try:
# 发送搜索请求
response = requests.get(self.elastic_search_url, params=params)
response.raise_for_status()
# 解析搜索结果
search_results = response.json()
formatted_results = self.format_results(search_results)
return formatted_results
except requests.exceptions.RequestException as e:
print(f'Error: {e}')
return None
def format_results(self, search_results):
"""
格式化搜索结果。
Args:
search_results (dict): ElasticSearch返回的搜索结果。
Returns:
list: 包含格式化搜索结果的列表,每个结果是一个字典,包含标题、摘要和URL等信息。如果搜索结果为空,返回None。
"""
if not isinstance(search_results, dict):
return None
formatted_results = []
hits = search_results.get('hits', {}).get('hits', [])
for hit in hits:
result = hit.get('_source', {})
title = result.get('title', '')
summary = result.get('summary', '')
url = result.get('url', '')
formatted_results.append({
'title': title,
'summary': summary,
'url': url
})
return formatted_results if formatted_results else None
if __name__ == '__main__':
# 使用示例
elastic_search_url = 'http://localhost:9200/search'
search_api = SearchAPI(elastic_search_url)
keyword = input('Enter search keyword: ')
results = search_api.search(keyword)
if results:
for result in results:
print(result)
else:
print('No results found.')
'''
MEILI_CODE = """import meilisearch
from typing import List
class DataSource:
def __init__(self, name: str, url: str):
self.name = name
self.url = url
class SearchEngine:
def __init__(self):
self.client = meilisearch.Client('http://localhost:7700') # MeiliSearch服务器的URL
def add_documents(self, data_source: DataSource, documents: List[dict]):
index_name = f"{data_source.name}_index"
index = self.client.get_or_create_index(index_name)
index.add_documents(documents)
# 示例用法
if __name__ == '__main__':
search_engine = SearchEngine()
# 假设有一个名为"books"的数据源,包含要添加的文档库
books_data_source = DataSource(name='books', url='https://example.com/books')
# 假设有一个名为"documents"的文档库,包含要添加的文档
documents = [
{"id": 1, "title": "Book 1", "content": "This is the content of Book 1."},
{"id": 2, "title": "Book 2", "content": "This is the content of Book 2."},
# 其他文档...
]
# 添加文档库到搜索引擎
search_engine.add_documents(books_data_source, documents)
"""
MEILI_ERROR = """/usr/local/bin/python3.9 /Users/alexanderwu/git/metagpt/examples/search/meilisearch_index.py
Traceback (most recent call last):
File "/Users/alexanderwu/git/metagpt/examples/search/meilisearch_index.py", line 44, in <module>
search_engine.add_documents(books_data_source, documents)
File "/Users/alexanderwu/git/metagpt/examples/search/meilisearch_index.py", line 25, in add_documents
index = self.client.get_or_create_index(index_name)
AttributeError: 'Client' object has no attribute 'get_or_create_index'
Process finished with exit code 1"""
MEILI_CODE_REFINED = """
"""
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/__init__.py | tests/metagpt/actions/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/11 19:35
@Author : alexanderwu
@File : __init__.py
"""
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_project_management_an.py | tests/metagpt/actions/test_project_management_an.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/01/03
@Author : mannaandpoem
@File : test_project_management_an.py
"""
import pytest
from openai._models import BaseModel
from metagpt.actions.action_node import ActionNode, dict_to_markdown
from metagpt.actions.project_management import NEW_REQ_TEMPLATE
from metagpt.actions.project_management_an import PM_NODE, REFINED_PM_NODE
from metagpt.llm import LLM
from tests.data.incremental_dev_project.mock import (
REFINED_DESIGN_JSON,
REFINED_TASK_JSON,
TASK_SAMPLE,
)
from tests.metagpt.actions.mock_json import TASK
@pytest.fixture()
def llm():
    # Provide an LLM client for tests that call ActionNode.fill; request this
    # fixture by naming `llm` as a test parameter.
    return LLM()
def mock_refined_task_json():
    # Stand-in for instruct_content.model_dump in the incremental (refined) scenario.
    return REFINED_TASK_JSON


def mock_task_json():
    # Stand-in for instruct_content.model_dump in the initial scenario.
    return TASK
@pytest.mark.asyncio
async def test_project_management_an(mocker, llm):
    """PM_NODE.fill (mocked) should produce a task breakdown with the expected sections.

    Bug fix: the test previously referenced the module-level ``llm`` fixture
    *function* instead of requesting the fixture as a parameter, so a function
    object (not an LLM instance) was passed as the ``llm`` argument.
    """
    # Build a minimal ActionNode tree and graft a mocked model_dump onto it.
    root = ActionNode.from_children(
        "ProjectManagement", [ActionNode(key="", expected_type=str, instruction="", example="")]
    )
    root.instruct_content = BaseModel()
    root.instruct_content.model_dump = mock_task_json
    # Short-circuit the LLM call: fill() returns our prepared node.
    mocker.patch("metagpt.actions.project_management_an.PM_NODE.fill", return_value=root)
    node = await PM_NODE.fill(req=dict_to_markdown(REFINED_DESIGN_JSON), llm=llm)
    output = node.instruct_content.model_dump()
    assert "Logic Analysis" in output
    assert "Task list" in output
    assert "Shared Knowledge" in output
@pytest.mark.asyncio
async def test_project_management_an_inc(mocker, llm):
    """REFINED_PM_NODE.fill (mocked) should produce the refined task sections.

    Bug fix: request the ``llm`` fixture as a parameter instead of referencing
    the module-level fixture *function*, which was previously passed as the
    ``llm`` argument.
    """
    # Build a minimal ActionNode tree and graft a mocked model_dump onto it.
    root = ActionNode.from_children(
        "RefinedProjectManagement", [ActionNode(key="", expected_type=str, instruction="", example="")]
    )
    root.instruct_content = BaseModel()
    root.instruct_content.model_dump = mock_refined_task_json
    # Short-circuit the LLM call: fill() returns our prepared node.
    mocker.patch("metagpt.actions.project_management_an.REFINED_PM_NODE.fill", return_value=root)
    prompt = NEW_REQ_TEMPLATE.format(old_task=TASK_SAMPLE, context=dict_to_markdown(REFINED_DESIGN_JSON))
    node = await REFINED_PM_NODE.fill(req=prompt, llm=llm)
    output = node.instruct_content.model_dump()
    assert "Refined Logic Analysis" in output
    assert "Refined Task list" in output
    assert "Refined Shared Knowledge" in output
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_action_multi_llm.py | tests/metagpt/actions/test_action_multi_llm.py | from metagpt.actions.action import Action
from metagpt.config2 import Config
from metagpt.const import TEST_DATA_PATH
from metagpt.context import Context
from metagpt.provider.llm_provider_registry import create_llm_instance
from metagpt.roles.role import Role
def test_set_llm():
    """Actions keep an explicitly-assigned LLM, and per-action configs override the context's."""
    config1 = Config.default()
    config2 = Config.default()
    config2.llm.model = "gpt-3.5-turbo"

    context = Context(config=config1)
    act = Action(context=context)
    # By default the action inherits the context's LLM config.
    assert act.config.llm.model == config1.llm.model

    llm2 = create_llm_instance(config2.llm)
    act.llm = llm2
    assert act.llm.model == llm2.model

    # Attaching the action to a role must not clobber the explicitly-set LLM.
    role = Role(context=context)
    role.set_actions([act])
    assert act.llm.model == llm2.model

    role1 = Role(context=context)
    act1 = Action(context=context)
    assert act1.config.llm.model == config1.llm.model
    # Overriding the action's config takes effect once the role adopts the action.
    act1.config = config2
    role1.set_actions([act1])
    assert act1.llm.model == llm2.model

    # multiple LLM: a named model from a multi-LLM config file wins over config.llm
    config3_path = TEST_DATA_PATH / "config/config2_multi_llm.yaml"
    dict3 = Config.read_yaml(config3_path)
    config3 = Config(**dict3)
    context3 = Context(config=config3)
    role3 = Role(context=context3)
    act3 = Action(context=context3, llm_name_or_type="YOUR_MODEL_NAME_1")
    # The action's *config* still reports the base model, while the bound LLM
    # instance resolves to the named entry from the multi-LLM config.
    assert act3.config.llm.model == "gpt-3.5-turbo"
    assert act3.llm.model == "gpt-4-turbo"
    role3.set_actions([act3])
    assert act3.config.llm.model == "gpt-3.5-turbo"
    assert act3.llm.model == "gpt-4-turbo"
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_write_code.py | tests/metagpt/actions/test_write_code.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/11 17:45
@Author : alexanderwu
@File : test_write_code.py
@Modifiled By: mashenquan, 2023-12-6. According to RFC 135
"""
import json
from pathlib import Path
import pytest
from metagpt.actions.write_code import WriteCode
from metagpt.logs import logger
from metagpt.schema import CodingContext, Document
from metagpt.utils.common import CodeParser, aread
from tests.data.incremental_dev_project.mock import (
CODE_PLAN_AND_CHANGE_SAMPLE,
REFINED_CODE_INPUT_SAMPLE,
REFINED_DESIGN_JSON,
REFINED_TASK_JSON,
)
from tests.metagpt.actions.mock_markdown import TASKS_2, WRITE_CODE_PROMPT_SAMPLE
def setup_inc_workdir(context, inc: bool = False):
    """Flip the context's config into (or out of) incremental mode and hand the context back."""
    context.config.inc = inc
    return context
@pytest.mark.asyncio
async def test_write_code(context):
    """WriteCode should turn a natural-language design doc into code containing the expected tokens."""
    # Prerequisites
    context.src_workspace = context.git_repo.workdir / "writecode"
    coding_ctx = CodingContext(
        filename="task_filename.py", design_doc=Document(content="设计一个名为'add'的函数,该函数接受两个整数作为输入,并返回它们的和。")
    )
    doc = Document(content=coding_ctx.model_dump_json())
    write_code = WriteCode(i_context=doc, context=context)
    code = await write_code.run()
    logger.info(code.model_dump_json())
    # We cannot predict the generated code exactly, but we can check for key tokens.
    assert "def add" in code.code_doc.content
    assert "return" in code.code_doc.content
@pytest.mark.asyncio
async def test_write_code_directly(context):
    """Feed the raw write-code prompt straight to the LLM and log whatever comes back."""
    full_prompt = "\n".join([WRITE_CODE_PROMPT_SAMPLE, TASKS_2[0]])
    llm = context.llm_with_cost_manager_from_llm_config(context.config.llm)
    reply = await llm.aask(full_prompt)
    logger.info(reply)
@pytest.mark.asyncio
async def test_write_code_deps(context):
    """WriteCode should pick up dependency documents (test outputs, code
    summaries, system design, tasks) from the project repo when generating code.
    """
    # Prerequisites: populate the repo with the demo project's artifacts.
    context.src_workspace = context.git_repo.workdir / "snake1/snake1"
    demo_path = Path(__file__).parent / "../../data/demo_project"
    await context.repo.test_outputs.save(
        filename="test_game.py.json", content=await aread(str(demo_path / "test_game.py.json"))
    )
    await context.repo.docs.code_summary.save(
        filename="20231221155954.json",
        content=await aread(str(demo_path / "code_summaries.json")),
    )
    await context.repo.docs.system_design.save(
        filename="20231221155954.json",
        content=await aread(str(demo_path / "system_design.json")),
    )
    await context.repo.docs.task.save(
        filename="20231221155954.json", content=await aread(str(demo_path / "tasks.json"))
    )
    await context.repo.with_src_path(context.src_workspace).srcs.save(
        filename="main.py", content='if __name__ == "__main__":\nmain()'
    )
    ccontext = CodingContext(
        filename="game.py",
        design_doc=await context.repo.docs.system_design.get(filename="20231221155954.json"),
        task_doc=await context.repo.docs.task.get(filename="20231221155954.json"),
        code_doc=Document(filename="game.py", content="", root_path="snake1"),
    )
    # Use the pydantic-v2 serializer for consistency with test_write_code above
    # (`.json()` is the deprecated v1 spelling of `.model_dump_json()`).
    coding_doc = Document(root_path="snake1", filename="game.py", content=ccontext.model_dump_json())
    action = WriteCode(i_context=coding_doc, context=context)
    rsp = await action.run()
    assert rsp
    assert rsp.code_doc.content
@pytest.mark.asyncio
async def test_write_refined_code(context, git_dir):
    """In incremental mode, WriteCode should refine legacy code according to the
    refined design/task docs and the code plan-and-change document.
    """
    # Prerequisites: incremental workdir seeded with refined docs and legacy code.
    context = setup_inc_workdir(context, inc=True)
    await context.repo.docs.system_design.save(filename="1.json", content=json.dumps(REFINED_DESIGN_JSON))
    await context.repo.docs.task.save(filename="1.json", content=json.dumps(REFINED_TASK_JSON))
    await context.repo.docs.code_plan_and_change.save(
        filename="1.json", content=json.dumps(CODE_PLAN_AND_CHANGE_SAMPLE)
    )
    # old_workspace contains the legacy code
    await context.repo.with_src_path(context.repo.old_workspace).srcs.save(
        filename="game.py", content=CodeParser.parse_code(text=REFINED_CODE_INPUT_SAMPLE)
    )
    ccontext = CodingContext(
        filename="game.py",
        design_doc=await context.repo.docs.system_design.get(filename="1.json"),
        task_doc=await context.repo.docs.task.get(filename="1.json"),
        code_plan_and_change_doc=await context.repo.docs.code_plan_and_change.get(filename="1.json"),
        code_doc=Document(filename="game.py", content="", root_path="src"),
    )
    # Use the pydantic-v2 serializer for consistency with test_write_code above
    # (`.json()` is the deprecated v1 spelling of `.model_dump_json()`).
    coding_doc = Document(root_path="src", filename="game.py", content=ccontext.model_dump_json())
    action = WriteCode(i_context=coding_doc, context=context)
    rsp = await action.run()
    assert rsp
    assert rsp.code_doc.content
@pytest.mark.asyncio
async def test_get_codes(context):
    """get_codes should collect sources relevant to the current task, excluding
    the file about to be written, and include legacy code in incremental mode.
    """
    # Prerequisites: an incremental workdir with both new and legacy sources.
    context = setup_inc_workdir(context, inc=True)
    for filename in ["game.py", "ui.py"]:
        # Plain strings: the originals carried a pointless f-prefix (no placeholders).
        await context.repo.with_src_path(context.src_workspace).srcs.save(
            filename=filename, content="# (unknown)\nnew code ..."
        )
        await context.repo.with_src_path(context.repo.old_workspace).srcs.save(
            filename=filename, content="# (unknown)\nlegacy code ..."
        )
    await context.repo.with_src_path(context.repo.old_workspace).srcs.save(
        filename="gui.py", content="# gui.py\nlegacy code ..."
    )
    await context.repo.with_src_path(context.repo.old_workspace).srcs.save(
        filename="main.py", content='# main.py\nif __name__ == "__main__":\n    main()'
    )
    task_doc = Document(filename="1.json", content=json.dumps(REFINED_TASK_JSON))
    context.repo = context.repo.with_src_path(context.src_workspace)
    # Ready to write gui.py
    codes = await WriteCode.get_codes(task_doc=task_doc, exclude="gui.py", project_repo=context.repo)
    codes_inc = await WriteCode.get_codes(task_doc=task_doc, exclude="gui.py", project_repo=context.repo, use_inc=True)
    logger.info(codes)
    logger.info(codes_inc)
    assert codes
    assert codes_inc
# Allow running this module directly; `-s` disables pytest's output capture.
if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_write_prd.py | tests/metagpt/actions/test_write_prd.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/11 17:45
@Author : alexanderwu
@File : test_write_prd.py
@Modified By: mashenquan, 2023-11-1. According to Chapter 2.2.1 and 2.2.2 of RFC 116, replace `handle` with `run`.
"""
import uuid
from pathlib import Path
import pytest
from metagpt.actions import UserRequirement, WritePRD
from metagpt.const import DEFAULT_WORKSPACE_ROOT, REQUIREMENT_FILENAME
from metagpt.logs import logger
from metagpt.roles.product_manager import ProductManager
from metagpt.roles.role import RoleReactMode
from metagpt.schema import Message
from metagpt.utils.common import any_to_str
from metagpt.utils.project_repo import ProjectRepo
from tests.data.incremental_dev_project.mock import NEW_REQUIREMENT_SAMPLE
@pytest.mark.asyncio
async def test_write_prd(new_filename, context):
    """End-to-end PRD flow: write a fresh PRD, then an incremental update.

    NOTE(review): the second half mutates global config (``context.config.inc``)
    and re-reads the archived repo, so statement order matters here.
    """
    product_manager = ProductManager(context=context)
    requirements = "开发一个基于大语言模型与私有知识库的搜索引擎,希望可以基于大语言模型进行搜索总结"
    product_manager.rc.react_mode = RoleReactMode.BY_ORDER
    prd = await product_manager.run(Message(content=requirements, cause_by=UserRequirement))
    # The product manager's final action should have been WritePRD.
    assert prd.cause_by == any_to_str(WritePRD)
    logger.info(requirements)
    logger.info(prd)
    # Assert the prd is not None or empty
    assert prd is not None
    assert prd.content != ""
    repo = ProjectRepo(context.kwargs.project_path)
    assert repo.docs.prd.changed_files
    # Commit the generated docs so the incremental pass sees a "legacy" project.
    repo.git_repo.archive()
    # Mock incremental requirement
    context.config.inc = True
    context.config.project_path = context.kwargs.project_path
    repo = ProjectRepo(context.config.project_path)
    await repo.docs.save(filename=REQUIREMENT_FILENAME, content=NEW_REQUIREMENT_SAMPLE)
    action = WritePRD(context=context)
    prd = await action.run([Message(content=NEW_REQUIREMENT_SAMPLE, instruct_content=None)])
    logger.info(NEW_REQUIREMENT_SAMPLE)
    logger.info(prd)
    # Assert the prd is not None or empty
    assert prd is not None
    assert prd.content != ""
    assert repo.git_repo.changed_files
@pytest.mark.asyncio
async def test_fix_debug(new_filename, context, git_dir):
    """WritePRD should handle a bug-fix style requirement against a legacy project."""
    # Simulate a pre-existing project containing broken source code.
    context.kwargs.project_path = str(git_dir)
    repo = ProjectRepo(context.kwargs.project_path)
    repo.with_src_path(git_dir.name)
    await repo.srcs.save(filename="main.py", content='if __name__ == "__main__":\nmain()')
    bug_report = "ValueError: undefined variable `st`."
    await repo.docs.save(filename=REQUIREMENT_FILENAME, content=bug_report)
    prd = await WritePRD(context=context).run([Message(content=bug_report, instruct_content=None)])
    logger.info(prd)
    # The action must still produce a non-empty PRD for a debug requirement.
    assert prd is not None
    assert prd.content != ""
@pytest.mark.asyncio
async def test_write_prd_api(context):
    """Exercise the standalone WritePRD API: fresh PRDs and incremental updates."""
    action = WritePRD()

    def check(output, expected_dir: str):
        # Every call returns a non-empty string mentioning the output directory.
        assert isinstance(output, str)
        assert output
        assert expected_dir in output

    # New PRD, default output location.
    result = await action.run(user_requirement="write a snake game.")
    check(result, str(DEFAULT_WORKSPACE_ROOT))
    # New PRD, explicit output path.
    result = await action.run(
        user_requirement="write a snake game.",
        output_pathname=str(Path(context.config.project_path) / f"{uuid.uuid4().hex}.json"),
    )
    check(result, str(context.config.project_path))
    # Extract the generated PRD filename from the "<label>:<path>" response.
    ix = result.find(":")
    legacy_prd_filename = result[ix + 1 :].replace('"', "").strip()
    # Incremental update against the legacy PRD, default output location.
    result = await action.run(user_requirement="Add moving enemy.", legacy_prd_filename=legacy_prd_filename)
    check(result, str(DEFAULT_WORKSPACE_ROOT))
    # Incremental update, explicit output path.
    result = await action.run(
        user_requirement="Add moving enemy.",
        output_pathname=str(Path(context.config.project_path) / f"{uuid.uuid4().hex}.json"),
        legacy_prd_filename=legacy_prd_filename,
    )
    check(result, str(context.config.project_path))
# Allow running this module directly; `-s` disables pytest's output capture.
if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_import_repo.py | tests/metagpt/actions/test_import_repo.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from metagpt.actions.import_repo import ImportRepo
from metagpt.context import Context
from metagpt.utils.common import list_files
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "repo_path",
    [
        "https://github.com/spec-first/connexion.git",
        # "https://github.com/geekan/MetaGPT.git"
    ],
)
@pytest.mark.skip
async def test_import_repo(repo_path):
    """Importing an existing repo should reverse-engineer matching PRD and design docs."""
    context = Context()
    await ImportRepo(repo_path=repo_path, context=context).run()
    assert context.repo
    prd_files = list_files(context.repo.docs.prd.workdir)
    design_files = list_files(context.repo.docs.system_design.workdir)
    assert prd_files
    assert design_files
    # The PRD and its system design share the same file stem.
    assert prd_files[0].stem == design_files[0].stem
# Allow running this module directly; `-s` disables pytest's output capture.
if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_extract_readme.py | tests/metagpt/actions/test_extract_readme.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from pathlib import Path
import pytest
from metagpt.actions.extract_readme import ExtractReadMe
from metagpt.llm import LLM
@pytest.mark.asyncio
async def test_learn_readme(context):
    """ExtractReadMe should populate the graph DB from the repository's README."""
    # Four parents up from this file is the repository root.
    repo_root = Path(__file__).parent.parent.parent.parent
    action = ExtractReadMe(
        name="RedBean",
        i_context=str(repo_root),
        llm=LLM(),
        context=context,
    )
    await action.run()
    # Facts were extracted into the graph DB and the graph repo was touched.
    assert await action.graph_db.select()
    assert context.repo.docs.graph_repo.changed_files
# Allow running this module directly; `-s` disables pytest's output capture.
if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/test_action_outcls_registry.py | tests/metagpt/actions/test_action_outcls_registry.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : unittest of action_outcls_registry
from typing import List
from metagpt.actions.action_node import ActionNode
def test_action_outcls_registry():
    """The ActionNode output-class registry must return equal classes for
    equivalent mappings, regardless of argument style (positional/keyword/
    instance-method), key order, or `list[str]` vs `typing.List[str]` spelling.

    NOTE(review): the distinct dict literals below are deliberate — reusing one
    mapping object would weaken what the registry-cache test exercises.
    """
    class_name = "test"
    out_mapping = {"field": (list[str], ...), "field1": (str, ...)}
    out_data = {"field": ["field value1", "field value2"], "field1": "field1 value1"}
    # Baseline: positional class name, keyword mapping.
    outcls = ActionNode.create_model_class(class_name, mapping=out_mapping)
    outinst = outcls(**out_data)
    # All-keyword call must hit the same registry entry.
    outcls1 = ActionNode.create_model_class(class_name=class_name, mapping=out_mapping)
    outinst1 = outcls1(**out_data)
    assert outinst1 == outinst
    # Creating through an ActionNode instance must also reuse the registered class.
    outcls2 = ActionNode(key="", expected_type=str, instruction="", example="").create_model_class(
        class_name, out_mapping
    )
    outinst2 = outcls2(**out_data)
    assert outinst2 == outinst
    out_mapping = {"field1": (str, ...), "field": (list[str], ...)}  # different order
    outcls3 = ActionNode.create_model_class(class_name=class_name, mapping=out_mapping)
    outinst3 = outcls3(**out_data)
    assert outinst3 == outinst
    out_mapping2 = {"field1": (str, ...), "field": (List[str], ...)}  # typing case
    outcls4 = ActionNode.create_model_class(class_name=class_name, mapping=out_mapping2)
    outinst4 = outcls4(**out_data)
    assert outinst4 == outinst
    out_data2 = {"field2": ["field2 value1", "field2 value2"], "field1": "field1 value1"}
    out_mapping = {"field1": (str, ...), "field2": (List[str], ...)}  # List first
    outcls5 = ActionNode.create_model_class(class_name, out_mapping)
    outinst5 = outcls5(**out_data2)
    out_mapping = {"field1": (str, ...), "field2": (list[str], ...)}
    outcls6 = ActionNode.create_model_class(class_name, out_mapping)
    outinst6 = outcls6(**out_data2)
    assert outinst5 == outinst6
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/requirement_analysis/__init__.py | tests/metagpt/actions/requirement_analysis/__init__.py | python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false | |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/requirement_analysis/requirement/test_pic2txt.py | tests/metagpt/actions/requirement_analysis/requirement/test_pic2txt.py | import pytest
from metagpt.actions.requirement_analysis.requirement.pic2txt import Pic2Txt
from metagpt.const import TEST_DATA_PATH
from metagpt.utils.common import aread
@pytest.mark.asyncio
async def test_pic2txt(context):
    """Pic2Txt should merge textual requirements with those extracted from images."""
    image_paths = [TEST_DATA_PATH / f"requirements/pic/{i}.png" for i in (1, 2, 3)]
    textual = await aread(filename=TEST_DATA_PATH / "requirements/1.original_requirement.txt")
    rsp = await Pic2Txt(context=context).run(
        image_paths=image_paths,
        textual_user_requirement=textual,
    )
    assert rsp
# Allow running this module directly; `-s` disables pytest's output capture.
if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/requirement_analysis/requirement/__init__.py | tests/metagpt/actions/requirement_analysis/requirement/__init__.py | python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false | |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/di/test_execute_nb_code.py | tests/metagpt/actions/di/test_execute_nb_code.py | import pytest
from metagpt.actions.di.execute_nb_code import ExecuteNbCode
@pytest.mark.asyncio
async def test_code_running():
    """A trivial print must execute successfully in the notebook kernel."""
    executor = ExecuteNbCode()
    _, ok = await executor.run("print('hello world!')")
    assert ok
    await executor.terminate()
@pytest.mark.asyncio
async def test_split_code_running():
    """Kernel state must persist across separately executed cells."""
    executor = ExecuteNbCode()
    for cell in ("x=1\ny=2", "z=x+y"):
        await executor.run(cell)
    _, ok = await executor.run("assert z==3")
    assert ok
    await executor.terminate()
@pytest.mark.asyncio
async def test_execute_error():
    """A division by zero must be reported as a failed execution."""
    executor = ExecuteNbCode()
    _, ok = await executor.run("z=1/0")
    assert not ok
    await executor.terminate()
PLOT_CODE = """
import numpy as np
import matplotlib.pyplot as plt
# 生成随机数据
random_data = np.random.randn(1000) # 生成1000个符合标准正态分布的随机数
# 绘制直方图
plt.hist(random_data, bins=30, density=True, alpha=0.7, color='blue', edgecolor='black')
# 添加标题和标签
plt.title('Histogram of Random Data')
plt.xlabel('Value')
plt.ylabel('Frequency')
# 显示图形
plt.show()
plt.close()
"""
@pytest.mark.asyncio
async def test_plotting_code():
    """Matplotlib-based cells should run to completion."""
    executor = ExecuteNbCode()
    _, ok = await executor.run(PLOT_CODE)
    assert ok
    await executor.terminate()
@pytest.mark.asyncio
async def test_run_with_timeout():
    """A cell exceeding the configured timeout must fail with a timeout message."""
    executor = ExecuteNbCode(timeout=1)
    message, ok = await executor.run("import time; time.sleep(2)")
    assert not ok
    assert message.startswith("Cell execution timed out")
    await executor.terminate()
@pytest.mark.asyncio
async def test_run_code_text():
    """Python cells are executed; markdown cells are echoed back verbatim."""
    executor = ExecuteNbCode()
    message, ok = await executor.run(code='print("This is a code!")', language="python")
    assert ok
    assert "This is a code!" in message
    message, ok = await executor.run(code="# This is a code!", language="markdown")
    assert ok
    assert message == "# This is a code!"
    # Markdown containing an embedded code fence must still round-trip unchanged.
    mixed = "# Title!\n ```python\n print('This is a code!')```"
    message, ok = await executor.run(code=mixed, language="markdown")
    assert ok
    assert message == mixed
    await executor.terminate()
@pytest.mark.asyncio
# k=1 exercises a single regular terminate; k>1 terminates under continuous runs.
# NOTE: the values were previously written as [(1), (5)] — parentheses around a
# bare int are redundant (they do not create tuples), so plain ints are used.
@pytest.mark.parametrize("k", [1, 5])
async def test_terminate(k):
    """terminate() must shut down both the kernel manager and the client."""
    for _ in range(k):
        executor = ExecuteNbCode()
        await executor.run(code='print("This is a code!")', language="python")
        # The kernel must be alive right after a run...
        assert await executor.nb_client.km.is_alive()
        await executor.terminate()
        # ...and fully released after terminate().
        assert executor.nb_client.km is None
        assert executor.nb_client.kc is None
@pytest.mark.asyncio
async def test_reset():
    """reset() should dispose of the kernel manager without a full terminate."""
    executor = ExecuteNbCode()
    await executor.run(code='print("This is a code!")', language="python")
    assert await executor.nb_client.km.is_alive()
    await executor.reset()
    assert executor.nb_client.km is None
    await executor.terminate()
@pytest.mark.asyncio
async def test_parse_outputs():
    """Both pre-failure stdout and the traceback must be captured from a failing cell."""
    executor = ExecuteNbCode()
    code = """
import pandas as pd
df = pd.DataFrame({'ID': [1,2,3], 'NAME': ['a', 'b', 'c']})
print(df.columns)
print(f"columns num:{len(df.columns)}")
print(df['DUMMPY_ID'])
"""
    output, ok = await executor.run(code)
    assert not ok
    # Output printed before the failure is retained alongside the KeyError.
    assert "Index(['ID', 'NAME'], dtype='object')" in output
    assert "columns num:2" in output
    assert "KeyError: 'DUMMPY_ID'" in output
    await executor.terminate()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/di/test_ask_review.py | tests/metagpt/actions/di/test_ask_review.py | import pytest
from metagpt.actions.di.ask_review import AskReview
@pytest.mark.asyncio
async def test_ask_review(mocker):
    """AskReview should return the human's input plus a confirmation flag."""
    # Stub the interactive prompt so the test needs no human.
    mocker.patch("metagpt.actions.di.ask_review.get_human_input", return_value="confirm")
    rsp, confirmed = await AskReview().run()
    assert rsp == "confirm"
    assert confirmed
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/di/test_write_analysis_code.py | tests/metagpt/actions/di/test_write_analysis_code.py | import pytest
from metagpt.actions.di.write_analysis_code import WriteAnalysisCode
from metagpt.schema import Message
@pytest.mark.asyncio
async def test_write_code_with_plan():
    """Code written under a plan should target the sklearn Iris dataset."""
    writer = WriteAnalysisCode()
    user_requirement = "Run data analysis on sklearn Iris dataset, include a plot"
    # Plan status fixture: one current task (EDA on Iris) and empty finished tasks.
    plan_status = "\n## Finished Tasks\n### code\n```python\n\n```\n\n### execution result\n\n\n## Current Task\nLoad the sklearn Iris dataset and perform exploratory data analysis\n\n## Task Guidance\nWrite complete code for 'Current Task'. And avoid duplicating code from 'Finished Tasks', such as repeated import of packages, reading data, etc.\nSpecifically, \nThe current task is about exploratory data analysis, please note the following:\n- Distinguish column types with `select_dtypes` for tailored analysis and visualization, such as correlation.\n- Remember to `import numpy as np` before using Numpy functions.\n\n"
    generated = await writer.run(user_requirement=user_requirement, plan_status=plan_status)
    assert generated
    assert "sklearn" in generated
@pytest.mark.asyncio
async def test_write_code_with_tools():
    """When tool info is supplied, the generated code must import from the tool library.

    NOTE(review): the tool_info literal below is the exact prompt fixture — it is
    kept verbatim because the assertion depends on the model seeing this text.
    """
    write_code = WriteAnalysisCode()
    user_requirement = "Preprocess sklearn Wine recognition dataset and train a model to predict wine class (20% as validation), and show validation accuracy."
    tool_info = """
## Capabilities
- You can utilize pre-defined tools in any code lines from 'Available Tools' in the form of Python class or function.
- You can freely combine the use of any other public packages, like sklearn, numpy, pandas, etc..
## Available Tools:
Each tool is described in JSON format. When you call a tool, import the tool from its path first.
{'FillMissingValue': {'type': 'class', 'description': 'Completing missing values with simple strategies.', 'methods': {'__init__': {'type': 'function', 'description': 'Initialize self. ', 'signature': '(self, features: \'list\', strategy: "Literal[\'mean\', \'median\', \'most_frequent\', \'constant\']" = \'mean\', fill_value=None)', 'parameters': 'Args: features (list): Columns to be processed. strategy (Literal["mean", "median", "most_frequent", "constant"], optional): The imputation strategy, notice \'mean\' and \'median\' can only be used for numeric features. Defaults to \'mean\'. fill_value (int, optional): Fill_value is used to replace all occurrences of missing_values. Defaults to None.'}, 'fit': {'type': 'function', 'description': 'Fit a model to be used in subsequent transform. ', 'signature': "(self, df: 'pd.DataFrame')", 'parameters': 'Args: df (pd.DataFrame): The input DataFrame.'}, 'fit_transform': {'type': 'function', 'description': 'Fit and transform the input DataFrame. ', 'signature': "(self, df: 'pd.DataFrame') -> 'pd.DataFrame'", 'parameters': 'Args: df (pd.DataFrame): The input DataFrame. Returns: pd.DataFrame: The transformed DataFrame.'}, 'transform': {'type': 'function', 'description': 'Transform the input DataFrame with the fitted model. ', 'signature': "(self, df: 'pd.DataFrame') -> 'pd.DataFrame'", 'parameters': 'Args: df (pd.DataFrame): The input DataFrame. Returns: pd.DataFrame: The transformed DataFrame.'}}, 'tool_path': 'metagpt/tools/libs/data_preprocess.py'}
"""
    code = await write_code.run(user_requirement=user_requirement, tool_info=tool_info)
    assert len(code) > 0
    # The generated code must import the advertised tool from the project library.
    assert "metagpt.tools.libs" in code
@pytest.mark.asyncio
async def test_debug_with_reflection():
    """With reflection enabled, the writer should repair the deliberate
    read_excel-on-CSV mistake using the provided traceback.

    NOTE(review): the code/error literals are exact fixtures fed back to the
    model — they are kept verbatim.
    """
    user_requirement = "read a dataset test.csv and print its head"
    plan_status = """
## Finished Tasks
### code
```python
```
### execution result
## Current Task
import pandas and load the dataset from 'test.csv'.
## Task Guidance
Write complete code for 'Current Task'. And avoid duplicating code from 'Finished Tasks', such as repeated import of packages, reading data, etc.
Specifically,
"""
    wrong_code = """import pandas as pd\ndata = pd.read_excel('test.csv')\ndata"""  # use read_excel to read a csv
    error = """
Traceback (most recent call last):
    File "<stdin>", line 2, in <module>
    File "/Users/gary/miniconda3/envs/py39_scratch/lib/python3.9/site-packages/pandas/io/excel/_base.py", line 478, in read_excel
        io = ExcelFile(io, storage_options=storage_options, engine=engine)
    File "/Users/gary/miniconda3/envs/py39_scratch/lib/python3.9/site-packages/pandas/io/excel/_base.py", line 1500, in __init__
        raise ValueError(
ValueError: Excel file format cannot be determined, you must specify an engine manually.
"""
    # Conversation history: the failed attempt followed by the traceback.
    working_memory = [
        Message(content=wrong_code, role="assistant"),
        Message(content=error, role="user"),
    ]
    new_code = await WriteAnalysisCode().run(
        user_requirement=user_requirement,
        plan_status=plan_status,
        working_memory=working_memory,
        use_reflection=True,
    )
    assert "read_csv" in new_code  # should correct read_excel to read_csv
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/actions/di/test_write_plan.py | tests/metagpt/actions/di/test_write_plan.py | import pytest
from metagpt.actions.di.write_plan import (
Plan,
Task,
WritePlan,
precheck_update_plan_from_rsp,
)
from metagpt.schema import Message
def test_precheck_update_plan_from_rsp():
    """Precheck must validate a candidate plan update without mutating the plan."""
    plan = Plan(goal="")
    plan.add_tasks([Task(task_id="1")])
    ok, _ = precheck_update_plan_from_rsp('[{"task_id": "2"}]', plan)
    assert ok
    # The original plan is left untouched by a precheck.
    assert len(plan.tasks) == 1
    assert plan.tasks[0].task_id == "1"
    ok, _ = precheck_update_plan_from_rsp("wrong", plan)
    assert not ok
@pytest.mark.asyncio
async def test_write_plan():
    """WritePlan output must contain task ids and instructions."""
    messages = [Message("Run data analysis on sklearn Iris dataset, include a plot", role="user")]
    rsp = await WritePlan().run(context=messages)
    for expected_field in ("task_id", "instruction"):
        assert expected_field in rsp
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/memory/test_role_zero_memory.py | tests/metagpt/memory/test_role_zero_memory.py | from datetime import datetime, timedelta
import pytest
from metagpt.actions import UserRequirement
from metagpt.const import TEAMLEADER_NAME
from metagpt.memory.role_zero_memory import RoleZeroLongTermMemory
from metagpt.schema import AIMessage, LongTermMemoryItem, Message, UserMessage
class TestRoleZeroLongTermMemory:
    """Unit tests for RoleZeroLongTermMemory's short-term / long-term handoff."""

    @pytest.fixture
    def mock_memory(self, mocker) -> RoleZeroLongTermMemory:
        """Memory instance whose RAG-engine resolution is stubbed out."""
        memory = RoleZeroLongTermMemory()
        memory._resolve_rag_engine = mocker.Mock()
        return memory

    def test_add(self, mocker, mock_memory: RoleZeroLongTermMemory):
        """add() appends to short-term storage and triggers a transfer when eligible."""
        mock_memory._should_use_longterm_memory_for_add = mocker.Mock(return_value=True)
        mock_memory._transfer_to_longterm_memory = mocker.Mock()
        message = UserMessage(content="test")
        mock_memory.add(message)
        assert mock_memory.storage[-1] == message
        mock_memory._transfer_to_longterm_memory.assert_called_once()

    def test_get(self, mocker, mock_memory: RoleZeroLongTermMemory):
        """get() prepends fetched long-term memories to the short-term ones."""
        mock_memory._should_use_longterm_memory_for_get = mocker.Mock(return_value=True)
        mock_memory._build_longterm_memory_query = mocker.Mock(return_value="query")
        mock_memory._fetch_longterm_memories = mocker.Mock(return_value=[Message(content="long-term")])
        mock_memory.storage = [Message(content="short-term")]
        result = mock_memory.get()
        assert len(result) == 2
        assert result[0].content == "long-term"
        assert result[1].content == "short-term"

    def test_should_use_longterm_memory_for_add(self, mocker, mock_memory: RoleZeroLongTermMemory):
        """Transfer-on-add applies only once storage exceeds memory_k."""
        mocker.patch.object(mock_memory, "storage", [None] * 201)
        mock_memory.memory_k = 200
        # Truthiness asserted directly instead of `== True` / `== False` (E712).
        assert mock_memory._should_use_longterm_memory_for_add()
        mocker.patch.object(mock_memory, "storage", [None] * 199)
        assert not mock_memory._should_use_longterm_memory_for_add()

    @pytest.mark.parametrize(
        "k,is_last_from_user,count,expected",
        [
            (0, True, 201, False),
            (1, False, 201, False),
            (1, True, 199, False),
            (1, True, 201, True),
        ],
    )
    def test_should_use_longterm_memory_for_get(
        self, mocker, mock_memory: RoleZeroLongTermMemory, k, is_last_from_user, count, expected
    ):
        """Long-term lookup requires k>0, a trailing user requirement, and a full storage."""
        mock_memory._is_last_message_from_user_requirement = mocker.Mock(return_value=is_last_from_user)
        mocker.patch.object(mock_memory, "storage", [None] * count)
        mock_memory.memory_k = 200
        assert mock_memory._should_use_longterm_memory_for_get(k) == expected

    def test_transfer_to_longterm_memory(self, mocker, mock_memory: RoleZeroLongTermMemory):
        """The transfer pipeline passes the built item to _add_to_longterm_memory."""
        mock_item = mocker.Mock()
        mock_memory._get_longterm_memory_item = mocker.Mock(return_value=mock_item)
        mock_memory._add_to_longterm_memory = mocker.Mock()
        mock_memory._transfer_to_longterm_memory()
        mock_memory._add_to_longterm_memory.assert_called_once_with(mock_item)

    def test_get_longterm_memory_item(self, mocker, mock_memory: RoleZeroLongTermMemory):
        """The item wraps the message that just fell out of the short-term window."""
        mock_message = Message(content="test")
        mock_memory.storage = [mock_message, mock_message]
        mock_memory.memory_k = 1
        result = mock_memory._get_longterm_memory_item()
        assert isinstance(result, LongTermMemoryItem)
        assert result.message == mock_message

    def test_add_to_longterm_memory(self, mock_memory: RoleZeroLongTermMemory):
        """Items are handed to the RAG engine for indexing."""
        item = LongTermMemoryItem(message=Message(content="test"))
        mock_memory._add_to_longterm_memory(item)
        mock_memory.rag_engine.add_objs.assert_called_once_with([item])

    def test_build_longterm_memory_query(self, mocker, mock_memory: RoleZeroLongTermMemory):
        """The retrieval query is the content of the last message."""
        mock_message = Message(content="query")
        mock_memory._get_the_last_message = mocker.Mock(return_value=mock_message)
        result = mock_memory._build_longterm_memory_query()
        assert result == "query"

    def test_get_the_last_message(self, mock_memory: RoleZeroLongTermMemory):
        """Returns the most recent message in short-term storage."""
        mock_memory.storage = [Message(content="1"), Message(content="2")]
        result = mock_memory._get_the_last_message()
        assert result.content == "2"

    @pytest.mark.parametrize(
        "message,expected",
        [
            (UserMessage(content="test", cause_by=UserRequirement), True),
            (UserMessage(content="test", sent_from=TEAMLEADER_NAME), True),
            (UserMessage(content="test"), True),
            (AIMessage(content="test"), False),
            (None, False),
        ],
    )
    def test_is_last_message_from_user_requirement(
        self, mocker, mock_memory: RoleZeroLongTermMemory, message, expected
    ):
        """User messages (any origin) count as requirements; AI/absent ones do not."""
        mock_memory._get_the_last_message = mocker.Mock(return_value=message)
        assert mock_memory._is_last_message_from_user_requirement() == expected

    def test_fetch_longterm_memories(self, mocker, mock_memory: RoleZeroLongTermMemory):
        """Fetched nodes are unpacked into their underlying messages."""
        mock_nodes = [mocker.Mock(), mocker.Mock()]
        mock_memory.rag_engine.retrieve = mocker.Mock(return_value=mock_nodes)
        mock_items = [
            LongTermMemoryItem(message=UserMessage(content="user1")),
            LongTermMemoryItem(message=AIMessage(content="ai1")),
        ]
        mock_memory._get_items_from_nodes = mocker.Mock(return_value=mock_items)
        result = mock_memory._fetch_longterm_memories("query")
        assert len(result) == 2
        assert result[0].content == "user1"
        assert result[1].content == "ai1"

    def test_get_items_from_nodes(self, mocker, mock_memory: RoleZeroLongTermMemory):
        """Items extracted from nodes are sorted by creation time, oldest first."""
        now = datetime.now()
        mock_nodes = [
            mocker.Mock(
                metadata={
                    "obj": LongTermMemoryItem(
                        message=Message(content="2"), created_at=(now - timedelta(minutes=1)).timestamp()
                    )
                }
            ),
            mocker.Mock(
                metadata={
                    "obj": LongTermMemoryItem(
                        message=Message(content="1"), created_at=(now - timedelta(minutes=2)).timestamp()
                    )
                }
            ),
            mocker.Mock(metadata={"obj": LongTermMemoryItem(message=Message(content="3"), created_at=now.timestamp())}),
        ]
        result = mock_memory._get_items_from_nodes(mock_nodes)
        assert len(result) == 3
        assert [item.message.content for item in result] == ["1", "2", "3"]
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/memory/test_memory_storage.py | tests/metagpt/memory/test_memory_storage.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Desc : the unittests of metagpt/memory/memory_storage.py
"""
import shutil
from pathlib import Path
from typing import List
import pytest
from metagpt.actions import UserRequirement, WritePRD
from metagpt.actions.action_node import ActionNode
from metagpt.const import DATA_PATH
from metagpt.memory.memory_storage import MemoryStorage
from metagpt.schema import Message
from tests.metagpt.memory.mock_text_embed import (
mock_openai_aembed_document,
mock_openai_embed_document,
mock_openai_embed_documents,
text_embed_arr,
)
@pytest.mark.asyncio
async def test_idea_message(mocker):
    """A stored idea should be retrievable by a similar query but not an unrelated one."""
    # Stub OpenAI embeddings with deterministic fakes (no network calls).
    mocker.patch("llama_index.embeddings.openai.base.OpenAIEmbedding._get_text_embeddings", mock_openai_embed_documents)
    mocker.patch("llama_index.embeddings.openai.base.OpenAIEmbedding._get_text_embedding", mock_openai_embed_document)
    mocker.patch(
        "llama_index.embeddings.openai.base.OpenAIEmbedding._aget_query_embedding", mock_openai_aembed_document
    )
    idea = text_embed_arr[0].get("text", "Write a cli snake game")
    role_id = "UTUser1(Product Manager)"
    message = Message(role="User", content=idea, cause_by=UserRequirement)
    # Start from a clean persisted memory for this role.
    shutil.rmtree(Path(DATA_PATH / f"role_mem/{role_id}/"), ignore_errors=True)
    memory_storage: MemoryStorage = MemoryStorage()
    memory_storage.recover_memory(role_id)
    memory_storage.add(message)
    assert memory_storage.is_initialized is True
    sim_idea = text_embed_arr[1].get("text", "Write a game of cli snake")
    sim_message = Message(role="User", content=sim_idea, cause_by=UserRequirement)
    new_messages = await memory_storage.search_similar(sim_message)
    assert len(new_messages) == 1  # the similar idea is retrieved
    new_idea = text_embed_arr[2].get("text", "Write a 2048 web game")
    new_message = Message(role="User", content=new_idea, cause_by=UserRequirement)
    new_messages = await memory_storage.search_similar(new_message)
    assert len(new_messages) == 0  # an unrelated idea yields no matches
    memory_storage.clean()
    assert memory_storage.is_initialized is False
@pytest.mark.asyncio
async def test_actionout_message(mocker):
    """Similar action-output messages are found by similarity search; unrelated ones are not."""
    # Stub OpenAI embeddings with deterministic fakes (no network calls).
    mocker.patch("llama_index.embeddings.openai.base.OpenAIEmbedding._get_text_embeddings", mock_openai_embed_documents)
    mocker.patch("llama_index.embeddings.openai.base.OpenAIEmbedding._get_text_embedding", mock_openai_embed_document)
    mocker.patch(
        "llama_index.embeddings.openai.base.OpenAIEmbedding._aget_query_embedding", mock_openai_aembed_document
    )
    out_mapping = {"field1": (str, ...), "field2": (List[str], ...)}
    out_data = {"field1": "field1 value", "field2": ["field2 value1", "field2 value2"]}
    ic_obj = ActionNode.create_model_class("prd", out_mapping)
    role_id = "UTUser2(Architect)"
    # Start from a clean persisted memory for this role.
    shutil.rmtree(Path(DATA_PATH / f"role_mem/{role_id}/"), ignore_errors=True)
    storage = MemoryStorage()
    storage.recover_memory(role_id)
    content = text_embed_arr[4].get(
        "text", "The user has requested the creation of a command-line interface (CLI) snake game"
    )
    # WritePRD serves as the test action here.
    storage.add(Message(content=content, instruct_content=ic_obj(**out_data), role="user", cause_by=WritePRD))
    assert storage.is_initialized is True
    # A paraphrase of the stored message is retrieved as similar.
    sim_content = text_embed_arr[5].get("text", "The request is command-line interface (CLI) snake game")
    sim_message = Message(content=sim_content, instruct_content=ic_obj(**out_data), role="user", cause_by=WritePRD)
    assert len(await storage.search_similar(sim_message)) == 1
    # An unrelated message yields no matches.
    new_content = text_embed_arr[6].get(
        "text", "Incorporate basic features of a snake game such as scoring and increasing difficulty"
    )
    new_message = Message(content=new_content, instruct_content=ic_obj(**out_data), role="user", cause_by=WritePRD)
    assert len(await storage.search_similar(new_message)) == 0
    storage.clean()
    assert storage.is_initialized is False
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/memory/mock_text_embed.py | tests/metagpt/memory/mock_text_embed.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
import numpy as np
# Dimensionality of OpenAI's text embeddings; the mock vectors mirror it.
dim = 1536  # openai embedding dim
# Two degenerate embeddings (each a 1 x dim nested list): all-zeros and all-ones.
# Texts sharing the same vector are treated as "similar" by the vector store.
embed_zeros_arrr = np.zeros(shape=[1, dim]).tolist()
embed_ones_arrr = np.ones(shape=[1, dim]).tolist()
# Fixture texts paired with their mock embeddings; related phrasings share a vector.
text_embed_arr = [
    {"text": "Write a cli snake game", "embed": embed_zeros_arrr},  # mock data, same as below
    {"text": "Write a game of cli snake", "embed": embed_zeros_arrr},
    {"text": "Write a 2048 web game", "embed": embed_ones_arrr},
    {"text": "Write a Battle City", "embed": embed_ones_arrr},
    {
        "text": "The user has requested the creation of a command-line interface (CLI) snake game",
        "embed": embed_zeros_arrr,
    },
    {"text": "The request is command-line interface (CLI) snake game", "embed": embed_zeros_arrr},
    {
        "text": "Incorporate basic features of a snake game such as scoring and increasing difficulty",
        "embed": embed_ones_arrr,
    },
]
# Reverse index: text -> position in text_embed_arr, used by the mock embedders below.
text_idx_dict = {item["text"]: idx for idx, item in enumerate(text_embed_arr)}
def mock_openai_embed_documents(self, texts: list[str], show_progress: bool = False) -> list[list[float]]:
    """Mocked ``OpenAIEmbedding._get_text_embeddings``: one canned vector per input text.

    Fixes two defects in the original: it only looked at ``texts[0]`` (so a batch of
    N texts got a single embedding back), and an unregistered text produced an opaque
    ``TypeError`` from indexing with ``None``. Unknown texts now raise a clear KeyError.

    Args:
        texts: texts to embed; each must appear in ``text_embed_arr``.
        show_progress: accepted for signature compatibility; ignored.

    Returns:
        A list with one ``dim``-length embedding per input text.
    """
    embeds = []
    for text in texts:
        idx = text_idx_dict.get(text)
        if idx is None:
            raise KeyError(f"no mock embedding registered for text: {text!r}")
        # Each fixture "embed" is a 1 x dim nested list; take its single row.
        embeds.append(text_embed_arr[idx]["embed"][0])
    return embeds
def mock_openai_embed_document(self, text: str) -> list[float]:
    """Mocked ``OpenAIEmbedding._get_text_embedding``: single-text convenience wrapper."""
    return mock_openai_embed_documents(self, [text])[0]
async def mock_openai_aembed_document(self, text: str) -> list[float]:
    # Async variant used to patch `_aget_query_embedding`; delegates to the sync mock.
    return mock_openai_embed_document(self, text)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/memory/test_memory.py | tests/metagpt/memory/test_memory.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : the unittest of Memory
from metagpt.actions import UserRequirement
from metagpt.memory.memory import Memory
from metagpt.schema import Message
def test_memory():
    """End-to-end check of Memory's add/get/delete and cause_by-index bookkeeping."""
    memory = Memory()
    message1 = Message(content="test message1", role="user1")
    message2 = Message(content="test message2", role="user2")
    message3 = Message(content="test message3", role="user1")

    # Single add followed by delete_newest round-trips to an empty store.
    memory.add(message1)
    assert memory.count() == 1
    memory.delete_newest()
    assert memory.count() == 0

    # Batch add populates both the store and the cause_by index.
    memory.add_batch([message1, message2])
    assert memory.count() == 2
    assert len(memory.index.get(message1.cause_by)) == 2

    # Lookups by role / content / action all find the expected messages.
    messages = memory.get_by_role("user1")
    assert messages[0].content == message1.content
    messages = memory.get_by_content("test message")
    assert len(messages) == 2
    messages = memory.get_by_action(UserRequirement)
    assert len(messages) == 2
    messages = memory.get_by_actions({UserRequirement})
    assert len(messages) == 2
    messages = memory.try_remember("test message")
    assert len(messages) == 2

    # get(k) returns at most k messages, capped by the store size.
    messages = memory.get(k=1)
    assert len(messages) == 1
    messages = memory.get(k=5)
    assert len(messages) == 2

    # message3 was never added, so it counts as news.
    messages = memory.find_news([message3])
    assert len(messages) == 1

    # Targeted delete leaves message2; clear wipes both store and index.
    memory.delete(message1)
    assert memory.count() == 1
    messages = memory.get_by_role("user2")
    assert messages[0].content == message2.content
    memory.clear()
    assert memory.count() == 0
    assert len(memory.index) == 0
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/memory/__init__.py | tests/metagpt/memory/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/memory/test_brain_memory.py | tests/metagpt/memory/test_brain_memory.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/8/27
@Author : mashenquan
@File : test_brain_memory.py
"""
import pytest
from metagpt.llm import LLM
from metagpt.memory.brain_memory import BrainMemory
from metagpt.schema import Message
@pytest.mark.asyncio
async def test_memory():
    """Exercise BrainMemory's talk/answer bookkeeping and its redis dump/load round-trip."""
    brain = BrainMemory()
    brain.add_talk(Message(content="talk"))
    assert brain.history[0].role == "user"
    brain.add_answer(Message(content="answer"))
    assert brain.history[1].role == "assistant"

    redis_key = BrainMemory.to_redis_key("none", "user_id", "chat_id")
    await brain.dumps(redis_key=redis_key)
    assert brain.exists("talk")

    assert brain.to_int("1", 0) == 1
    brain.last_talk = "AAA"
    assert brain.pop_last_talk() == "AAA"
    assert brain.last_talk is None  # pop consumes the pending talk
    assert brain.is_history_available
    assert brain.history_text

    restored = await BrainMemory.loads(redis_key=redis_key)
    assert restored
@pytest.mark.parametrize(
    ("input", "tag", "val"),
    [("[TALK]:Hello", "TALK", "Hello"), ("Hello", None, "Hello"), ("[TALK]Hello", None, "[TALK]Hello")],
)
def test_extract_info(input, tag, val):
    """extract_info splits '[TAG]:body' into (tag, body); anything else yields (None, input)."""
    parsed_tag, parsed_val = BrainMemory.extract_info(input)
    assert parsed_tag == tag
    assert parsed_val == val
@pytest.mark.asyncio
@pytest.mark.parametrize("llm", [LLM()])
async def test_memory_llm(llm):
    """Smoke-test the LLM-backed BrainMemory helpers over a large history."""
    memory = BrainMemory()
    # Fill the history with many copies of the same fact.
    for _ in range(500):
        memory.add_talk(Message(content="Lily is a girl.\n"))

    assert not await memory.is_related("apple", "moon", llm)

    rewritten = await memory.rewrite(sentence="apple Lily eating", context="", llm=llm)
    assert "Lily" in rewritten

    assert await memory.summarize(llm=llm)

    title = await memory.get_title(llm=llm)
    assert title
    assert "Lily" in title
    assert memory.history or memory.historical_summary
# Allow running this test module directly, outside a pytest invocation.
if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/memory/test_longterm_memory.py | tests/metagpt/memory/test_longterm_memory.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Desc : unittest of `metagpt/memory/longterm_memory.py`
"""
import pytest
from metagpt.actions import UserRequirement
from metagpt.memory.longterm_memory import LongTermMemory
from metagpt.roles.role import RoleContext
from metagpt.schema import Message
from tests.metagpt.memory.mock_text_embed import (
mock_openai_aembed_document,
mock_openai_embed_document,
mock_openai_embed_documents,
text_embed_arr,
)
@pytest.mark.asyncio
async def test_ltm_search(mocker):
    """LongTermMemory.find_news reports only messages unlike anything already stored."""
    # Deterministic embeddings instead of real OpenAI calls.
    mocker.patch("llama_index.embeddings.openai.base.OpenAIEmbedding._get_text_embeddings", mock_openai_embed_documents)
    mocker.patch("llama_index.embeddings.openai.base.OpenAIEmbedding._get_text_embedding", mock_openai_embed_document)
    mocker.patch(
        "llama_index.embeddings.openai.base.OpenAIEmbedding._aget_query_embedding", mock_openai_aembed_document
    )

    role_id = "UTUserLtm(Product Manager)"
    rc = RoleContext(watch={"metagpt.actions.add_requirement.UserRequirement"})
    ltm = LongTermMemory()
    ltm.recover_memory(role_id, rc)

    first_idea = text_embed_arr[0].get("text", "Write a cli snake game")
    first_msg = Message(role="User", content=first_idea, cause_by=UserRequirement)
    news = await ltm.find_news([first_msg])
    assert len(news) == 1  # nothing stored yet -> the message is news
    ltm.add(first_msg)

    similar_idea = text_embed_arr[1].get("text", "Write a game of cli snake")
    similar_msg = Message(role="User", content=similar_idea, cause_by=UserRequirement)
    news = await ltm.find_news([similar_msg])
    assert len(news) == 0  # same mock embedding as the stored idea -> not news
    ltm.add(similar_msg)

    distinct_idea = text_embed_arr[2].get("text", "Write a 2048 web game")
    distinct_msg = Message(role="User", content=distinct_idea, cause_by=UserRequirement)
    news = await ltm.find_news([distinct_msg])
    assert len(news) == 1  # different mock embedding -> news again
    ltm.add(distinct_msg)

    ltm.clear()
# Allow running this test module directly, outside a pytest invocation.
if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/serialize_deserialize/test_prepare_interview.py | tests/metagpt/serialize_deserialize/test_prepare_interview.py | # -*- coding: utf-8 -*-
# @Desc :
import pytest
from metagpt.actions.action_node import ActionNode
from metagpt.actions.prepare_interview import PrepareInterview
@pytest.mark.asyncio
async def test_action_serdeser(context):
    """PrepareInterview survives a model_dump round-trip and still runs."""
    original = PrepareInterview(context=context)
    dumped = original.model_dump()
    assert dumped["name"] == "PrepareInterview"

    restored = PrepareInterview(**dumped, context=context)
    assert restored.name == "PrepareInterview"
    assert type(await restored.run("python developer")) == ActionNode
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/serialize_deserialize/test_write_tutorial.py | tests/metagpt/serialize_deserialize/test_write_tutorial.py | # -*- coding: utf-8 -*-
# @Desc :
from typing import Dict
import pytest
from metagpt.actions.write_tutorial import WriteContent, WriteDirectory
@pytest.mark.asyncio
@pytest.mark.parametrize(("language", "topic"), [("English", "Write a tutorial about Python")])
async def test_write_directory_serdeser(language: str, topic: str, context):
    """WriteDirectory round-trips through model_dump and still yields a directory dict."""
    dumped = WriteDirectory(context=context).model_dump()
    assert dumped["name"] == "WriteDirectory"
    assert dumped["language"] == "Chinese"  # the serialized default, not the parametrized language

    restored = WriteDirectory(**dumped, context=context)
    ret = await restored.run(topic=topic)
    assert isinstance(ret, dict)
    assert "title" in ret
    assert "directory" in ret
    directory = ret["directory"]
    assert isinstance(directory, list)
    assert len(directory)
    assert isinstance(directory[0], dict)
@pytest.mark.asyncio
@pytest.mark.parametrize(
    ("language", "topic", "directory"),
    [("English", "Write a tutorial about Python", {"Introduction": ["What is Python?", "Why learn Python?"]})],
)
async def test_write_content_serdeser(language: str, topic: str, directory: Dict, context):
    """WriteContent round-trips through model_dump and covers every directory entry."""
    dumped = WriteContent(language=language, directory=directory, context=context).model_dump()
    assert dumped["name"] == "WriteContent"

    restored = WriteContent(**dumped, context=context)
    ret = await restored.run(topic=topic)
    assert isinstance(ret, str)
    chapter, sections = next(iter(directory.items()))
    assert chapter in ret
    for section in sections:
        assert section in ret
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/serialize_deserialize/test_write_review.py | tests/metagpt/serialize_deserialize/test_write_review.py | # -*- coding: utf-8 -*-
# @Desc :
import pytest
from metagpt.actions.action_node import ActionNode
from metagpt.actions.write_review import WriteReview
TEMPLATE_CONTEXT = """
{
"Language": "zh_cn",
"Programming Language": "Python",
"Original Requirements": "写一个简单的2048",
"Project Name": "game_2048",
"Product Goals": [
"创建一个引人入胜的用户体验",
"确保高性能",
"提供可定制的功能"
],
"User Stories": [
"作为用户,我希望能够选择不同的难度级别",
"作为玩家,我希望在每局游戏结束后能看到我的得分"
],
"Competitive Analysis": [
"Python Snake Game: 界面简单,缺乏高级功能"
],
"Competitive Quadrant Chart": "quadrantChart\n title \"Reach and engagement of campaigns\"\n x-axis \"Low Reach\" --> \"High Reach\"\n y-axis \"Low Engagement\" --> \"High Engagement\"\n quadrant-1 \"我们应该扩展\"\n quadrant-2 \"需要推广\"\n quadrant-3 \"重新评估\"\n quadrant-4 \"可能需要改进\"\n \"Campaign A\": [0.3, 0.6]\n \"Campaign B\": [0.45, 0.23]\n \"Campaign C\": [0.57, 0.69]\n \"Campaign D\": [0.78, 0.34]\n \"Campaign E\": [0.40, 0.34]\n \"Campaign F\": [0.35, 0.78]\n \"Our Target Product\": [0.5, 0.6]",
"Requirement Analysis": "产品应该用户友好。",
"Requirement Pool": [
[
"P0",
"主要代码..."
],
[
"P0",
"游戏算法..."
]
],
"UI Design draft": "基本功能描述,简单的风格和布局。",
"Anything UNCLEAR": "..."
}
"""
@pytest.mark.asyncio
async def test_action_serdeser(context):
    """WriteReview survives serde and still produces an LGTM/LBTM review node."""
    dumped = WriteReview(context=context).model_dump()
    assert dumped["name"] == "WriteReview"

    restored = WriteReview(**dumped, context=context)
    review = await restored.run(TEMPLATE_CONTEXT)
    assert restored.name == "WriteReview"
    assert type(review) == ActionNode
    assert review.instruct_content
    assert review.get("LGTM") in ["LGTM", "LBTM"]
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/serialize_deserialize/test_reasearcher.py | tests/metagpt/serialize_deserialize/test_reasearcher.py | # -*- coding: utf-8 -*-
# @Desc :
import pytest
from metagpt.actions import CollectLinks
from metagpt.roles.researcher import Researcher
@pytest.mark.asyncio
async def test_tutorial_assistant_serdeser(context):
    """Researcher keeps its language and action list across a serde round-trip."""
    dumped = Researcher(context=context).model_dump()
    assert "name" in dumped
    assert "language" in dumped

    restored = Researcher(**dumped, context=context)
    assert restored.language == "en-us"
    assert len(restored.actions) == 3
    assert isinstance(restored.actions[0], CollectLinks)
    # TODO: verify that memory is saved correctly when different actions fail
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/serialize_deserialize/test_environment.py | tests/metagpt/serialize_deserialize/test_environment.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
import pytest
from metagpt.actions.action_node import ActionNode
from metagpt.actions.add_requirement import UserRequirement
from metagpt.actions.project_management import WriteTasks
from metagpt.environment import Environment
from metagpt.roles.project_manager import ProjectManager
from metagpt.schema import Message
from metagpt.utils.common import any_to_str, read_json_file, write_json_file
from tests.metagpt.serialize_deserialize.test_serdeser_base import (
ActionOK,
ActionRaise,
RoleC,
serdeser_path,
)
def test_env_serdeser(context):
    """An Environment with no roles round-trips and keeps its message history."""
    env = Environment(context=context)
    env.publish_message(message=Message(content="test env serialize"))

    dumped = env.model_dump()
    assert "roles" in dumped
    assert len(dumped["roles"]) == 0

    restored = Environment(**dumped, context=context)
    assert len(restored.roles) == 0
    assert len(restored.history) == 25
def test_environment_serdeser(context):
    """A hired RoleC and a published message survive Environment serialization."""
    out_mapping = {"field1": (list[str], ...)}
    out_data = {"field1": ["field1 value1", "field1 value2"]}
    ic_obj = ActionNode.create_model_class("prd", out_mapping)
    message = Message(
        content="prd", instruct_content=ic_obj(**out_data), role="product manager", cause_by=any_to_str(UserRequirement)
    )

    environment = Environment(context=context)
    role_c = RoleC()
    environment.add_role(role_c)
    environment.publish_message(message)

    dumped = environment.model_dump()
    assert dumped["roles"]["Role C"]["name"] == "RoleC"

    restored: Environment = Environment(**dumped, context=context)
    assert len(restored.roles) == 1
    restored_role = list(restored.roles.values())[0]
    original_role = list(environment.roles.values())[0]
    assert restored_role.states == original_role.states
    assert isinstance(original_role.actions[0], ActionOK)
    assert type(restored_role.actions[0]) == ActionOK
    assert type(restored_role.actions[1]) == ActionRaise
    assert restored_role.rc.watch == role_c.rc.watch
def test_environment_serdeser_v2(context):
    """A built-in role (ProjectManager) keeps its concrete type and actions through serde."""
    environment = Environment(context=context)
    pm = ProjectManager()
    environment.add_role(pm)

    restored: Environment = Environment(**environment.model_dump(), context=context)
    role = restored.get_role(pm.profile)
    assert isinstance(role, ProjectManager)
    assert isinstance(role.actions[0], WriteTasks)
    assert isinstance(list(restored.roles.values())[0].actions[0], WriteTasks)
    assert list(restored.roles.values())[0].rc.watch == pm.rc.watch
def test_environment_serdeser_save(context):
    """Environment serde also round-trips through an on-disk JSON file."""
    environment = Environment(context=context)
    role_c = RoleC()
    environment.add_role(role_c)

    env_path = serdeser_path.joinpath("team", "environment").joinpath("env.json")
    write_json_file(env_path, environment.model_dump())

    restored: Environment = Environment(**read_json_file(env_path), context=context)
    assert len(restored.roles) == 1
    assert type(list(restored.roles.values())[0].actions[0]) == ActionOK
    assert list(restored.roles.values())[0].rc.watch == role_c.rc.watch
# Allow running this test module directly, outside a pytest invocation.
if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/serialize_deserialize/test_architect.py | tests/metagpt/serialize_deserialize/test_architect.py | # -*- coding: utf-8 -*-
# @Date : 11/26/2023 2:04 PM
# @Author : stellahong (stellahong@fuzhi.ai)
# @Desc :
import pytest
from metagpt.actions.action import Action
from metagpt.roles.architect import Architect
@pytest.mark.asyncio
async def test_architect_serdeser(context):
    """Architect round-trips (with field aliases) and its single action still runs."""
    dumped = Architect(context=context).model_dump(by_alias=True)
    for key in ("name", "states", "actions"):
        assert key in dumped

    restored = Architect(**dumped, context=context)
    assert restored.name == "Bob"
    assert len(restored.actions) == 1
    assert len(restored.rc.watch) == 1
    assert isinstance(restored.actions[0], Action)
    await restored.actions[0].run(with_messages="write a cli snake game")
# Allow running this test module directly, outside a pytest invocation.
if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/serialize_deserialize/test_serdeser_base.py | tests/metagpt/serialize_deserialize/test_serdeser_base.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : base test actions / roles used in unittest
import asyncio
from pathlib import Path
from typing import Optional
from pydantic import BaseModel, Field
from metagpt.actions import Action, ActionOutput, UserRequirement
from metagpt.actions.action_node import ActionNode
from metagpt.actions.fix_bug import FixBug
from metagpt.roles.role import Role, RoleReactMode
serdeser_path = Path(__file__).absolute().parent.joinpath("..", "..", "data", "serdeser_storage")
class MockMessage(BaseModel):
    """Minimal message stand-in used by the serde unittests."""

    content: str = "test_msg"
class MockICMessage(BaseModel):
    """Message stand-in with an optional instruct_content, to test a plain dict without postprocess."""

    content: str = "test_ic_msg"
    instruct_content: Optional[BaseModel] = Field(default=None)
class ActionPass(Action):
    """Test action that succeeds after a delay and returns a structured ActionOutput."""

    name: str = "ActionPass"

    async def run(self, messages: list["Message"]) -> ActionOutput:
        # Sleep long enough that watching roles can observe the emitted Message.
        await asyncio.sleep(5)
        result_class = ActionNode.create_model_class("pass", {"result": (str, ...)})
        return ActionOutput("ActionPass run passed", result_class(**{"result": "pass result"}))
class ActionOK(Action):
    """Test action that succeeds after a delay with a plain string result."""

    name: str = "ActionOK"

    async def run(self, messages: list["Message"]) -> str:
        await asyncio.sleep(5)  # give other roles time to observe intermediate state
        return "ok"
class ActionRaise(Action):
    """Test action that always fails, used to exercise error/interrupt paths."""

    name: str = "ActionRaise"

    async def run(self, messages: list["Message"]) -> str:
        raise RuntimeError("parse error in ActionRaise")
class ActionOKV2(Action):
    """Variant of ActionOK carrying an extra field, to test serde of added attributes."""

    name: str = "ActionOKV2"
    extra_field: str = "ActionOKV2 Extra Info"
class RoleA(Role):
    """Test role that watches requirements/bug fixes and reacts with ActionPass."""

    name: str = Field(default="RoleA")
    profile: str = Field(default="Role A")
    goal: str = "RoleA's goal"
    constraints: str = "RoleA's constraints"

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.set_actions([ActionPass])
        self._watch([FixBug, UserRequirement])
class RoleB(Role):
    """Test role that watches ActionPass and runs ActionOK then ActionRaise in order."""

    name: str = Field(default="RoleB")
    profile: str = Field(default="Role B")
    goal: str = "RoleB's goal"
    constraints: str = "RoleB's constraints"

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.set_actions([ActionOK, ActionRaise])
        self._watch([ActionPass])
        self.rc.react_mode = RoleReactMode.BY_ORDER
class RoleC(Role):
    """Test role watching requirements/bug fixes; runs ActionOK then ActionRaise in order."""

    name: str = Field(default="RoleC")
    profile: str = Field(default="Role C")
    goal: str = "RoleC's goal"
    constraints: str = "RoleC's constraints"

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.set_actions([ActionOK, ActionRaise])
        self._watch([FixBug, UserRequirement])
        self.rc.react_mode = RoleReactMode.BY_ORDER
        self.rc.memory.ignore_id = True  # let memory comparisons ignore message ids
class RoleD(Role):
    """Test role with default behavior; actions are injected by the caller."""

    name: str = Field(default="RoleD")
    profile: str = Field(default="Role D")
    goal: str = "RoleD's goal"
    constraints: str = "RoleD's constraints"
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/serialize_deserialize/test_action.py | tests/metagpt/serialize_deserialize/test_action.py | # -*- coding: utf-8 -*-
# @Date : 11/22/2023 11:48 AM
# @Author : stellahong (stellahong@fuzhi.ai)
# @Desc :
import pytest
from metagpt.actions import Action
@pytest.mark.asyncio
async def test_action_serdeser(context):
    """Action serde keeps the name, drops the llm, and records the module class name."""
    dumped = Action(context=context).model_dump()
    assert "name" in dumped
    assert "llm" not in dumped  # llm is excluded from export
    assert "__module_class_name" in dumped

    named_dump = Action(name="test", context=context).model_dump()
    assert "test" in named_dump["name"]

    restored = Action(**named_dump, context=context)
    assert restored.name == "test"
    assert isinstance(restored.llm, type(context.llm()))
    assert len(await restored._aask("who are you")) > 0
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/serialize_deserialize/test_team.py | tests/metagpt/serialize_deserialize/test_team.py | # -*- coding: utf-8 -*-
# @Date : 11/27/2023 10:07 AM
# @Author : stellahong (stellahong@fuzhi.ai)
# @Desc :
import shutil
from pathlib import Path
import pytest
from metagpt.context import Context
from metagpt.logs import logger
from metagpt.roles import Architect, ProductManager, ProjectManager
from metagpt.team import Team
from metagpt.utils.common import write_json_file
from tests.metagpt.serialize_deserialize.test_serdeser_base import (
ActionOK,
RoleA,
RoleB,
RoleC,
serdeser_path,
)
def test_team_deserialize(context):
    """A hired team keeps its role count and concrete role types through serde."""
    company = Team(context=context)
    pm = ProductManager()
    arch = Architect()
    company.hire([pm, arch, ProjectManager()])
    assert len(company.env.get_roles()) == 3

    restored = Team.model_validate(company.model_dump())
    assert len(restored.env.get_roles()) == 3

    assert restored.env.get_role(pm.profile) is not None
    new_pm = restored.env.get_role(pm.profile)
    assert type(new_pm) == ProductManager
    assert restored.env.get_role(pm.profile) is not None
    assert restored.env.get_role(arch.profile) is not None
def mock_team_serialize(self, stg_path: Path = serdeser_path.joinpath("team")):
    # Test double for Team.serialize: dump the whole team to <stg_path>/team.json only.
    team_info_path = stg_path.joinpath("team.json")
    write_json_file(team_info_path, self.model_dump())
def test_team_serdeser_save(mocker, context):
    """Team.serialize/deserialize round-trips through the mocked on-disk layout."""
    mocker.patch("metagpt.team.Team.serialize", mock_team_serialize)

    company = Team(context=context)
    company.hire([RoleC()])

    stg_path = serdeser_path.joinpath("team")
    shutil.rmtree(stg_path, ignore_errors=True)
    company.serialize(stg_path=stg_path)

    restored = Team.deserialize(stg_path)
    assert len(restored.env.roles) == 1
@pytest.mark.asyncio
async def test_team_recover(mocker, context):
    """Recreating a Team from model_dump preserves role memories and keeps running."""
    mocker.patch("metagpt.team.Team.serialize", mock_team_serialize)

    idea = "write a snake game"
    shutil.rmtree(serdeser_path.joinpath("team"), ignore_errors=True)

    company = Team(context=context)
    role_c = RoleC()
    company.hire([role_c])
    company.run_project(idea)
    await company.run(n_round=4)

    restored = Team(**company.model_dump())
    restored_role = restored.env.get_role(role_c.profile)
    assert restored_role.rc.memory == role_c.rc.memory
    assert restored_role.rc.env != role_c.rc.env  # each team builds its own environment
    assert type(list(restored.env.roles.values())[0].actions[0]) == ActionOK

    restored.run_project(idea)
    await restored.run(n_round=4)
@pytest.mark.asyncio
async def test_team_recover_save(mocker, context):
    """Recover a team from its on-disk serialization and check what is (and is not) restored."""
    mocker.patch("metagpt.team.Team.serialize", mock_team_serialize)

    idea = "write a 2048 web game"
    stg_path = serdeser_path.joinpath("team")
    shutil.rmtree(stg_path, ignore_errors=True)

    company = Team(context=context)
    role_c = RoleC()
    company.hire([role_c])
    company.run_project(idea)
    await company.run(n_round=4)

    new_company = Team.deserialize(stg_path)
    new_role_c = new_company.env.get_role(role_c.profile)
    # Memories are persisted; the environment object itself is rebuilt.
    assert new_role_c.rc.memory == role_c.rc.memory
    assert new_role_c.rc.env != role_c.rc.env
    assert new_role_c.recovered != role_c.recovered  # only the deserialized role is marked recovered
    assert new_role_c.rc.todo != role_c.rc.todo  # serialization excludes `rc.todo`
    assert new_role_c.rc.news != role_c.rc.news  # serialization excludes `rc.news`

    new_company.run_project(idea)
    await new_company.run(n_round=4)
@pytest.mark.asyncio
async def test_team_recover_multi_roles_save(mocker, context):
    """After recovery, the downstream role resumes from its recorded state."""
    mocker.patch("metagpt.team.Team.serialize", mock_team_serialize)

    idea = "write a snake game"
    stg_path = serdeser_path.joinpath("team")
    shutil.rmtree(stg_path, ignore_errors=True)

    role_a = RoleA()
    role_b = RoleB()
    company = Team(context=context)
    company.hire([role_a, role_b])
    company.run_project(idea)
    await company.run(n_round=4)

    logger.info("Team recovered")
    restored = Team.deserialize(stg_path)
    restored.run_project(idea)
    assert restored.env.get_role(role_b.profile).rc.state == 1
    await restored.run(n_round=4)
@pytest.mark.asyncio
async def test_context(context):
    """Team serde carries the context's repo, kwargs and cost manager along."""
    context.kwargs.set("a", "a")
    context.cost_manager.max_budget = 9

    company = Team(context=context)
    save_to = context.repo.workdir / "serial"
    company.serialize(save_to)
    company.deserialize(save_to, Context())

    restored_ctx = company.env.context
    assert restored_ctx.repo
    assert restored_ctx.repo.workdir == context.repo.workdir
    assert restored_ctx.kwargs.a == "a"
    assert restored_ctx.cost_manager.max_budget == context.cost_manager.max_budget
# Allow running this test module directly, outside a pytest invocation.
if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/serialize_deserialize/test_write_docstring.py | tests/metagpt/serialize_deserialize/test_write_docstring.py | # -*- coding: utf-8 -*-
# @Desc :
import pytest
from metagpt.actions.write_docstring import WriteDocstring
code = """
def add_numbers(a: int, b: int):
return a + b
class Person:
def __init__(self, name: str, age: int):
self.name = name
self.age = age
def greet(self):
return f"Hello, my name is {self.name} and I am {self.age} years old."
"""
@pytest.mark.asyncio
@pytest.mark.parametrize(
    ("style", "part"),
    [
        ("google", "Args:"),
        ("numpy", "Parameters"),
        ("sphinx", ":param name:"),
    ],
    ids=["google", "numpy", "sphinx"],
)
async def test_action_serdeser(style: str, part: str, context):
    """WriteDocstring round-trips through serde and emits the requested docstring style."""
    dumped = WriteDocstring(context=context).model_dump()
    assert "name" in dumped
    assert dumped["desc"] == "Write docstring for code."

    restored = WriteDocstring(**dumped, context=context)
    assert restored.name == "WriteDocstring"
    assert restored.desc == "Write docstring for code."

    generated = await restored.run(code, style=style)
    assert part in generated
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/serialize_deserialize/test_role.py | tests/metagpt/serialize_deserialize/test_role.py | # -*- coding: utf-8 -*-
# @Date : 11/23/2023 4:49 PM
# @Author : stellahong (stellahong@fuzhi.ai)
# @Desc :
import shutil
import pytest
from pydantic import BaseModel, SerializeAsAny
from metagpt.actions import WriteCode
from metagpt.actions.add_requirement import UserRequirement
from metagpt.logs import logger
from metagpt.roles.engineer import Engineer
from metagpt.roles.product_manager import ProductManager
from metagpt.roles.role import Role
from metagpt.schema import Message
from metagpt.utils.common import format_trackback_info, read_json_file, write_json_file
from tests.metagpt.serialize_deserialize.test_serdeser_base import (
ActionOK,
RoleA,
RoleB,
RoleC,
RoleD,
serdeser_path,
)
def test_roles(context):
    """Role subclasses configure the expected watch lists and injected actions."""
    role_a = RoleA()
    assert len(role_a.rc.watch) == 2
    role_b = RoleB()
    assert len(role_a.rc.watch) == 2  # creating RoleB leaves RoleA's watch untouched
    assert len(role_b.rc.watch) == 1

    role_d = RoleD(actions=[ActionOK()])
    assert len(role_d.actions) == 1
def test_role_subclasses(context):
    """Role subclasses with identical fields keep their concrete type through serde."""

    class RoleSubClasses(BaseModel):
        roles: list[SerializeAsAny[Role]] = []

    dumped = RoleSubClasses(roles=[RoleA(), RoleB()]).model_dump()
    restored = RoleSubClasses(**dumped)
    assert isinstance(restored.roles[0], RoleA)
    assert isinstance(restored.roles[1], RoleB)
def test_role_serialize(context):
    """A bare Role serializes its name, states and actions."""
    dumped = Role().model_dump()
    for key in ("name", "states", "actions"):
        assert key in dumped
def test_engineer_serdeser(context):
    """Engineer round-trips with its defaults intact and a concrete WriteCode action."""
    dumped = Engineer().model_dump()
    for key in ("name", "states", "actions"):
        assert key in dumped

    restored = Engineer(**dumped)
    assert restored.name == "Alex"
    assert restored.use_code_review is False
    assert len(restored.actions) == 1
    assert isinstance(restored.actions[0], WriteCode)
def test_role_serdeser_save(context):
shutil.rmtree(serdeser_path.joinpath("team"), ignore_errors=True)
pm = ProductManager()
stg_path = serdeser_path.joinpath("team", "environment", "roles", f"{pm.__class__.__name__}_{pm.name}")
role_path = stg_path.joinpath("role.json")
write_json_file(role_path, pm.model_dump())
role_dict = read_json_file(role_path)
new_pm = ProductManager(**role_dict)
assert new_pm.name == pm.name
assert len(new_pm.get_memories(1)) == 0
@pytest.mark.asyncio
async def test_role_serdeser_interrupt(context):
role_c = RoleC()
shutil.rmtree(serdeser_path.joinpath("team"), ignore_errors=True)
stg_path = serdeser_path.joinpath("team", "environment", "roles", f"{role_c.__class__.__name__}_{role_c.name}")
role_path = stg_path.joinpath("role.json")
try:
await role_c.run(with_message=Message(content="demo", cause_by=UserRequirement))
except Exception:
logger.error(f"Exception in `role_c.run`, detail: {format_trackback_info()}")
write_json_file(role_path, role_c.model_dump())
assert role_c.rc.memory.count() == 1
role_dict = read_json_file(role_path)
new_role_c: Role = RoleC(**role_dict)
assert new_role_c.rc.state == 1
with pytest.raises(Exception):
await new_role_c.run(with_message=Message(content="demo", cause_by=UserRequirement))
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/serialize_deserialize/test_project_manager.py | tests/metagpt/serialize_deserialize/test_project_manager.py | # -*- coding: utf-8 -*-
# @Date : 11/26/2023 2:06 PM
# @Author : stellahong (stellahong@fuzhi.ai)
# @Desc :
import pytest
from metagpt.actions.action import Action
from metagpt.actions.project_management import WriteTasks
from metagpt.roles.project_manager import ProjectManager
@pytest.mark.asyncio
async def test_project_manager_serdeser(context):
role = ProjectManager(context=context)
ser_role_dict = role.model_dump(by_alias=True)
assert "name" in ser_role_dict
assert "states" in ser_role_dict
assert "actions" in ser_role_dict
new_role = ProjectManager(**ser_role_dict, context=context)
assert new_role.name == "Eve"
assert len(new_role.actions) == 1
assert isinstance(new_role.actions[0], Action)
assert isinstance(new_role.actions[0], WriteTasks)
# await new_role.actions[0].run(context="write a cli snake game")
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/serialize_deserialize/test_write_code_review.py | tests/metagpt/serialize_deserialize/test_write_code_review.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : unittest of WriteCodeReview SerDeser
import pytest
from metagpt.actions import WriteCodeReview
from metagpt.schema import CodingContext, Document
@pytest.mark.asyncio
async def test_write_code_review_serdeser(context):
context.src_workspace = context.repo.workdir / "srcs"
code_content = """
def div(a: int, b: int = 0):
return a / b
"""
coding_context = CodingContext(
filename="test_op.py",
design_doc=Document(content="divide two numbers"),
code_doc=Document(content=code_content),
)
action = WriteCodeReview(i_context=coding_context)
serialized_data = action.model_dump()
assert serialized_data["name"] == "WriteCodeReview"
new_action = WriteCodeReview(**serialized_data, context=context)
assert new_action.name == "WriteCodeReview"
await new_action.run()
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/serialize_deserialize/test_memory.py | tests/metagpt/serialize_deserialize/test_memory.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : unittest of memory
from pydantic import BaseModel
from metagpt.actions.action_node import ActionNode
from metagpt.actions.add_requirement import UserRequirement
from metagpt.actions.design_api import WriteDesign
from metagpt.memory.memory import Memory
from metagpt.schema import Message
from metagpt.utils.common import any_to_str, read_json_file, write_json_file
from tests.metagpt.serialize_deserialize.test_serdeser_base import serdeser_path
def test_memory_serdeser(context):
msg1 = Message(role="Boss", content="write a snake game", cause_by=UserRequirement)
out_mapping = {"field2": (list[str], ...)}
out_data = {"field2": ["field2 value1", "field2 value2"]}
ic_obj = ActionNode.create_model_class("system_design", out_mapping)
msg2 = Message(
role="Architect", instruct_content=ic_obj(**out_data), content="system design content", cause_by=WriteDesign
)
memory = Memory()
memory.add_batch([msg1, msg2])
ser_data = memory.model_dump()
new_memory = Memory(**ser_data)
assert new_memory.count() == 2
new_msg2 = new_memory.get(2)[0]
assert isinstance(new_msg2, BaseModel)
assert isinstance(new_memory.storage[-1], BaseModel)
assert new_memory.storage[-1].cause_by == any_to_str(WriteDesign)
assert new_msg2.role == "Boss"
memory = Memory(storage=[msg1, msg2], index={msg1.cause_by: [msg1], msg2.cause_by: [msg2]})
assert memory.count() == 2
def test_memory_serdeser_save(context):
msg1 = Message(role="User", content="write a 2048 game", cause_by=UserRequirement)
out_mapping = {"field1": (list[str], ...)}
out_data = {"field1": ["field1 value1", "field1 value2"]}
ic_obj = ActionNode.create_model_class("system_design", out_mapping)
msg2 = Message(
role="Architect", instruct_content=ic_obj(**out_data), content="system design content", cause_by=WriteDesign
)
memory = Memory()
memory.add_batch([msg1, msg2])
stg_path = serdeser_path.joinpath("team", "environment")
memory_path = stg_path.joinpath("memory.json")
write_json_file(memory_path, memory.model_dump())
assert memory_path.exists()
memory_dict = read_json_file(memory_path)
new_memory = Memory(**memory_dict)
assert new_memory.count() == 2
new_msg2 = new_memory.get(1)[0]
assert new_msg2.instruct_content.field1 == ["field1 value1", "field1 value2"]
assert new_msg2.cause_by == any_to_str(WriteDesign)
assert len(new_memory.index) == 2
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/serialize_deserialize/test_polymorphic.py | tests/metagpt/serialize_deserialize/test_polymorphic.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : unittest of polymorphic conditions
import copy
from pydantic import BaseModel, ConfigDict, SerializeAsAny
from metagpt.actions import Action
from tests.metagpt.serialize_deserialize.test_serdeser_base import (
ActionOKV2,
ActionPass,
)
class ActionSubClasses(BaseModel):
model_config = ConfigDict(arbitrary_types_allowed=True)
actions: list[SerializeAsAny[Action]] = []
class ActionSubClassesNoSAA(BaseModel):
"""without SerializeAsAny"""
model_config = ConfigDict(arbitrary_types_allowed=True)
actions: list[Action] = []
def test_serialize_as_any():
"""test subclasses of action with different fields in ser&deser"""
# ActionOKV2 with a extra field `extra_field`
action_subcls = ActionSubClasses(actions=[ActionOKV2(), ActionPass()])
action_subcls_dict = action_subcls.model_dump()
assert action_subcls_dict["actions"][0]["extra_field"] == ActionOKV2().extra_field
def test_no_serialize_as_any():
# ActionOKV2 with a extra field `extra_field`
action_subcls = ActionSubClassesNoSAA(actions=[ActionOKV2(), ActionPass()])
action_subcls_dict = action_subcls.model_dump()
# without `SerializeAsAny`, it will serialize as Action
assert "extra_field" not in action_subcls_dict["actions"][0]
def test_polymorphic():
ok_v2 = ActionOKV2(
**{"name": "ActionOKV2", "context": "", "prefix": "", "desc": "", "extra_field": "ActionOKV2 Extra Info"}
)
action_subcls = ActionSubClasses(actions=[ActionOKV2(), ActionPass()])
action_subcls_dict = action_subcls.model_dump()
action_subcls_dict2 = copy.deepcopy(action_subcls_dict)
assert "__module_class_name" in action_subcls_dict["actions"][0]
new_action_subcls = ActionSubClasses(**action_subcls_dict)
assert isinstance(new_action_subcls.actions[0], ActionOKV2)
assert new_action_subcls.actions[0].extra_field == ok_v2.extra_field
assert isinstance(new_action_subcls.actions[1], ActionPass)
new_action_subcls = ActionSubClasses.model_validate(action_subcls_dict2)
assert isinstance(new_action_subcls.actions[0], ActionOKV2)
assert isinstance(new_action_subcls.actions[1], ActionPass)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/serialize_deserialize/__init__.py | tests/metagpt/serialize_deserialize/__init__.py | # -*- coding: utf-8 -*-
# @Date : 11/22/2023 11:48 AM
# @Author : stellahong (stellahong@fuzhi.ai)
# @Desc :
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/serialize_deserialize/test_product_manager.py | tests/metagpt/serialize_deserialize/test_product_manager.py | # -*- coding: utf-8 -*-
# @Date : 11/26/2023 2:07 PM
# @Author : stellahong (stellahong@fuzhi.ai)
# @Desc :
import pytest
from metagpt.actions.action import Action
from metagpt.roles.product_manager import ProductManager
from metagpt.schema import Message
@pytest.mark.asyncio
async def test_product_manager_serdeser(new_filename, context):
role = ProductManager(context=context)
ser_role_dict = role.model_dump(by_alias=True)
new_role = ProductManager(**ser_role_dict, context=context)
assert new_role.name == "Alice"
assert len(new_role.actions) == 2
assert isinstance(new_role.actions[0], Action)
await new_role.actions[0].run([Message(content="write a cli snake game")])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/serialize_deserialize/test_tutorial_assistant.py | tests/metagpt/serialize_deserialize/test_tutorial_assistant.py | # -*- coding: utf-8 -*-
# @Desc :
import pytest
from metagpt.actions.write_tutorial import WriteDirectory
from metagpt.roles.tutorial_assistant import TutorialAssistant
@pytest.mark.asyncio
async def test_tutorial_assistant_serdeser(context):
role = TutorialAssistant()
ser_role_dict = role.model_dump()
assert "name" in ser_role_dict
assert "language" in ser_role_dict
assert "topic" in ser_role_dict
new_role = TutorialAssistant(**ser_role_dict)
assert new_role.name == "Stitch"
assert len(new_role.actions) == 1
assert isinstance(new_role.actions[0], WriteDirectory)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/serialize_deserialize/test_schema.py | tests/metagpt/serialize_deserialize/test_schema.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : unittest of schema ser&deser
import pytest
from metagpt.actions.action_node import ActionNode
from metagpt.actions.write_code import WriteCode
from metagpt.schema import CodingContext, Document, Documents, Message, TestingContext
from metagpt.utils.common import any_to_str
from tests.metagpt.serialize_deserialize.test_serdeser_base import (
MockICMessage,
MockMessage,
)
def test_message_serdeser_from_create_model():
with pytest.raises(KeyError):
_ = Message(content="code", instruct_content={"class": "test", "key": "value"})
out_mapping = {"field3": (str, ...), "field4": (list[str], ...)}
out_data = {"field3": "field3 value3", "field4": ["field4 value1", "field4 value2"]}
ic_obj = ActionNode.create_model_class("code", out_mapping)
ic_inst = ic_obj(**out_data)
message = Message(content="code", instruct_content=ic_inst, role="engineer", cause_by=WriteCode)
ser_data = message.model_dump()
assert ser_data["cause_by"] == "metagpt.actions.write_code.WriteCode"
assert ser_data["instruct_content"]["class"] == "code"
new_message = Message(**ser_data)
assert new_message.cause_by == any_to_str(WriteCode)
assert new_message.cause_by in [any_to_str(WriteCode)]
assert new_message.instruct_content == ic_obj(**out_data)
assert new_message.instruct_content == ic_inst
assert new_message.instruct_content.model_dump() == ic_obj(**out_data).model_dump()
assert new_message == message
mock_msg = MockMessage()
message = Message(content="test_ic", instruct_content=mock_msg)
ser_data = message.model_dump()
new_message = Message(**ser_data)
assert new_message.instruct_content == mock_msg
assert new_message == message
def test_message_without_postprocess():
"""to explain `instruct_content` from `create_model_class` should be postprocessed"""
out_mapping = {"field1": (list[str], ...)}
out_data = {"field1": ["field1 value1", "field1 value2"]}
ic_obj = ActionNode.create_model_class("code", out_mapping)
message = MockICMessage(content="code", instruct_content=ic_obj(**out_data))
ser_data = message.model_dump()
assert ser_data["instruct_content"] == {}
ser_data["instruct_content"] = None
new_message = MockICMessage(**ser_data)
assert new_message.instruct_content != ic_obj(**out_data)
assert new_message != message
def test_message_serdeser_from_basecontext():
doc_msg = Message(content="test_document", instruct_content=Document(content="test doc"))
ser_data = doc_msg.model_dump()
assert ser_data["instruct_content"]["value"]["content"] == "test doc"
assert ser_data["instruct_content"]["value"]["filename"] == ""
docs_msg = Message(
content="test_documents", instruct_content=Documents(docs={"doc1": Document(content="test doc")})
)
ser_data = docs_msg.model_dump()
assert ser_data["instruct_content"]["class"] == "Documents"
assert ser_data["instruct_content"]["value"]["docs"]["doc1"]["content"] == "test doc"
assert ser_data["instruct_content"]["value"]["docs"]["doc1"]["filename"] == ""
code_ctxt = CodingContext(
filename="game.py",
design_doc=Document(root_path="docs/system_design", filename="xx.json", content="xxx"),
task_doc=Document(root_path="docs/tasks", filename="xx.json", content="xxx"),
code_doc=Document(root_path="xxx", filename="game.py", content="xxx"),
)
code_ctxt_msg = Message(content="coding_context", instruct_content=code_ctxt)
ser_data = code_ctxt_msg.model_dump()
assert ser_data["instruct_content"]["class"] == "CodingContext"
new_code_ctxt_msg = Message(**ser_data)
assert new_code_ctxt_msg.instruct_content == code_ctxt
assert new_code_ctxt_msg.instruct_content.code_doc.filename == "game.py"
assert new_code_ctxt_msg == code_ctxt_msg
testing_ctxt = TestingContext(
filename="test.py",
code_doc=Document(root_path="xxx", filename="game.py", content="xxx"),
test_doc=Document(root_path="docs/tests", filename="test.py", content="xxx"),
)
testing_ctxt_msg = Message(content="testing_context", instruct_content=testing_ctxt)
ser_data = testing_ctxt_msg.model_dump()
new_testing_ctxt_msg = Message(**ser_data)
assert new_testing_ctxt_msg.instruct_content == testing_ctxt
assert new_testing_ctxt_msg.instruct_content.test_doc.filename == "test.py"
assert new_testing_ctxt_msg == testing_ctxt_msg
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/serialize_deserialize/test_write_code.py | tests/metagpt/serialize_deserialize/test_write_code.py | # -*- coding: utf-8 -*-
# @Date : 11/23/2023 10:56 AM
# @Author : stellahong (stellahong@fuzhi.ai)
# @Desc :
import pytest
from metagpt.actions import WriteCode
from metagpt.schema import CodingContext, Document
def test_write_design_serdeser(context):
action = WriteCode(context=context)
ser_action_dict = action.model_dump()
assert ser_action_dict["name"] == "WriteCode"
assert "llm" not in ser_action_dict # not export
@pytest.mark.asyncio
async def test_write_code_serdeser(context):
context.src_workspace = context.repo.workdir / "srcs"
coding_context = CodingContext(
filename="test_code.py", design_doc=Document(content="write add function to calculate two numbers")
)
doc = Document(content=coding_context.model_dump_json())
action = WriteCode(i_context=doc, context=context)
serialized_data = action.model_dump()
new_action = WriteCode(**serialized_data, context=context)
assert new_action.name == "WriteCode"
await action.run()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/serialize_deserialize/test_write_prd.py | tests/metagpt/serialize_deserialize/test_write_prd.py | # -*- coding: utf-8 -*-
# @Date : 11/22/2023 1:47 PM
# @Author : stellahong (stellahong@fuzhi.ai)
# @Desc :
import pytest
from metagpt.actions import WritePRD
from metagpt.schema import Message
@pytest.mark.asyncio
async def test_action_serdeser(new_filename, context):
action = WritePRD(context=context)
ser_action_dict = action.model_dump()
assert "name" in ser_action_dict
assert "llm" not in ser_action_dict # not export
new_action = WritePRD(**ser_action_dict, context=context)
assert new_action.name == "WritePRD"
with pytest.raises(FileNotFoundError):
await new_action.run(with_messages=Message(content="write a cli snake game"))
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/serialize_deserialize/test_write_design.py | tests/metagpt/serialize_deserialize/test_write_design.py | # -*- coding: utf-8 -*-
# @Date : 11/22/2023 8:19 PM
# @Author : stellahong (stellahong@fuzhi.ai)
# @Desc :
import pytest
from metagpt.actions import WriteDesign, WriteTasks
@pytest.mark.asyncio
async def test_write_design_serialize(context):
action = WriteDesign(context=context)
ser_action_dict = action.model_dump()
assert "name" in ser_action_dict
assert "llm" not in ser_action_dict # not export
new_action = WriteDesign(**ser_action_dict, context=context)
assert new_action.name == "WriteDesign"
await new_action.run(with_messages="write a cli snake game")
@pytest.mark.asyncio
async def test_write_task_serialize(context):
action = WriteTasks(context=context)
ser_action_dict = action.model_dump()
assert "name" in ser_action_dict
assert "llm" not in ser_action_dict # not export
new_action = WriteTasks(**ser_action_dict, context=context)
assert new_action.name == "WriteTasks"
await new_action.run(with_messages="write a cli snake game")
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/utils/test_session.py | tests/metagpt/utils/test_session.py | #!/usr/bin/env python3
# _*_ coding: utf-8 _*_
import pytest
def test_nodeid(request):
print(request.node.nodeid)
assert request.node.nodeid
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/utils/test_ahttp_client.py | tests/metagpt/utils/test_ahttp_client.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : unittest of ahttp_client
import pytest
from metagpt.utils.ahttp_client import apost, apost_stream
@pytest.mark.asyncio
async def test_apost():
result = await apost(url="https://www.baidu.com/")
assert "百度一下" in result
result = await apost(
url="http://aider.meizu.com/app/weather/listWeather", data={"cityIds": "101240101"}, as_json=True
)
assert result["code"] == "200"
@pytest.mark.asyncio
async def test_apost_stream():
result = apost_stream(url="https://www.baidu.com/")
async for line in result:
assert len(line) >= 0
result = apost_stream(url="http://aider.meizu.com/app/weather/listWeather", data={"cityIds": "101240101"})
async for line in result:
assert len(line) >= 0
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.