repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
Textualize/rich | https://github.com/Textualize/rich/blob/53757bc234cf18977cade41a5b64f3abaccb0b85/examples/recursive_error.py | examples/recursive_error.py | """
Demonstrates Rich tracebacks for recursion errors.
Rich can exclude frames in the middle to avoid huge tracebacks.
"""
from rich.console import Console
def foo(n):
return bar(n)
def bar(n):
return foo(n)
console = Console()
try:
foo(1)
except Exception:
console.print_exception(max_frames=20)
| python | MIT | 53757bc234cf18977cade41a5b64f3abaccb0b85 | 2026-01-04T14:39:17.105051Z | false |
Textualize/rich | https://github.com/Textualize/rich/blob/53757bc234cf18977cade41a5b64f3abaccb0b85/examples/exception.py | examples/exception.py | """
Basic example to show how to print an traceback of an exception
"""
from typing import List, Tuple
from rich.console import Console
console = Console()
def divide_by(number: float, divisor: float) -> float:
"""Divide any number by zero."""
# Will throw a ZeroDivisionError if divisor is 0
result = number / divisor
return result
def divide_all(divides: List[Tuple[float, float]]) -> None:
"""Do something impossible every day."""
for number, divisor in divides:
console.print(f"dividing {number} by {divisor}")
try:
result = divide_by(number, divisor)
except Exception:
console.print_exception(extra_lines=8, show_locals=True)
else:
console.print(f" = {result}")
DIVIDES = [
(1000, 200),
(10000, 500),
(1, 0),
(0, 1000000),
(3.1427, 2),
(888, 0),
(2**32, 2**16),
]
divide_all(DIVIDES)
| python | MIT | 53757bc234cf18977cade41a5b64f3abaccb0b85 | 2026-01-04T14:39:17.105051Z | false |
Textualize/rich | https://github.com/Textualize/rich/blob/53757bc234cf18977cade41a5b64f3abaccb0b85/examples/table.py | examples/table.py | """
Demonstrates how to render a table.
"""
from rich.console import Console
from rich.table import Table
table = Table(title="Star Wars Movies")
table.add_column("Released", style="cyan", no_wrap=True)
table.add_column("Title", style="magenta")
table.add_column("Box Office", justify="right", style="green")
table.add_row("Dec 20, 2019", "Star Wars: The Rise of Skywalker", "$952,110,690")
table.add_row("May 25, 2018", "Solo: A Star Wars Story", "$393,151,347")
table.add_row("Dec 15, 2017", "Star Wars Ep. V111: The Last Jedi", "$1,332,539,889")
table.add_row("Dec 16, 2016", "Rogue One: A Star Wars Story", "$1,332,439,889")
console = Console()
console.print(table, justify="center")
| python | MIT | 53757bc234cf18977cade41a5b64f3abaccb0b85 | 2026-01-04T14:39:17.105051Z | false |
Textualize/rich | https://github.com/Textualize/rich/blob/53757bc234cf18977cade41a5b64f3abaccb0b85/examples/justify.py | examples/justify.py | """
This example demonstrates the justify argument to print.
"""
from rich.console import Console
console = Console(width=20)
style = "bold white on blue"
console.print("Rich", style=style)
console.print("Rich", style=style, justify="left")
console.print("Rich", style=style, justify="center")
console.print("Rich", style=style, justify="right")
| python | MIT | 53757bc234cf18977cade41a5b64f3abaccb0b85 | 2026-01-04T14:39:17.105051Z | false |
Textualize/rich | https://github.com/Textualize/rich/blob/53757bc234cf18977cade41a5b64f3abaccb0b85/examples/link.py | examples/link.py | from rich import print
print("If your terminal supports links, the following text should be clickable:")
print("[link=https://www.willmcgugan.com][i]Visit [red]my[/red][/i] [yellow]Blog[/]")
| python | MIT | 53757bc234cf18977cade41a5b64f3abaccb0b85 | 2026-01-04T14:39:17.105051Z | false |
Textualize/rich | https://github.com/Textualize/rich/blob/53757bc234cf18977cade41a5b64f3abaccb0b85/examples/export.py | examples/export.py | """
Demonstrates export console output
"""
from rich.console import Console
from rich.table import Table
console = Console(record=True)
def print_table():
table = Table(title="Star Wars Movies")
table.add_column("Released", style="cyan", no_wrap=True)
table.add_column("Title", style="magenta")
table.add_column("Box Office", justify="right", style="green")
table.add_row("Dec 20, 2019", "Star Wars: The Rise of Skywalker", "$952,110,690")
table.add_row("May 25, 2018", "Solo: A Star Wars Story", "$393,151,347")
table.add_row("Dec 15, 2017", "Star Wars Ep. V111: The Last Jedi", "$1,332,539,889")
table.add_row("Dec 16, 2016", "Rogue One: A Star Wars Story", "$1,332,439,889")
console.print(table)
# Prints table
print_table()
# Get console output as text
file1 = "table_export_plaintext.txt"
text = console.export_text()
with open(file1, "w") as file:
file.write(text)
print(f"Exported console output as plain text to {file1}")
# Calling print_table again because console output buffer
# is flushed once export function is called
print_table()
# Get console output as html
# use clear=False so output is not flushed after export
file2 = "table_export_html.html"
html = console.export_html(clear=False)
with open(file2, "w") as file:
file.write(html)
print(f"Exported console output as html to {file2}")
# Export text output to table_export.txt
file3 = "table_export_plaintext2.txt"
console.save_text(file3, clear=False)
print(f"Exported console output as plain text to {file3}")
# Export html output to table_export.html
file4 = "table_export_html2.html"
console.save_html(file4)
print(f"Exported console output as html to {file4}")
| python | MIT | 53757bc234cf18977cade41a5b64f3abaccb0b85 | 2026-01-04T14:39:17.105051Z | false |
Textualize/rich | https://github.com/Textualize/rich/blob/53757bc234cf18977cade41a5b64f3abaccb0b85/examples/tree.py | examples/tree.py | """
Demonstrates how to display a tree of files / directories with the Tree renderable.
"""
import os
import pathlib
import sys
from rich import print
from rich.filesize import decimal
from rich.markup import escape
from rich.text import Text
from rich.tree import Tree
def walk_directory(directory: pathlib.Path, tree: Tree) -> None:
"""Recursively build a Tree with directory contents."""
# Sort dirs first then by filename
paths = sorted(
pathlib.Path(directory).iterdir(),
key=lambda path: (path.is_file(), path.name.lower()),
)
for path in paths:
# Remove hidden files
if path.name.startswith("."):
continue
if path.is_dir():
style = "dim" if path.name.startswith("__") else ""
branch = tree.add(
f"[bold magenta]:open_file_folder: [link file://{path}]{escape(path.name)}",
style=style,
guide_style=style,
)
walk_directory(path, branch)
else:
text_filename = Text(path.name, "green")
text_filename.highlight_regex(r"\..*$", "bold red")
text_filename.stylize(f"link file://{path}")
file_size = path.stat().st_size
text_filename.append(f" ({decimal(file_size)})", "blue")
icon = "🐍 " if path.suffix == ".py" else "📄 "
tree.add(Text(icon) + text_filename)
try:
directory = os.path.abspath(sys.argv[1])
except IndexError:
print("[b]Usage:[/] python tree.py <DIRECTORY>")
else:
tree = Tree(
f":open_file_folder: [link file://{directory}]{directory}",
guide_style="bold bright_blue",
)
walk_directory(pathlib.Path(directory), tree)
print(tree)
| python | MIT | 53757bc234cf18977cade41a5b64f3abaccb0b85 | 2026-01-04T14:39:17.105051Z | false |
Textualize/rich | https://github.com/Textualize/rich/blob/53757bc234cf18977cade41a5b64f3abaccb0b85/examples/suppress.py | examples/suppress.py | try:
import click
except ImportError:
print("Please install click for this example")
print(" pip install click")
exit()
from rich.traceback import install
install(suppress=[click])
@click.command()
@click.option("--count", default=1, help="Number of greetings.")
def hello(count):
"""Simple program that greets NAME for a total of COUNT times."""
1 / 0
for x in range(count):
click.echo(f"Hello {name}!")
if __name__ == "__main__":
hello()
| python | MIT | 53757bc234cf18977cade41a5b64f3abaccb0b85 | 2026-01-04T14:39:17.105051Z | false |
Textualize/rich | https://github.com/Textualize/rich/blob/53757bc234cf18977cade41a5b64f3abaccb0b85/examples/fullscreen.py | examples/fullscreen.py | """
Demonstrates a Rich "application" using the Layout and Live classes.
"""
from datetime import datetime
from rich import box
from rich.align import Align
from rich.console import Console, Group
from rich.layout import Layout
from rich.panel import Panel
from rich.progress import BarColumn, Progress, SpinnerColumn, TextColumn
from rich.syntax import Syntax
from rich.table import Table
console = Console()
def make_layout() -> Layout:
"""Define the layout."""
layout = Layout(name="root")
layout.split(
Layout(name="header", size=3),
Layout(name="main", ratio=1),
Layout(name="footer", size=7),
)
layout["main"].split_row(
Layout(name="side"),
Layout(name="body", ratio=2, minimum_size=60),
)
layout["side"].split(Layout(name="box1"), Layout(name="box2"))
return layout
def make_sponsor_message() -> Panel:
"""Some example content."""
sponsor_message = Table.grid(padding=1)
sponsor_message.add_column(style="green", justify="right")
sponsor_message.add_column(no_wrap=True)
sponsor_message.add_row(
"Twitter",
"[u blue link=https://twitter.com/textualize]https://twitter.com/textualize",
)
sponsor_message.add_row(
"CEO",
"[u blue link=https://twitter.com/willmcgugan]https://twitter.com/willmcgugan",
)
sponsor_message.add_row(
"Textualize", "[u blue link=https://www.textualize.io]https://www.textualize.io"
)
message = Table.grid(padding=1)
message.add_column()
message.add_column(no_wrap=True)
message.add_row(sponsor_message)
message_panel = Panel(
Align.center(
Group("\n", Align.center(sponsor_message)),
vertical="middle",
),
box=box.ROUNDED,
padding=(1, 2),
title="[b red]Thanks for trying out Rich!",
border_style="bright_blue",
)
return message_panel
class Header:
"""Display header with clock."""
def __rich__(self) -> Panel:
grid = Table.grid(expand=True)
grid.add_column(justify="center", ratio=1)
grid.add_column(justify="right")
grid.add_row(
"[b]Rich[/b] Layout application",
datetime.now().ctime().replace(":", "[blink]:[/]"),
)
return Panel(grid, style="white on blue")
def make_syntax() -> Syntax:
code = """\
def ratio_resolve(total: int, edges: List[Edge]) -> List[int]:
sizes = [(edge.size or None) for edge in edges]
# While any edges haven't been calculated
while any(size is None for size in sizes):
# Get flexible edges and index to map these back on to sizes list
flexible_edges = [
(index, edge)
for index, (size, edge) in enumerate(zip(sizes, edges))
if size is None
]
# Remaining space in total
remaining = total - sum(size or 0 for size in sizes)
if remaining <= 0:
# No room for flexible edges
sizes[:] = [(size or 0) for size in sizes]
break
# Calculate number of characters in a ratio portion
portion = remaining / sum((edge.ratio or 1) for _, edge in flexible_edges)
# If any edges will be less than their minimum, replace size with the minimum
for index, edge in flexible_edges:
if portion * edge.ratio <= edge.minimum_size:
sizes[index] = edge.minimum_size
break
else:
# Distribute flexible space and compensate for rounding error
# Since edge sizes can only be integers we need to add the remainder
# to the following line
_modf = modf
remainder = 0.0
for index, edge in flexible_edges:
remainder, size = _modf(portion * edge.ratio + remainder)
sizes[index] = int(size)
break
# Sizes now contains integers only
return cast(List[int], sizes)
"""
syntax = Syntax(code, "python", line_numbers=True)
return syntax
job_progress = Progress(
"{task.description}",
SpinnerColumn(),
BarColumn(),
TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
)
job_progress.add_task("[green]Cooking")
job_progress.add_task("[magenta]Baking", total=200)
job_progress.add_task("[cyan]Mixing", total=400)
total = sum(task.total for task in job_progress.tasks)
overall_progress = Progress()
overall_task = overall_progress.add_task("All Jobs", total=int(total))
progress_table = Table.grid(expand=True)
progress_table.add_row(
Panel(
overall_progress,
title="Overall Progress",
border_style="green",
padding=(2, 2),
),
Panel(job_progress, title="[b]Jobs", border_style="red", padding=(1, 2)),
)
layout = make_layout()
layout["header"].update(Header())
layout["body"].update(make_sponsor_message())
layout["box2"].update(Panel(make_syntax(), border_style="green"))
layout["box1"].update(Panel(layout.tree, border_style="red"))
layout["footer"].update(progress_table)
from time import sleep
from rich.live import Live
with Live(layout, refresh_per_second=10, screen=True):
while not overall_progress.finished:
sleep(0.1)
for job in job_progress.tasks:
if not job.finished:
job_progress.advance(job.id)
completed = sum(task.completed for task in job_progress.tasks)
overall_progress.update(overall_task, completed=completed)
| python | MIT | 53757bc234cf18977cade41a5b64f3abaccb0b85 | 2026-01-04T14:39:17.105051Z | false |
Textualize/rich | https://github.com/Textualize/rich/blob/53757bc234cf18977cade41a5b64f3abaccb0b85/examples/group2.py | examples/group2.py | from rich import print
from rich.console import group
from rich.panel import Panel
@group()
def get_panels():
yield Panel("Hello", style="on blue")
yield Panel("World", style="on red")
print(Panel(get_panels()))
| python | MIT | 53757bc234cf18977cade41a5b64f3abaccb0b85 | 2026-01-04T14:39:17.105051Z | false |
Textualize/rich | https://github.com/Textualize/rich/blob/53757bc234cf18977cade41a5b64f3abaccb0b85/examples/log.py | examples/log.py | """
A simulation of Rich console logging.
"""
import time
from rich.console import Console
from rich.style import Style
from rich.theme import Theme
from rich.highlighter import RegexHighlighter
class RequestHighlighter(RegexHighlighter):
base_style = "req."
highlights = [
r"^(?P<protocol>\w+) (?P<method>\w+) (?P<path>\S+) (?P<result>\w+) (?P<stats>\[.+\])$",
r"\/(?P<filename>\w+\..{3,4})",
]
theme = Theme(
{
"req.protocol": Style.parse("dim bold green"),
"req.method": Style.parse("bold cyan"),
"req.path": Style.parse("magenta"),
"req.filename": Style.parse("bright_magenta"),
"req.result": Style.parse("yellow"),
"req.stats": Style.parse("dim"),
}
)
console = Console(theme=theme)
console.log("Server starting...")
console.log("Serving on http://127.0.0.1:8000")
time.sleep(1)
request_highlighter = RequestHighlighter()
console.log(
request_highlighter("HTTP GET /foo/bar/baz/egg.html 200 [0.57, 127.0.0.1:59076]"),
)
console.log(
request_highlighter(
"HTTP GET /foo/bar/baz/background.jpg 200 [0.57, 127.0.0.1:59076]"
),
)
time.sleep(1)
def test_locals():
foo = (1, 2, 3)
movies = ["Deadpool", "Rise of the Skywalker"]
console = Console()
console.log(
"[b]JSON[/b] RPC [i]batch[/i]",
[
{"jsonrpc": "2.0", "method": "sum", "params": [1, 2, 4], "id": "1"},
{"jsonrpc": "2.0", "method": "notify_hello", "params": [7]},
{"jsonrpc": "2.0", "method": "subtract", "params": [42, 23], "id": "2"},
{"foo": "boo"},
{
"jsonrpc": "2.0",
"method": "foo.get",
"params": {"name": "myself", "enable": False, "grommits": None},
"id": "5",
},
{"jsonrpc": "2.0", "method": "get_data", "id": "9"},
],
log_locals=True,
)
test_locals()
| python | MIT | 53757bc234cf18977cade41a5b64f3abaccb0b85 | 2026-01-04T14:39:17.105051Z | false |
Textualize/rich | https://github.com/Textualize/rich/blob/53757bc234cf18977cade41a5b64f3abaccb0b85/examples/overflow.py | examples/overflow.py | from typing import List
from rich.console import Console, OverflowMethod
console = Console(width=14)
supercali = "supercalifragilisticexpialidocious"
overflow_methods: List[OverflowMethod] = ["fold", "crop", "ellipsis"]
for overflow in overflow_methods:
console.rule(overflow)
console.print(supercali, overflow=overflow, style="bold blue")
console.print()
| python | MIT | 53757bc234cf18977cade41a5b64f3abaccb0b85 | 2026-01-04T14:39:17.105051Z | false |
Textualize/rich | https://github.com/Textualize/rich/blob/53757bc234cf18977cade41a5b64f3abaccb0b85/examples/bars.py | examples/bars.py | """
Use Bar to renderer a sort-of circle.
"""
import math
from rich.align import Align
from rich.bar import Bar
from rich.color import Color
from rich import print
SIZE = 40
for row in range(SIZE):
y = (row / (SIZE - 1)) * 2 - 1
x = math.sqrt(1 - y * y)
color = Color.from_rgb((1 + y) * 127.5, 0, 0)
bar = Bar(2, width=SIZE * 2, begin=1 - x, end=1 + x, color=color)
print(Align.center(bar))
| python | MIT | 53757bc234cf18977cade41a5b64f3abaccb0b85 | 2026-01-04T14:39:17.105051Z | false |
Textualize/rich | https://github.com/Textualize/rich/blob/53757bc234cf18977cade41a5b64f3abaccb0b85/examples/attrs.py | examples/attrs.py | from typing import List
try:
import attr
except ImportError:
print("This example requires attrs library")
print("pip install attrs")
raise SystemExit()
@attr.define
class Point3D:
x: float
y: float
z: float = 0
@attr.define
class Triangle:
point1: Point3D
point2: Point3D
point3: Point3D
@attr.define
class Model:
name: str
triangles: List[Triangle] = attr.Factory(list)
if __name__ == "__main__":
model = Model(
name="Alien#1",
triangles=[
Triangle(
Point3D(x=20, y=50),
Point3D(x=50, y=15, z=-45.34),
Point3D(3.1426, 83.2323, -16),
)
],
)
from rich.console import Console
from rich.pretty import Pretty
from rich.table import Column, Table
from rich.text import Text
console = Console()
table = Table("attrs *with* Rich", Column(Text.from_markup("attrs *without* Rich")))
table.add_row(Pretty(model), repr(model))
console.print(table)
| python | MIT | 53757bc234cf18977cade41a5b64f3abaccb0b85 | 2026-01-04T14:39:17.105051Z | false |
Textualize/rich | https://github.com/Textualize/rich/blob/53757bc234cf18977cade41a5b64f3abaccb0b85/examples/jobs.py | examples/jobs.py | from time import sleep
from rich.panel import Panel
from rich.progress import Progress
JOBS = [100, 150, 25, 70, 110, 90]
progress = Progress(auto_refresh=False)
master_task = progress.add_task("overall", total=sum(JOBS))
jobs_task = progress.add_task("jobs")
progress.console.print(
Panel(
"[bold blue]A demonstration of progress with a current task and overall progress.",
padding=1,
)
)
with progress:
for job_no, job in enumerate(JOBS):
progress.log(f"Starting job #{job_no}")
sleep(0.2)
progress.reset(jobs_task, total=job, description=f"job [bold yellow]#{job_no}")
progress.start_task(jobs_task)
for wait in progress.track(range(job), task_id=jobs_task):
sleep(0.01)
progress.advance(master_task, job)
progress.log(f"Job #{job_no} is complete")
progress.log(
Panel(":sparkle: All done! :sparkle:", border_style="green", padding=1)
)
| python | MIT | 53757bc234cf18977cade41a5b64f3abaccb0b85 | 2026-01-04T14:39:17.105051Z | false |
Textualize/rich | https://github.com/Textualize/rich/blob/53757bc234cf18977cade41a5b64f3abaccb0b85/examples/top_lite_simulator.py | examples/top_lite_simulator.py | """Lite simulation of the top linux command."""
import datetime
import random
import time
from dataclasses import dataclass
from rich import box
from rich.console import Console
from rich.live import Live
from rich.table import Table
from typing import Literal
@dataclass
class Process:
pid: int
command: str
cpu_percent: float
memory: int
start_time: datetime.datetime
thread_count: int
state: Literal["running", "sleeping"]
@property
def memory_str(self) -> str:
if self.memory > 1e6:
return f"{int(self.memory/1e6)}M"
if self.memory > 1e3:
return f"{int(self.memory/1e3)}K"
return str(self.memory)
@property
def time_str(self) -> str:
return str(datetime.datetime.now() - self.start_time)
def generate_process(pid: int) -> Process:
return Process(
pid=pid,
command=f"Process {pid}",
cpu_percent=random.random() * 20,
memory=random.randint(10, 200) ** 3,
start_time=datetime.datetime.now()
- datetime.timedelta(seconds=random.randint(0, 500) ** 2),
thread_count=random.randint(1, 32),
state="running" if random.randint(0, 10) < 8 else "sleeping",
)
def create_process_table(height: int) -> Table:
processes = sorted(
[generate_process(pid) for pid in range(height)],
key=lambda p: p.cpu_percent,
reverse=True,
)
table = Table(
"PID", "Command", "CPU %", "Memory", "Time", "Thread #", "State", box=box.SIMPLE
)
for process in processes:
table.add_row(
str(process.pid),
process.command,
f"{process.cpu_percent:.1f}",
process.memory_str,
process.time_str,
str(process.thread_count),
process.state,
)
return table
console = Console()
with Live(console=console, screen=True, auto_refresh=False) as live:
while True:
live.update(create_process_table(console.size.height - 4), refresh=True)
time.sleep(1)
| python | MIT | 53757bc234cf18977cade41a5b64f3abaccb0b85 | 2026-01-04T14:39:17.105051Z | false |
Textualize/rich | https://github.com/Textualize/rich/blob/53757bc234cf18977cade41a5b64f3abaccb0b85/examples/screen.py | examples/screen.py | """
Demonstration of Console.screen()
"""
from time import sleep
from rich.align import Align
from rich.console import Console
from rich.panel import Panel
console = Console()
with console.screen(style="bold white on red") as screen:
text = Align.center("[blink]Don't Panic![/blink]", vertical="middle")
screen.update(Panel(text))
sleep(5)
| python | MIT | 53757bc234cf18977cade41a5b64f3abaccb0b85 | 2026-01-04T14:39:17.105051Z | false |
Textualize/rich | https://github.com/Textualize/rich/blob/53757bc234cf18977cade41a5b64f3abaccb0b85/examples/group.py | examples/group.py | from rich import print
from rich.console import Group
from rich.panel import Panel
panel_group = Group(
Panel("Hello", style="on blue"),
Panel("World", style="on red"),
)
print(Panel(panel_group))
| python | MIT | 53757bc234cf18977cade41a5b64f3abaccb0b85 | 2026-01-04T14:39:17.105051Z | false |
Textualize/rich | https://github.com/Textualize/rich/blob/53757bc234cf18977cade41a5b64f3abaccb0b85/examples/downloader.py | examples/downloader.py | """
A rudimentary URL downloader (like wget or curl) to demonstrate Rich progress bars.
"""
import os.path
import sys
from concurrent.futures import ThreadPoolExecutor
import signal
from functools import partial
from threading import Event
from typing import Iterable
from urllib.request import urlopen
from rich.progress import (
BarColumn,
DownloadColumn,
Progress,
TaskID,
TextColumn,
TimeRemainingColumn,
TransferSpeedColumn,
)
progress = Progress(
TextColumn("[bold blue]{task.fields[filename]}", justify="right"),
BarColumn(bar_width=None),
"[progress.percentage]{task.percentage:>3.1f}%",
"•",
DownloadColumn(),
"•",
TransferSpeedColumn(),
"•",
TimeRemainingColumn(),
)
done_event = Event()
def handle_sigint(signum, frame):
done_event.set()
signal.signal(signal.SIGINT, handle_sigint)
def copy_url(task_id: TaskID, url: str, path: str) -> None:
"""Copy data from a url to a local file."""
progress.console.log(f"Requesting {url}")
response = urlopen(url)
# This will break if the response doesn't contain content length
progress.update(task_id, total=int(response.info()["Content-length"]))
with open(path, "wb") as dest_file:
progress.start_task(task_id)
for data in iter(partial(response.read, 32768), b""):
dest_file.write(data)
progress.update(task_id, advance=len(data))
if done_event.is_set():
return
progress.console.log(f"Downloaded {path}")
def download(urls: Iterable[str], dest_dir: str):
"""Download multiple files to the given directory."""
with progress:
with ThreadPoolExecutor(max_workers=4) as pool:
for url in urls:
filename = url.split("/")[-1]
dest_path = os.path.join(dest_dir, filename)
task_id = progress.add_task("download", filename=filename, start=False)
pool.submit(copy_url, task_id, url, dest_path)
if __name__ == "__main__":
# Try with https://releases.ubuntu.com/noble/ubuntu-24.04-desktop-amd64.iso
# and https://releases.ubuntu.com/noble/ubuntu-24.04-live-server-amd64.iso
if sys.argv[1:]:
download(sys.argv[1:], "./")
else:
print("Usage:\n\tpython downloader.py URL1 URL2 URL3 (etc)")
| python | MIT | 53757bc234cf18977cade41a5b64f3abaccb0b85 | 2026-01-04T14:39:17.105051Z | false |
Textualize/rich | https://github.com/Textualize/rich/blob/53757bc234cf18977cade41a5b64f3abaccb0b85/examples/highlighter.py | examples/highlighter.py | """
This example demonstrates a simple text highlighter.
"""
from rich.console import Console
from rich.highlighter import RegexHighlighter
from rich.theme import Theme
class EmailHighlighter(RegexHighlighter):
"""Apply style to anything that looks like an email."""
base_style = "example."
highlights = [r"(?P<email>[\w-]+@([\w-]+\.)+[\w-]+)"]
theme = Theme({"example.email": "bold magenta"})
console = Console(highlighter=EmailHighlighter(), theme=theme)
console.print("Send funds to money@example.org")
| python | MIT | 53757bc234cf18977cade41a5b64f3abaccb0b85 | 2026-01-04T14:39:17.105051Z | false |
Textualize/rich | https://github.com/Textualize/rich/blob/53757bc234cf18977cade41a5b64f3abaccb0b85/examples/dynamic_progress.py | examples/dynamic_progress.py | """
Demonstrates how to create a dynamic group of progress bars,
showing multi-level progress for multiple tasks (installing apps in the example),
each of which consisting of multiple steps.
"""
import time
from rich.console import Group
from rich.panel import Panel
from rich.live import Live
from rich.progress import (
BarColumn,
Progress,
SpinnerColumn,
TextColumn,
TimeElapsedColumn,
)
def run_steps(name, step_times, app_steps_task_id):
"""Run steps for a single app, and update corresponding progress bars."""
for idx, step_time in enumerate(step_times):
# add progress bar for this step (time elapsed + spinner)
action = step_actions[idx]
step_task_id = step_progress.add_task("", action=action, name=name)
# run steps, update progress
for _ in range(step_time):
time.sleep(0.5)
step_progress.update(step_task_id, advance=1)
# stop and hide progress bar for this step when done
step_progress.stop_task(step_task_id)
step_progress.update(step_task_id, visible=False)
# also update progress bar for current app when step is done
app_steps_progress.update(app_steps_task_id, advance=1)
# progress bar for current app showing only elapsed time,
# which will stay visible when app is installed
current_app_progress = Progress(
TimeElapsedColumn(),
TextColumn("{task.description}"),
)
# progress bars for single app steps (will be hidden when step is done)
step_progress = Progress(
TextColumn(" "),
TimeElapsedColumn(),
TextColumn("[bold purple]{task.fields[action]}"),
SpinnerColumn("simpleDots"),
)
# progress bar for current app (progress in steps)
app_steps_progress = Progress(
TextColumn(
"[bold blue]Progress for app {task.fields[name]}: {task.percentage:.0f}%"
),
BarColumn(),
TextColumn("({task.completed} of {task.total} steps done)"),
)
# overall progress bar
overall_progress = Progress(
TimeElapsedColumn(), BarColumn(), TextColumn("{task.description}")
)
# group of progress bars;
# some are always visible, others will disappear when progress is complete
progress_group = Group(
Panel(Group(current_app_progress, step_progress, app_steps_progress)),
overall_progress,
)
# tuple specifies how long each step takes for that app
step_actions = ("downloading", "configuring", "building", "installing")
apps = [
("one", (2, 1, 4, 2)),
("two", (1, 3, 8, 4)),
("three", (2, 1, 3, 2)),
]
# create overall progress bar
overall_task_id = overall_progress.add_task("", total=len(apps))
# use own live instance as context manager with group of progress bars,
# which allows for running multiple different progress bars in parallel,
# and dynamically showing/hiding them
with Live(progress_group):
for idx, (name, step_times) in enumerate(apps):
# update message on overall progress bar
top_descr = "[bold #AAAAAA](%d out of %d apps installed)" % (idx, len(apps))
overall_progress.update(overall_task_id, description=top_descr)
# add progress bar for steps of this app, and run the steps
current_task_id = current_app_progress.add_task("Installing app %s" % name)
app_steps_task_id = app_steps_progress.add_task(
"", total=len(step_times), name=name
)
run_steps(name, step_times, app_steps_task_id)
# stop and hide steps progress bar for this specific app
app_steps_progress.update(app_steps_task_id, visible=False)
current_app_progress.stop_task(current_task_id)
current_app_progress.update(
current_task_id, description="[bold green]App %s installed!" % name
)
# increase overall progress now this task is done
overall_progress.update(overall_task_id, advance=1)
# final update for message on overall progress bar
overall_progress.update(
overall_task_id, description="[bold green]%s apps installed, done!" % len(apps)
)
| python | MIT | 53757bc234cf18977cade41a5b64f3abaccb0b85 | 2026-01-04T14:39:17.105051Z | false |
Textualize/rich | https://github.com/Textualize/rich/blob/53757bc234cf18977cade41a5b64f3abaccb0b85/examples/print_calendar.py | examples/print_calendar.py | """
Builds calendar layout using Columns and Tables.
Usage:
python print_calendar.py [YEAR]
Example:
python print_calendar.py 2021
"""
import argparse
import calendar
from datetime import datetime
from rich.align import Align
from rich import box
from rich.columns import Columns
from rich.console import Console
from rich.table import Table
from rich.text import Text
def print_calendar(year):
"""Print a calendar for a given year."""
today = datetime.today()
year = int(year)
cal = calendar.Calendar()
today_tuple = today.day, today.month, today.year
tables = []
for month in range(1, 13):
table = Table(
title=f"{calendar.month_name[month]} {year}",
style="green",
box=box.SIMPLE_HEAVY,
padding=0,
)
for week_day in cal.iterweekdays():
table.add_column(
"{:.3}".format(calendar.day_name[week_day]), justify="right"
)
month_days = cal.monthdayscalendar(year, month)
for weekdays in month_days:
days = []
for index, day in enumerate(weekdays):
day_label = Text(str(day or ""), style="magenta")
if index in (5, 6):
day_label.stylize("blue")
if day and (day, month, year) == today_tuple:
day_label.stylize("white on dark_red")
days.append(day_label)
table.add_row(*days)
tables.append(Align.center(table))
console = Console()
columns = Columns(tables, padding=1, expand=True)
console.rule(str(year))
console.print()
console.print(columns)
console.rule(str(year))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Rich calendar")
parser.add_argument("year", metavar="year", type=int)
args = parser.parse_args()
print_calendar(args.year)
| python | MIT | 53757bc234cf18977cade41a5b64f3abaccb0b85 | 2026-01-04T14:39:17.105051Z | false |
Textualize/rich | https://github.com/Textualize/rich/blob/53757bc234cf18977cade41a5b64f3abaccb0b85/examples/file_progress.py | examples/file_progress.py | from time import sleep
from urllib.request import urlopen
from rich.progress import wrap_file
# Read a URL with urlopen
response = urlopen("https://www.textualize.io")
# Get the size from the headers
size = int(response.headers["Content-Length"])
# Wrap the response so that it update progress
with wrap_file(response, size) as file:
for line in file:
print(line.decode("utf-8"), end="")
sleep(0.1)
| python | MIT | 53757bc234cf18977cade41a5b64f3abaccb0b85 | 2026-01-04T14:39:17.105051Z | false |
Textualize/rich | https://github.com/Textualize/rich/blob/53757bc234cf18977cade41a5b64f3abaccb0b85/examples/status.py | examples/status.py | from time import sleep
from rich.console import Console
console = Console()
console.print()
tasks = [f"task {n}" for n in range(1, 11)]
with console.status("[bold green]Working on tasks...") as status:
while tasks:
task = tasks.pop(0)
sleep(1)
console.log(f"{task} complete")
| python | MIT | 53757bc234cf18977cade41a5b64f3abaccb0b85 | 2026-01-04T14:39:17.105051Z | false |
Textualize/rich | https://github.com/Textualize/rich/blob/53757bc234cf18977cade41a5b64f3abaccb0b85/examples/spinners.py | examples/spinners.py | from time import sleep
from rich.columns import Columns
from rich.panel import Panel
from rich.live import Live
from rich.text import Text
from rich.spinner import Spinner, SPINNERS

# One demo spinner per registered name, labelled with its repr in green.
spinner_demos = []
for spinner_name in sorted(SPINNERS):
    spinner_demos.append(
        Spinner(spinner_name, text=Text(repr(spinner_name), style="green"))
    )
all_spinners = Columns(spinner_demos, column_first=True, expand=True)

# Keep refreshing the panel until interrupted; the spinners animate themselves.
with Live(
    Panel(all_spinners, title="Spinners", border_style="blue"),
    refresh_per_second=20,
) as live:
    while True:
        sleep(0.1)
| python | MIT | 53757bc234cf18977cade41a5b64f3abaccb0b85 | 2026-01-04T14:39:17.105051Z | false |
Textualize/rich | https://github.com/Textualize/rich/blob/53757bc234cf18977cade41a5b64f3abaccb0b85/examples/listdir.py | examples/listdir.py | """
A very simple `ls` clone.
If your terminal supports hyperlinks you should be able to launch files by clicking the filename
(usually with cmd / ctrl).
"""
import os
import sys

from rich import print
from rich.columns import Columns
from rich.text import Text

if len(sys.argv) < 2:
    # No directory given: show usage instead of listing.
    print("Usage: python listdir.py DIRECTORY")
else:
    root_path = sys.argv[1]

    def make_filename_text(filename):
        # Build a styled, clickable Text for one directory entry:
        # bold blue for directories, a file:// hyperlink, and a bold extension.
        path = os.path.abspath(os.path.join(root_path, filename))
        text = Text(filename, style="bold blue" if os.path.isdir(path) else "default")
        text.stylize(f"link file://{path}")
        text.highlight_regex(r"\..*?$", "bold")
        return text

    # Hidden (dot-prefixed) entries are skipped; sort case-insensitively.
    visible = [
        name for name in os.listdir(root_path) if not name.startswith(".")
    ]
    visible.sort(key=lambda name: name.lower())
    labels = [make_filename_text(name) for name in visible]
    print(Columns(labels, equal=True, column_first=True))
| python | MIT | 53757bc234cf18977cade41a5b64f3abaccb0b85 | 2026-01-04T14:39:17.105051Z | false |
Textualize/rich | https://github.com/Textualize/rich/blob/53757bc234cf18977cade41a5b64f3abaccb0b85/examples/repr.py | examples/repr.py | import rich.repr
@rich.repr.auto
class Bird:
    """Simple data holder used to demo auto-generated rich reprs."""

    def __init__(self, name, eats=None, fly=True, extinct=False):
        self.name = name
        # Defensive copy; an empty list when no diet is supplied.
        self.eats = [] if not eats else list(eats)
        self.fly = fly
        self.extinct = extinct


# The generated repr works with plain print() too -- rich just renders it nicely.
# Try commenting out the following line
from rich import print

BIRDS = {
    "gull": Bird("gull", eats=["fish", "chips", "ice cream", "sausage rolls"]),
    "penguin": Bird("penguin", eats=["fish"], fly=False),
    "dodo": Bird("dodo", eats=["fruit"], fly=False, extinct=True),
}

print(BIRDS)
| python | MIT | 53757bc234cf18977cade41a5b64f3abaccb0b85 | 2026-01-04T14:39:17.105051Z | false |
Textualize/rich | https://github.com/Textualize/rich/blob/53757bc234cf18977cade41a5b64f3abaccb0b85/examples/live_progress.py | examples/live_progress.py | """
Demonstrates the use of multiple Progress instances in a single Live display.
"""
from time import sleep

from rich.live import Live
from rich.panel import Panel
from rich.progress import Progress, SpinnerColumn, BarColumn, TextColumn
from rich.table import Table

# Per-job progress bars: description, spinner, bar and percentage columns.
job_progress = Progress(
    "{task.description}",
    SpinnerColumn(),
    BarColumn(),
    TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
)
job1 = job_progress.add_task("[green]Cooking")  # no total given -> Progress default
job2 = job_progress.add_task("[magenta]Baking", total=200)
job3 = job_progress.add_task("[cyan]Mixing", total=400)

# The overall bar spans the summed totals of all individual jobs.
total = sum(task.total for task in job_progress.tasks)
overall_progress = Progress()
overall_task = overall_progress.add_task("All Jobs", total=int(total))

# Lay the two Progress renderables side by side inside framed panels.
progress_table = Table.grid()
progress_table.add_row(
    Panel.fit(
        overall_progress, title="Overall Progress", border_style="green", padding=(2, 2)
    ),
    Panel.fit(job_progress, title="[b]Jobs", border_style="red", padding=(1, 2)),
)

with Live(progress_table, refresh_per_second=10):
    while not overall_progress.finished:
        sleep(0.1)
        # Advance every unfinished job by one step...
        for job in job_progress.tasks:
            if not job.finished:
                job_progress.advance(job.id)
        # ...then mirror the combined completion onto the overall bar.
        completed = sum(task.completed for task in job_progress.tasks)
        overall_progress.update(overall_task, completed=completed)
| python | MIT | 53757bc234cf18977cade41a5b64f3abaccb0b85 | 2026-01-04T14:39:17.105051Z | false |
Textualize/rich | https://github.com/Textualize/rich/blob/53757bc234cf18977cade41a5b64f3abaccb0b85/examples/table_movie.py | examples/table_movie.py | """Same as the table_movie.py but uses Live to update"""
import time
from contextlib import contextmanager
from rich import box
from rich.align import Align
from rich.console import Console
from rich.live import Live
from rich.table import Table
from rich.text import Text
# Rows: (release date, title markup, budget, opening weekend, box office).
# Titles mix "[/]" and "[/b]" closing tags; both are valid Rich markup.
TABLE_DATA = [
    [
        "May 25, 1977",
        "Star Wars Ep. [b]IV[/]: [i]A New Hope",
        "$11,000,000",
        "$1,554,475",
        "$775,398,007",
    ],
    [
        "May 21, 1980",
        "Star Wars Ep. [b]V[/]: [i]The Empire Strikes Back",
        "$23,000,000",
        "$4,910,483",
        "$547,969,004",
    ],
    [
        "May 25, 1983",
        "Star Wars Ep. [b]VI[/b]: [i]Return of the Jedi",
        "$32,500,000",
        "$23,019,618",
        "$475,106,177",
    ],
    [
        "May 19, 1999",
        # Fixed displayed title capitalization: "phantom" -> "Phantom".
        "Star Wars Ep. [b]I[/b]: [i]The Phantom Menace",
        "$115,000,000",
        "$64,810,870",
        "$1,027,044,677",
    ],
    [
        "May 16, 2002",
        "Star Wars Ep. [b]II[/b]: [i]Attack of the Clones",
        "$115,000,000",
        "$80,027,814",
        "$656,695,615",
    ],
    [
        "May 19, 2005",
        "Star Wars Ep. [b]III[/b]: [i]Revenge of the Sith",
        "$115,500,000",
        "$380,270,577",
        "$848,998,877",
    ],
]
console = Console()
# Seconds per animation "beat"; each scripted step below pauses a multiple of this.
BEAT_TIME = 0.04
@contextmanager
def beat(length: int = 1):
    """Context manager that pauses for *length* animation beats after its body runs.

    Note: the raw (undecorated) function is a generator, so the previous
    ``-> None`` return annotation was incorrect and has been removed.
    """
    yield
    time.sleep(length * BEAT_TIME)
table = Table(show_footer=False)
table_centered = Align.center(table)
console.clear()
# Build the table incrementally inside Live so each mutation animates on screen.
with Live(table_centered, console=console, screen=False, refresh_per_second=20):
    with beat(10):
        table.add_column("Release Date", no_wrap=True)
    with beat(10):
        table.add_column("Title", Text.from_markup("[b]Total", justify="right"))
    with beat(10):
        table.add_column("Budget", "[u]$412,000,000", no_wrap=True)
    with beat(10):
        table.add_column("Opening Weekend", "[u]$577,703,455", no_wrap=True)
    with beat(10):
        table.add_column("Box Office", "[u]$4,331,212,357", no_wrap=True)
    with beat(10):
        table.title = "Star Wars Box Office"
    with beat(10):
        table.title = (
            "[not italic]:popcorn:[/] Star Wars Box Office [not italic]:popcorn:[/]"
        )
    with beat(10):
        table.caption = "Made with Rich"
    with beat(10):
        table.caption = "Made with [b]Rich[/b]"
    with beat(10):
        table.caption = "Made with [b magenta not dim]Rich[/]"
    # Reveal the data one row at a time.
    for row in TABLE_DATA:
        with beat(10):
            table.add_row(*row)
    with beat(10):
        table.show_footer = True
    table_width = console.measure(table).maximum  # NOTE(review): unused — appears vestigial
    # Right-align the money columns, then color headers/bodies per column.
    with beat(10):
        table.columns[2].justify = "right"
    with beat(10):
        table.columns[3].justify = "right"
    with beat(10):
        table.columns[4].justify = "right"
    with beat(10):
        table.columns[2].header_style = "bold red"
    with beat(10):
        table.columns[3].header_style = "bold green"
    with beat(10):
        table.columns[4].header_style = "bold blue"
    with beat(10):
        table.columns[2].style = "red"
    with beat(10):
        table.columns[3].style = "green"
    with beat(10):
        table.columns[4].style = "blue"
    with beat(10):
        table.columns[0].style = "cyan"
        table.columns[0].header_style = "bold cyan"
    with beat(10):
        table.columns[1].style = "magenta"
        table.columns[1].header_style = "bold magenta"
    with beat(10):
        table.columns[2].footer_style = "bright_red"
    with beat(10):
        table.columns[3].footer_style = "bright_green"
    with beat(10):
        table.columns[4].footer_style = "bright_blue"
    with beat(10):
        table.row_styles = ["none", "dim"]
    with beat(10):
        table.border_style = "bright_yellow"
    # Cycle through progressively lighter box-drawing styles.
    for box_style in [
        box.SQUARE,
        box.MINIMAL,
        box.SIMPLE,
        box.SIMPLE_HEAD,
    ]:
        with beat(10):
            table.box = box_style
    with beat(10):
        table.pad_edge = False
    # Animate width: grow to console width, shrink back, squeeze to 90, restore,
    # then release the explicit width so the table auto-sizes again.
    original_width = console.measure(table).maximum
    for width in range(original_width, console.width, 2):
        with beat(1):
            table.width = width
    for width in range(console.width, original_width, -2):
        with beat(1):
            table.width = width
    for width in range(original_width, 90, -2):
        with beat(1):
            table.width = width
    for width in range(90, original_width + 1, 2):
        with beat(1):
            table.width = width
    with beat(2):
        table.width = None
| python | MIT | 53757bc234cf18977cade41a5b64f3abaccb0b85 | 2026-01-04T14:39:17.105051Z | false |
Textualize/rich | https://github.com/Textualize/rich/blob/53757bc234cf18977cade41a5b64f3abaccb0b85/examples/padding.py | examples/padding.py | from rich import print
from rich.padding import Padding

# Two cells of vertical and four of horizontal padding around "Hello",
# on a blue background, sized to the content (expand=False).
padded = Padding("Hello", (2, 4), style="on blue", expand=False)
print(padded)
| python | MIT | 53757bc234cf18977cade41a5b64f3abaccb0b85 | 2026-01-04T14:39:17.105051Z | false |
Textualize/rich | https://github.com/Textualize/rich/blob/53757bc234cf18977cade41a5b64f3abaccb0b85/examples/layout.py | examples/layout.py | """
Demonstrates a dynamic Layout
"""
from datetime import datetime
from time import sleep

from rich.align import Align
from rich.console import Console
from rich.layout import Layout
from rich.live import Live
from rich.text import Text

console = Console()
layout = Layout()

# Three vertical bands: a one-line header, a flexible main area, a 10-line footer.
layout.split(
    Layout(name="header", size=1),
    Layout(ratio=1, name="main"),
    Layout(size=10, name="footer"),
)
# Main area: a side column (itself split in two) next to a double-width body.
layout["main"].split_row(Layout(name="side"), Layout(name="body", ratio=2))
layout["side"].split(Layout(), Layout())
layout["body"].update(
    Align.center(
        Text(
            """This is a demonstration of rich.Layout\n\nHit Ctrl+C to exit""",
            justify="center",
        ),
        vertical="middle",
    )
)


class Clock:
    """Renders the time in the center of the screen."""

    def __rich__(self) -> Text:
        # Re-evaluated on every Live refresh, so the clock updates itself.
        return Text(datetime.now().ctime(), style="bold magenta", justify="center")


layout["header"].update(Clock())

# Full-screen live display; sleep until the user interrupts with Ctrl+C.
with Live(layout, screen=True, redirect_stderr=False) as live:
    try:
        while True:
            sleep(1)
    except KeyboardInterrupt:
        pass
| python | MIT | 53757bc234cf18977cade41a5b64f3abaccb0b85 | 2026-01-04T14:39:17.105051Z | false |
Textualize/rich | https://github.com/Textualize/rich/blob/53757bc234cf18977cade41a5b64f3abaccb0b85/examples/cp_progress.py | examples/cp_progress.py | """
A very minimal `cp` clone that displays a progress bar.
"""
import os
import shutil
import sys

from rich.progress import Progress

if __name__ == "__main__":
    if len(sys.argv) != 3:
        # Wrong argument count: print usage instead of copying.
        print("Copy a file with a progress bar.")
        print("Usage:\n\tpython cp_progress.py SRC DST")
    else:
        with Progress() as progress:
            source_name = os.path.basename(sys.argv[1])
            # progress.open() counts bytes read from SRC as the bar's progress.
            with progress.open(sys.argv[1], "rb", description=source_name) as src:
                with open(sys.argv[2], "wb") as dst:
                    shutil.copyfileobj(src, dst)
| python | MIT | 53757bc234cf18977cade41a5b64f3abaccb0b85 | 2026-01-04T14:39:17.105051Z | false |
minimaxir/big-list-of-naughty-strings | https://github.com/minimaxir/big-list-of-naughty-strings/blob/db33ec7b1d5d9616a88c76394b7d0897bd0b97eb/scripts/txt_to_json.py | scripts/txt_to_json.py | ### Quick Python Script to convert the Big List of Naughty Strings into a JSON file
###
### By Max Woolf
import json

# Read the naughty strings, one per line; UTF-8 is required because the file
# is full of non-ASCII test strings.
with open('../blns.txt', 'r', encoding='utf-8') as f:
    content = f.readlines()

# readlines() leaves trailing newline characters; strip them out.
content = [x.strip('\n') for x in content]

# Remove blank lines and comments.
content = [x for x in content if x and not x.startswith('#')]

# Re-insert a single empty string, since the filter above removed them all.
content.insert(0, "")

# json.dump writes str, so the output file must be opened in TEXT mode
# (the previous 'wb' raised TypeError on Python 3). json handles all
# escaping, including backslashes, itself.
with open('../blns.json', 'w', encoding='utf-8') as f:
    json.dump(content, f, indent=2, ensure_ascii=False)
| python | MIT | db33ec7b1d5d9616a88c76394b7d0897bd0b97eb | 2026-01-04T14:39:35.128087Z | false |
minimaxir/big-list-of-naughty-strings | https://github.com/minimaxir/big-list-of-naughty-strings/blob/db33ec7b1d5d9616a88c76394b7d0897bd0b97eb/naughtystrings/__init__.py | naughtystrings/__init__.py | import os
FILEPATH = os.path.join(
    os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'blns.txt')
"""Path to the file"""


def naughty_strings(filepath=FILEPATH):
    """Get the list of naughty_strings.

    By default this will get the strings from the blns.txt file
    Code is a simple port of what is already in the /scripts directory

    :param filepath: Optional filepath the the blns.txt file
    :returns: The list of naughty strings
    """
    # blns.txt is full of non-ASCII strings: read it as UTF-8 explicitly so
    # the result does not depend on the platform's locale encoding.
    with open(filepath, 'r', encoding='utf-8') as f:
        # put all lines in the file into a Python list
        strings = f.readlines()

    # readlines() leaves trailing newline characters; strip them out
    strings = [x.strip(u'\n') for x in strings]

    # remove empty-lines and comments
    strings = [x for x in strings if x and not x.startswith(u'#')]

    # insert empty string since all were removed by the filter above
    strings.insert(0, u"")

    return strings
| python | MIT | db33ec7b1d5d9616a88c76394b7d0897bd0b97eb | 2026-01-04T14:39:35.128087Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/setup.py | setup.py | """Setup script for MetaGPT."""
import subprocess
from pathlib import Path
from setuptools import Command, find_packages, setup
class InstallMermaidCLI(Command):
    """A custom command to run `npm install -g @mermaid-js/mermaid-cli` via a subprocess."""

    description = "install mermaid-cli"
    user_options = []

    def initialize_options(self):
        """No options to initialize (required abstract method of Command)."""

    def finalize_options(self):
        """No options to finalize (required abstract method of Command)."""

    def run(self):
        """Install mermaid-cli globally via npm; report failures without raising."""
        try:
            subprocess.check_call(["npm", "install", "-g", "@mermaid-js/mermaid-cli"])
        except subprocess.CalledProcessError as e:
            # NOTE: output is only populated when the call captures output;
            # npm's own stderr has already been shown on the console.
            print(f"Error occurred: {e.output}")
# Resolve paths relative to this setup.py so builds work from any CWD.
here = Path(__file__).resolve().parent
# Long description (PyPI page) and pinned runtime requirements, read at build time.
long_description = (here / "README.md").read_text(encoding="utf-8")
requirements = (here / "requirements.txt").read_text(encoding="utf-8").splitlines()
# Optional feature sets, installable as `pip install metagpt[<name>]`.
extras_require = {
    "selenium": ["selenium>4", "webdriver_manager", "beautifulsoup4"],
    "search-google": ["google-api-python-client==2.94.0"],
    "search-ddg": ["duckduckgo-search~=4.1.1"],
    # "ocr": ["paddlepaddle==2.4.2", "paddleocr~=2.7.3", "tabulate==0.9.0"],
    "rag": [
        "llama-index-core==0.10.15",
        "llama-index-embeddings-azure-openai==0.1.6",
        "llama-index-embeddings-openai==0.1.5",
        "llama-index-embeddings-gemini==0.1.6",
        "llama-index-embeddings-ollama==0.1.2",
        "llama-index-llms-azure-openai==0.1.4",
        "llama-index-readers-file==0.1.4",
        "llama-index-retrievers-bm25==0.1.3",
        "llama-index-vector-stores-faiss==0.1.1",
        "llama-index-vector-stores-elasticsearch==0.1.6",
        "llama-index-vector-stores-chroma==0.1.6",
        "llama-index-postprocessor-cohere-rerank==0.1.4",
        "llama-index-postprocessor-colbert-rerank==0.1.1",
        "llama-index-postprocessor-flag-embedding-reranker==0.1.2",
        "docx2txt==0.8",
    ],
}

# "test" aggregates every extra above (deduplicated and sorted so the metadata
# is reproducible across builds) plus test-only tooling.
extras_require["test"] = [
    *sorted(set(i for j in extras_require.values() for i in j)),
    "pytest",
    "pytest-asyncio",
    "pytest-cov",
    "pytest-mock",
    "pytest-html",
    "pytest-xdist",
    "pytest-timeout",
    "connexion[uvicorn]~=3.0.5",
    "azure-cognitiveservices-speech~=1.31.0",
    "aioboto3~=12.4.0",
    "gradio==3.0.0",
    "google-api-core==2.17.1",
    "protobuf~=4.25.5",
    "pylint==3.0.3",
    "pybrowsers",
]

extras_require["pyppeteer"] = [
    "pyppeteer>=1.0.2"
]  # pyppeteer is unmaintained and there are conflicts with dependencies

# Fix: this value was previously wrapped in a one-element tuple by a stray
# trailing comma, which made the "dev" extra an invalid (non-list) value.
extras_require["dev"] = ["pylint~=3.0.3", "black~=23.3.0", "isort~=5.12.0", "pre-commit~=3.6.0"]

extras_require["android_assistant"] = [
    "pyshine==0.0.9",
    "opencv-python==4.6.0.66",
    "protobuf<3.20,>=3.9.2",
    "modelscope",
    "tensorflow==2.9.1; os_name == 'linux'",
    "tensorflow==2.9.1; os_name == 'win32'",
    "tensorflow-macos==2.9; os_name == 'darwin'",
    "keras==2.9.0",
    "torch",
    "torchvision",
    "transformers",
    "opencv-python",
    "matplotlib",
    "pycocotools",
    "SentencePiece",
    "tf_slim",
    "tf_keras",
    "pyclipper",
    "shapely",
    "groundingdino-py",
    "datasets==2.18.0",
    "clip-openai",
]
# Package metadata, dependencies and entry points; builds the `metagpt` console
# script and registers the custom `install_mermaid` command.
setup(
    name="metagpt",
    version="1.0.0",
    description="The Multi-Agent Framework",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/geekan/MetaGPT",
    author="Alexander Wu",
    author_email="alexanderwu@deepwisdom.ai",
    license="MIT",
    keywords="metagpt multi-agent multi-role programming gpt llm metaprogramming",
    packages=find_packages(exclude=["contrib", "docs", "examples", "tests*"]),
    python_requires=">=3.9, <3.12",
    install_requires=requirements,
    extras_require=extras_require,
    cmdclass={
        "install_mermaid": InstallMermaidCLI,
    },
    entry_points={
        "console_scripts": [
            "metagpt=metagpt.software_company:app",
        ],
    },
    include_package_data=True,
)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/conftest.py | tests/conftest.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/1 12:10
@Author : alexanderwu
@File : conftest.py
"""
import asyncio
import json
import logging
import os
import re
import uuid
from typing import Callable
import aiohttp.web
import pytest
from metagpt.const import DEFAULT_WORKSPACE_ROOT, TEST_DATA_PATH
from metagpt.context import Context as MetagptContext
from metagpt.llm import LLM
from metagpt.logs import logger
from metagpt.utils.git_repository import GitRepository
from tests.mock.mock_aiohttp import MockAioResponse
from tests.mock.mock_curl_cffi import MockCurlCffiResponse
from tests.mock.mock_httplib2 import MockHttplib2Response
from tests.mock.mock_llm import MockLLM
# Accumulates only the responses newly produced in this session; dumped to
# tests/data/rsp_cache_new.json by the rsp_cache fixture at teardown.
RSP_CACHE_NEW = {}  # used globally for producing new and useful only response cache
# When non-zero, a cache miss during a test may call the real OpenAI API.
ALLOW_OPENAI_API_CALL = int(
    os.environ.get("ALLOW_OPENAI_API_CALL", 1)
)  # NOTE: should change to default 0 (False) once mock is complete
@pytest.fixture(scope="session")
def rsp_cache():
rsp_cache_file_path = TEST_DATA_PATH / "rsp_cache.json" # read repo-provided
new_rsp_cache_file_path = TEST_DATA_PATH / "rsp_cache_new.json" # exporting a new copy
if os.path.exists(rsp_cache_file_path):
with open(rsp_cache_file_path, "r", encoding="utf-8") as f1:
rsp_cache_json = json.load(f1)
else:
rsp_cache_json = {}
yield rsp_cache_json
with open(rsp_cache_file_path, "w", encoding="utf-8") as f2:
json.dump(rsp_cache_json, f2, indent=4, ensure_ascii=False)
with open(new_rsp_cache_file_path, "w", encoding="utf-8") as f2:
json.dump(RSP_CACHE_NEW, f2, indent=4, ensure_ascii=False)
# Hook to capture the test result
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    # Wrapper hook: let pytest build the report, then stash the "call"-phase
    # outcome on the item so fixtures can check pass/fail at teardown.
    outcome = yield
    rep = outcome.get_result()
    if rep.when == "call":
        item.test_outcome = rep
@pytest.fixture(scope="function", autouse=True)
def llm_mock(rsp_cache, mocker, request):
llm = MockLLM(allow_open_api_call=ALLOW_OPENAI_API_CALL)
llm.rsp_cache = rsp_cache
mocker.patch("metagpt.provider.base_llm.BaseLLM.aask", llm.aask)
mocker.patch("metagpt.provider.base_llm.BaseLLM.aask_batch", llm.aask_batch)
mocker.patch("metagpt.provider.openai_api.OpenAILLM.aask_code", llm.aask_code)
yield mocker
if hasattr(request.node, "test_outcome") and request.node.test_outcome.passed:
if llm.rsp_candidates:
for rsp_candidate in llm.rsp_candidates:
cand_key = list(rsp_candidate.keys())[0]
cand_value = list(rsp_candidate.values())[0]
if cand_key not in llm.rsp_cache:
logger.info(f"Added '{cand_key[:100]} ... -> {str(cand_value)[:20]} ...' to response cache")
llm.rsp_cache.update(rsp_candidate)
RSP_CACHE_NEW.update(rsp_candidate)
class Context:
    """Lightweight holder for a shared LLM handle, used by the llm_api fixture."""

    def __init__(self):
        self._llm_ui = None  # NOTE(review): never read here — presumably a UI-facing LLM slot
        self._llm_api = LLM()

    @property
    def llm_api(self):
        # 1. Initialize the llm with cached results
        # 2. If the query is cached, return the cached result directly
        # 3. If the query is not cached, call llm_api and return its result
        # 4. If the query is cached, update the cached result
        return self._llm_api
@pytest.fixture(scope="package")
def llm_api():
logger.info("Setting up the test")
g_context = Context()
yield g_context.llm_api
logger.info("Tearing down the test")
@pytest.fixture
def proxy():
    """Factory for a minimal one-shot HTTP/HTTPS forward proxy on a random local port."""
    # Parses the request line of the first packet, capturing method, host and port.
    pattern = re.compile(
        rb"(?P<method>[a-zA-Z]+) (?P<uri>(\w+://)?(?P<host>[^\s\'\"<>\[\]{}|/:]+)(:(?P<port>\d+))?[^\s\'\"<>\[\]{}|]*) "
    )

    async def pipe(reader, writer):
        # Copy bytes in one direction until EOF, then close the destination side.
        while not reader.at_eof():
            writer.write(await reader.read(2048))
        writer.close()
        await writer.wait_closed()

    async def handle_client(reader, writer):
        data = await reader.readuntil(b"\r\n\r\n")
        infos = pattern.match(data)
        host, port = infos.group("host"), infos.group("port")
        # NOTE(review): host is a bytes object (bytes regex), so this prints e.g. "Proxy: b'example.com'".
        print(f"Proxy: {host}")  # checking with capfd fixture
        port = int(port) if port else 80
        remote_reader, remote_writer = await asyncio.open_connection(host, port)
        if data.startswith(b"CONNECT"):
            # HTTPS tunnel: acknowledge, then relay raw bytes in both directions.
            writer.write(b"HTTP/1.1 200 Connection Established\r\n\r\n")
        else:
            # Plain HTTP: forward the already-consumed request before relaying.
            remote_writer.write(data)
        await asyncio.gather(pipe(reader, remote_writer), pipe(remote_reader, writer))

    async def proxy_func():
        server = await asyncio.start_server(handle_client, "127.0.0.1", 0)
        return server, "http://{}:{}".format(*server.sockets[0].getsockname())

    return proxy_func
# see https://github.com/Delgan/loguru/issues/59#issuecomment-466591978
@pytest.fixture
def loguru_caplog(caplog):
    """Bridge loguru records into stdlib logging so pytest's caplog captures them."""

    class PropagateHandler(logging.Handler):
        def emit(self, record):
            logging.getLogger(record.name).handle(record)

    logger.add(PropagateHandler(), format="{message}")
    yield caplog
@pytest.fixture(scope="function")
def context(request):
ctx = MetagptContext()
repo = GitRepository(local_path=DEFAULT_WORKSPACE_ROOT / f"unittest/{uuid.uuid4().hex}")
ctx.config.project_path = str(repo.workdir)
# Destroy git repo at the end of the test session.
def fin():
if ctx.config.project_path:
git_repo = GitRepository(ctx.config.project_path)
git_repo.delete_repository()
# Register the function for destroying the environment.
request.addfinalizer(fin)
return ctx
@pytest.fixture(scope="session", autouse=True)
def init_config():
pass
@pytest.fixture(scope="function")
def new_filename(mocker):
# NOTE: Mock new filename to make reproducible llm aask, should consider changing after implementing requirement segmentation
mocker.patch("metagpt.utils.file_repository.FileRepository.new_filename", lambda: "20240101")
yield mocker
def _rsp_cache(name):
    """Yield the JSON response cache named *name*, persisting it back on teardown.

    Shared generator behind the search/mermaid cache fixtures below.
    """
    rsp_cache_file_path = TEST_DATA_PATH / f"{name}.json"  # read repo-provided
    if os.path.exists(rsp_cache_file_path):
        # Match the rsp_cache fixture: always read/write the cache as UTF-8 so
        # results do not depend on the platform's locale encoding.
        with open(rsp_cache_file_path, "r", encoding="utf-8") as f1:
            rsp_cache_json = json.load(f1)
    else:
        rsp_cache_json = {}
    yield rsp_cache_json
    with open(rsp_cache_file_path, "w", encoding="utf-8") as f2:
        json.dump(rsp_cache_json, f2, indent=4, ensure_ascii=False)
@pytest.fixture(scope="session")
def search_rsp_cache():
yield from _rsp_cache("search_rsp_cache")
@pytest.fixture(scope="session")
def mermaid_rsp_cache():
yield from _rsp_cache("mermaid_rsp_cache")
@pytest.fixture
def aiohttp_mocker(mocker):
    # Fresh subclass per test so class-level cache/check_funcs don't leak between tests.
    MockResponse = type("MockResponse", (MockAioResponse,), {})

    def wrap(method):
        # Bind the HTTP verb so get/post/... route through MockResponse.
        def run(self, url, **kwargs):
            return MockResponse(self, method, url, **kwargs)

        return run

    mocker.patch("aiohttp.ClientSession.request", MockResponse)
    for i in ["get", "post", "delete", "patch"]:
        mocker.patch(f"aiohttp.ClientSession.{i}", wrap(i))
    yield MockResponse
@pytest.fixture
def curl_cffi_mocker(mocker):
    # Fresh subclass per test so class-level cache/check_funcs don't leak between tests.
    MockResponse = type("MockResponse", (MockCurlCffiResponse,), {})

    def request(self, *args, **kwargs):
        return MockResponse(self, *args, **kwargs)

    mocker.patch("curl_cffi.requests.Session.request", request)
    yield MockResponse
@pytest.fixture
def httplib2_mocker(mocker):
    # Fresh subclass per test so class-level cache/check_funcs don't leak between tests.
    MockResponse = type("MockResponse", (MockHttplib2Response,), {})

    def request(self, *args, **kwargs):
        return MockResponse(self, *args, **kwargs)

    mocker.patch("httplib2.Http.request", request)
    yield MockResponse
@pytest.fixture
def search_engine_mocker(aiohttp_mocker, curl_cffi_mocker, httplib2_mocker, search_rsp_cache):
    # aiohttp_mocker: serpapi/serper
    # httplib2_mocker: google
    # curl_cffi_mocker: ddg
    check_funcs: dict[tuple[str, str], Callable[[dict], str]] = {}
    # All three transports share one response cache and one per-request key-function table.
    aiohttp_mocker.rsp_cache = httplib2_mocker.rsp_cache = curl_cffi_mocker.rsp_cache = search_rsp_cache
    aiohttp_mocker.check_funcs = httplib2_mocker.check_funcs = curl_cffi_mocker.check_funcs = check_funcs
    yield check_funcs
@pytest.fixture
def http_server():
    # Returns an async factory that starts a local aiohttp server on a free port
    # and hands back (site, base_url).
    async def start(handler=None):
        if handler is None:
            # Default handler: serve a minimal static MetaGPT page.
            async def handler(request):
                return aiohttp.web.Response(
                    text="""<!DOCTYPE html><html lang="en"><head><meta charset="UTF-8">
                <title>MetaGPT</title></head><body><h1>MetaGPT</h1></body></html>""",
                    content_type="text/html",
                )

        server = aiohttp.web.Server(handler)
        runner = aiohttp.web.ServerRunner(server)
        await runner.setup()
        site = aiohttp.web.TCPSite(runner, "127.0.0.1", 0)
        await site.start()
        # Port 0 above means "pick a free port"; recover the real one here.
        _, port, *_ = site._server.sockets[0].getsockname()
        return site, f"http://127.0.0.1:{port}"

    return start
@pytest.fixture
def mermaid_mocker(aiohttp_mocker, mermaid_rsp_cache):
    # Route mocked aiohttp traffic (mermaid rendering calls) to the mermaid cache.
    check_funcs: dict[tuple[str, str], Callable[[dict], str]] = {}
    aiohttp_mocker.rsp_cache = mermaid_rsp_cache
    aiohttp_mocker.check_funcs = check_funcs
    yield check_funcs
@pytest.fixture
def git_dir():
    """Fixture to get the unittest directory."""
    unique_name = uuid.uuid4().hex
    workdir = DEFAULT_WORKSPACE_ROOT / f"unittest/{unique_name}"
    workdir.mkdir(parents=True, exist_ok=True)
    return workdir
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/__init__.py | tests/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/4/29 15:53
@Author : alexanderwu
@File : __init__.py
"""
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/mock/mock_curl_cffi.py | tests/mock/mock_curl_cffi.py | import json
from typing import Callable
from curl_cffi import requests
# Keep a reference to the real implementation before tests patch Session.request.
origin_request = requests.Session.request


class MockCurlCffiResponse(requests.Response):
    """Cache-backed stand-in for curl_cffi responses."""

    # Optional per-(method, url) functions that reduce kwargs to a stable cache-key part.
    check_funcs: dict[tuple[str, str], Callable[[dict], str]] = {}
    # Shared cache mapping request keys to decoded response bodies.
    rsp_cache: dict[str, str] = {}
    name = "curl-cffi"

    def __init__(self, session, method, url, **kwargs) -> None:
        super().__init__()
        fn = self.check_funcs.get((method, url))
        self.key = f"{self.name}-{method}-{url}-{fn(kwargs) if fn else json.dumps(kwargs, sort_keys=True)}"
        self.response = None  # NOTE(review): never read afterwards — appears vestigial
        if self.key not in self.rsp_cache:
            # Cache miss: perform the real request once and memoize its body.
            response = origin_request(session, method, url, **kwargs)
            self.rsp_cache[self.key] = response.content.decode()
        self.content = self.rsp_cache[self.key].encode()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/mock/mock_llm.py | tests/mock/mock_llm.py | import json
from typing import Optional, Union
from metagpt.config2 import config
from metagpt.configs.llm_config import LLMType
from metagpt.const import LLM_API_TIMEOUT
from metagpt.logs import logger
from metagpt.provider.azure_openai_api import AzureOpenAILLM
from metagpt.provider.constant import GENERAL_FUNCTION_SCHEMA
from metagpt.provider.openai_api import OpenAILLM
from metagpt.schema import Message
# Subclass whichever provider the test config selects, so patched methods keep
# provider-specific behavior (e.g. OpenAI function calling).
OriginalLLM = OpenAILLM if config.llm.api_type == LLMType.OPENAI else AzureOpenAILLM


class MockLLM(OriginalLLM):
    """LLM wrapper that serves answers from a response cache.

    A cache miss either calls the real API (when allowed) or raises, and every
    (prompt, answer) pair observed is recorded as a cache candidate so passing
    tests can persist it (see conftest's llm_mock fixture).
    """

    def __init__(self, allow_open_api_call):
        original_llm_config = (
            config.get_openai_llm() if config.llm.api_type == LLMType.OPENAI else config.get_azure_llm()
        )
        super().__init__(original_llm_config)
        self.allow_open_api_call = allow_open_api_call
        self.rsp_cache: dict = {}
        self.rsp_candidates: list[dict] = []  # a test can have multiple calls with the same llm, thus a list

    async def acompletion_text(self, messages: list[dict], stream=False, timeout=LLM_API_TIMEOUT) -> str:
        """Overwrite original acompletion_text to cancel retry"""
        if stream:
            resp = await self._achat_completion_stream(messages, timeout=timeout)
            return resp
        rsp = await self._achat_completion(messages, timeout=timeout)
        return self.get_choice_text(rsp)

    async def original_aask(
        self,
        msg: Union[str, list[dict[str, str]]],
        system_msgs: Optional[list[str]] = None,
        format_msgs: Optional[list[dict[str, str]]] = None,
        images: Optional[Union[str, list[str]]] = None,
        timeout=LLM_API_TIMEOUT,
        stream=True,
    ) -> str:
        # Copy of BaseLLM.aask, called on cache miss — the real aask is
        # monkeypatched to MockLLM.aask, so super() cannot be used.
        if system_msgs:
            message = self._system_msgs(system_msgs)
        else:
            message = [self._default_system_msg()]
        if not self.use_system_prompt:
            message = []
        if format_msgs:
            message.extend(format_msgs)
        if isinstance(msg, str):
            message.append(self._user_msg(msg, images=images))
        else:
            message.extend(msg)
        logger.debug(message)
        rsp = await self.acompletion_text(message, stream=stream, timeout=timeout)
        return rsp

    async def original_aask_batch(self, msgs: list, timeout=LLM_API_TIMEOUT) -> str:
        """A copy of metagpt.provider.base_llm.BaseLLM.aask_batch, we can't use super().aask because it will be mocked"""
        context = []
        for msg in msgs:
            umsg = self._user_msg(msg)
            context.append(umsg)
            rsp_text = await self.acompletion_text(context, timeout=timeout)
            context.append(self._assistant_msg(rsp_text))
        return self._extract_assistant_rsp(context)

    async def original_aask_code(self, messages: Union[str, Message, list[dict]], **kwargs) -> dict:
        """
        A copy of metagpt.provider.openai_api.OpenAILLM.aask_code, we can't use super().aask because it will be mocked.
        Since openai_api.OpenAILLM.aask_code is different from base_llm.BaseLLM.aask_code, we use the former.
        """
        if "tools" not in kwargs:
            configs = {"tools": [{"type": "function", "function": GENERAL_FUNCTION_SCHEMA}]}
            kwargs.update(configs)
        rsp = await self._achat_completion_function(messages, **kwargs)
        return self.get_choice_function_arguments(rsp)

    async def aask(
        self,
        msg: Union[str, list[dict[str, str]]],
        system_msgs: Optional[list[str]] = None,
        format_msgs: Optional[list[dict[str, str]]] = None,
        images: Optional[Union[str, list[str]]] = None,
        timeout=LLM_API_TIMEOUT,
        stream=False,
    ) -> str:
        # Build the cache key used to identify whether this message was seen before.
        if isinstance(msg, list):
            msg_key = "#MSG_SEP#".join([m["content"] for m in msg])
        else:
            msg_key = msg
        if system_msgs:
            joined_system_msg = "#MSG_SEP#".join(system_msgs) + "#SYSTEM_MSG_END#"
            msg_key = joined_system_msg + msg_key
        rsp = await self._mock_rsp(msg_key, self.original_aask, msg, system_msgs, format_msgs, images, timeout, stream)
        return rsp

    async def aask_batch(self, msgs: list, timeout=LLM_API_TIMEOUT) -> str:
        # Cache key: all prompts joined; Message objects contribute their content.
        msg_key = "#MSG_SEP#".join([msg if isinstance(msg, str) else msg.content for msg in msgs])
        rsp = await self._mock_rsp(msg_key, self.original_aask_batch, msgs, timeout)
        return rsp

    async def aask_code(self, messages: Union[str, Message, list[dict]], **kwargs) -> dict:
        # Cache key: the normalized message list serialized as JSON.
        msg_key = json.dumps(self.format_msg(messages), ensure_ascii=False)
        rsp = await self._mock_rsp(msg_key, self.original_aask_code, messages, **kwargs)
        return rsp

    async def _mock_rsp(self, msg_key, ask_func, *args, **kwargs):
        # Serve from cache when possible; otherwise call the real API (if allowed).
        # Either way, record the pair as a candidate for persisting.
        if msg_key not in self.rsp_cache:
            if not self.allow_open_api_call:
                raise ValueError(
                    "In current test setting, api call is not allowed, you should properly mock your tests, "
                    "or add expected api response in tests/data/rsp_cache.json. "
                )
            # Call the original unmocked method
            rsp = await ask_func(*args, **kwargs)
        else:
            logger.warning("Use response cache")
            rsp = self.rsp_cache[msg_key]
        self.rsp_candidates.append({msg_key: rsp})
        return rsp
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/mock/mock_aiohttp.py | tests/mock/mock_aiohttp.py | import json
from typing import Callable
from aiohttp.client import ClientSession
# Keep a reference to the real implementation before tests patch ClientSession.request.
origin_request = ClientSession.request


class MockAioResponse:
    """Cache-backed stand-in for aiohttp responses."""

    # Optional per-(method, url) functions that reduce kwargs to a stable cache-key part.
    check_funcs: dict[tuple[str, str], Callable[[dict], str]] = {}
    # Shared cache mapping request keys to parsed JSON or stringified bytes.
    rsp_cache: dict[str, str] = {}
    name = "aiohttp"
    status = 200

    def __init__(self, session, method, url, **kwargs) -> None:
        fn = self.check_funcs.get((method, url))
        # "proxy" is excluded from the key so cached entries are proxy-agnostic.
        _kwargs = {k: v for k, v in kwargs.items() if k != "proxy"}
        self.key = f"{self.name}-{method}-{url}-{fn(kwargs) if fn else json.dumps(_kwargs, sort_keys=True)}"
        self.mng = self.response = None
        if self.key not in self.rsp_cache:
            # Cache miss: prepare the real request; it is entered lazily in __aenter__.
            self.mng = origin_request(session, method, url, **kwargs)

    async def __aenter__(self):
        if self.response:
            await self.response.__aenter__()
            self.status = self.response.status
        elif self.mng:
            self.response = await self.mng.__aenter__()
        return self

    async def __aexit__(self, *args, **kwargs):
        if self.response:
            await self.response.__aexit__(*args, **kwargs)
            self.response = None
        elif self.mng:
            await self.mng.__aexit__(*args, **kwargs)
            self.mng = None

    async def json(self, *args, **kwargs):
        # Serve parsed JSON from the cache, populating it on first use.
        if self.key in self.rsp_cache:
            return self.rsp_cache[self.key]
        data = await self.response.json(*args, **kwargs)
        self.rsp_cache[self.key] = data
        return data

    @property
    def content(self):
        # aiohttp exposes the byte stream as `response.content`; returning self
        # lets this object also satisfy `await response.content.read()`.
        return self

    async def read(self):
        if self.key in self.rsp_cache:
            # NOTE(review): eval() rebuilds the bytes from the cached repr; acceptable
            # only while the cache file is repo-controlled — ast.literal_eval would be safer.
            return eval(self.rsp_cache[self.key])
        data = await self.response.content.read()
        self.rsp_cache[self.key] = str(data)
        return data

    def raise_for_status(self):
        # Cache hits have no live response, so only real responses can raise here.
        if self.response:
            self.response.raise_for_status()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/mock/mock_httplib2.py | tests/mock/mock_httplib2.py | import json
from typing import Callable
from urllib.parse import parse_qsl, urlparse
import httplib2
# Capture the real Http.request before any monkeypatching, so cache misses can
# still be forwarded to the genuine network call.
origin_request = httplib2.Http.request
class MockHttplib2Response(httplib2.Response):
    """Cached replacement for an httplib2 response.

    Iterating an instance (see ``__iter__``) yields the response object itself
    followed by the encoded body, so it can stand in where ``Http.request``'s
    ``(response, content)`` pair would be unpacked.
    """

    # Optional per-(method, uri) functions that reduce request params to a
    # cache-key fragment; falls back to a JSON dump of the params.
    check_funcs: dict[tuple[str, str], Callable[[dict], str]] = {}
    # Shared cache of previously observed response bodies (decoded text).
    rsp_cache: dict[str, str] = {}
    name = "httplib2"

    def __init__(self, http, uri, method="GET", **kwargs) -> None:
        # The cache key uses the uri with its query string stripped; the query
        # parameters are folded in via `params` instead.
        url = uri.split("?")[0]
        result = urlparse(uri)
        params = dict(parse_qsl(result.query))
        # NOTE(review): the lookup keys on the full uri (query included) while
        # the cache key uses the stripped url — confirm this is intended.
        fn = self.check_funcs.get((method, uri))
        new_kwargs = {"params": params}
        key = f"{self.name}-{method}-{url}-{fn(new_kwargs) if fn else json.dumps(new_kwargs)}"
        if key not in self.rsp_cache:
            # Cache miss: perform the real request and record the decoded body.
            _, self.content = origin_request(http, uri, method, **kwargs)
            self.rsp_cache[key] = self.content.decode()
        self.content = self.rsp_cache[key]

    def __iter__(self):
        # Supports `rsp, content = MockHttplib2Response(...)` unpacking.
        yield self
        yield self.content.encode()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/test_llm.py | tests/metagpt/test_llm.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/11 14:45
@Author : alexanderwu
@File : test_llm.py
"""
import pytest
from metagpt.llm import LLM
@pytest.fixture()
def llm():
    """Provide a default-configured LLM instance for each test."""
    return LLM()
@pytest.mark.asyncio
async def test_llm_aask(llm):
    """Non-streaming aask should produce a non-empty reply."""
    reply = await llm.aask("hello world", stream=False)
    assert len(reply) > 0
@pytest.mark.asyncio
async def test_llm_aask_stream(llm):
    """Streaming aask should also accumulate a non-empty reply."""
    reply = await llm.aask("hello world", stream=True)
    assert len(reply) > 0
@pytest.mark.asyncio
async def test_llm_acompletion(llm):
    """acompletion should return a first choice with non-empty message content."""
    messages = [{"role": "user", "content": "hello"}]
    completion = await llm.acompletion(messages)
    assert len(completion.choices[0].message.content) > 0
# Allow running this test module directly with `python test_llm.py`.
if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/test_reporter.py | tests/metagpt/test_reporter.py | import ast
from contextlib import asynccontextmanager
import aiohttp.web
import pytest
from metagpt.logs import log_llm_stream
from metagpt.utils.report import (
END_MARKER_NAME,
BlockType,
BrowserReporter,
DocsReporter,
EditorReporter,
NotebookReporter,
ServerReporter,
TaskReporter,
TerminalReporter,
)
class MockFileLLM:
    """Fake LLM that streams a fixed payload line by line, then returns it."""

    def __init__(self, data: str):
        self.data = data

    async def aask(self, *args, **kwargs) -> str:
        for chunk in self.data.splitlines(keepends=True):
            log_llm_stream(chunk)
        # Emit one trailing newline after the payload, matching the original.
        log_llm_stream("\n")
        return self.data
@asynccontextmanager
async def callback_server(http_server):
    """Run an HTTP endpoint that records every JSON callback it receives.

    Yields ``(url, received)``; ``received`` fills in as callbacks arrive.
    """
    received = []

    async def record(request):
        received.append(await request.json())
        return aiohttp.web.json_response({})

    server, url = await http_server(record)
    yield url, received
    await server.stop()
@pytest.mark.asyncio
async def test_terminal_report(http_server):
    """Terminal events share one uuid and concatenate into the full output."""
    async with callback_server(http_server) as (url, events):
        async with TerminalReporter(callback_url=url) as reporter:
            await reporter.async_report("ls -a", "cmd")
            await reporter.async_report("main.py\n", "output")
            await reporter.async_report("setup.py\n", "output")
        first_uuid = events[0]["uuid"]
        for event in events:
            assert BlockType.TERMINAL is BlockType(event["block"])
            assert event["uuid"] == first_uuid
        merged = "".join(e["value"] for e in events if e["name"] != END_MARKER_NAME)
        assert merged == "ls -amain.py\nsetup.py\n"
@pytest.mark.asyncio
async def test_browser_report(http_server):
    """Browser reports deliver the visited url, a screenshot, then the end marker."""
    img = b"\x89PNG\r\n\x1a\n\x00\x00"
    web_url = "https://docs.deepwisdom.ai"

    class AsyncPage:
        # Minimal stand-in for a browser page object; only screenshot() is used.
        async def screenshot(self):
            return img

    async with callback_server(http_server) as (url, callback_data):
        async with BrowserReporter(callback_url=url) as reporter:
            await reporter.async_report(web_url, "url")
            await reporter.async_report(AsyncPage(), "page")
    assert all(BlockType.BROWSER is BlockType(i["block"]) for i in callback_data)
    assert all(i["uuid"] == callback_data[0]["uuid"] for i in callback_data[1:])
    # url + page + end marker
    assert len(callback_data) == 3
    assert callback_data[-1]["name"] == END_MARKER_NAME
    assert callback_data[0]["name"] == "url"
    assert callback_data[0]["value"] == web_url
    assert callback_data[1]["name"] == "page"
    # Screenshot bytes travel as their repr; literal_eval restores them.
    assert ast.literal_eval(callback_data[1]["value"]) == img
@pytest.mark.asyncio
async def test_server_reporter(http_server):
    """A server report emits exactly one non-chunk BROWSER_RT callback."""
    local_url = "http://127.0.0.1:8080/index.html"
    async with callback_server(http_server) as (url, events):
        await ServerReporter(callback_url=url).async_report(local_url)
    assert len(events) == 1
    event = events[0]
    assert BlockType.BROWSER_RT is BlockType(event["block"])
    assert event["name"] == "local_url"
    assert event["value"] == local_url
    assert not event["is_chunk"]
@pytest.mark.asyncio
async def test_task_reporter(http_server):
    """A task report delivers the task dict as a single TASK 'object' callback."""
    task = {"current_task_id": "", "tasks": []}
    async with callback_server(http_server) as (url, events):
        await TaskReporter(callback_url=url).async_report(task)
    assert len(events) == 1
    event = events[0]
    assert BlockType.TASK is BlockType(event["block"])
    assert event["name"] == "object"
    assert event["value"] == task
@pytest.mark.asyncio
async def test_notebook_reporter(http_server):
    """Notebook reports deliver cell + outputs + path callbacks, then the end marker."""
    code = {
        "cell_type": "code",
        "execution_count": None,
        "id": "e1841c44",
        "metadata": {},
        "outputs": [],
        "source": ["\n", "import time\n", "print('will sleep 1s.')\n", "time.sleep(1)\n", "print('end.')\n", ""],
    }
    output1 = {"name": "stdout", "output_type": "stream", "text": ["will sleep 1s.\n"]}
    output2 = {"name": "stdout", "output_type": "stream", "text": ["will sleep 1s.\n"]}
    code_path = "/data/main.ipynb"
    async with callback_server(http_server) as (url, callback_data):
        async with NotebookReporter(callback_url=url) as reporter:
            await reporter.async_report(code, "content")
            await reporter.async_report(output1, "content")
            await reporter.async_report(output2, "content")
            await reporter.async_report(code_path, "path")
    assert all(BlockType.NOTEBOOK is BlockType(i["block"]) for i in callback_data)
    # 3 content callbacks + 1 path callback + the end marker.
    assert len(callback_data) == 5
    assert callback_data[-1]["name"] == END_MARKER_NAME
    assert callback_data[-2]["name"] == "path"
    assert callback_data[-2]["value"] == code_path
    assert all(i["uuid"] == callback_data[0]["uuid"] for i in callback_data[1:])
    assert [i["value"] for i in callback_data if i["name"] == "content"] == [code, output1, output2]
@pytest.mark.asyncio
@pytest.mark.parametrize(
    ("data", "file_path", "meta", "block", "report_cls"),
    (
        (
            "## Language\n\nen_us\n\n## Programming Language\n\nPython\n\n## Original Requirements\n\nCreate a 2048 gam...",
            "/data/prd.md",
            {"type": "write_prd"},
            BlockType.DOCS,
            DocsReporter,
        ),
        (
            "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nprint('Hello World')\n",
            "/data/main.py",
            {"type": "write_code"},
            BlockType.EDITOR,
            EditorReporter,
        ),
    ),
    ids=["test_docs_reporter", "test_editor_reporter"],
)
async def test_llm_stream_reporter(data, file_path, meta, block, report_cls, http_server):
    """Reporters with llm-stream enabled forward meta, streamed content chunks, and path."""
    async with callback_server(http_server) as (url, callback_data):
        async with report_cls(callback_url=url, enable_llm_stream=True) as reporter:
            await reporter.async_report(meta, "meta")
            # MockFileLLM pushes `data` through the global llm-stream log hook.
            await MockFileLLM(data).aask("")
            await reporter.wait_llm_stream_report()
            await reporter.async_report(file_path, "path")
    assert callback_data
    assert all(block is BlockType(i["block"]) for i in callback_data)
    assert all(i["uuid"] == callback_data[0]["uuid"] for i in callback_data[1:])
    chunks, names = [], set()
    for i in callback_data:
        name = i["name"]
        names.add(name)
        if name == "meta":
            assert i["value"] == meta
        elif name == "path":
            assert i["value"] == file_path
        elif name == END_MARKER_NAME:
            pass
        elif name == "content":
            chunks.append(i["value"])
        else:
            raise ValueError
    # The final streamed chunk is the terminating newline; the rest reassemble `data`.
    assert "".join(chunks[:-1]) == data
    assert names == {"meta", "path", "content", END_MARKER_NAME}
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/test_incremental_dev.py | tests/metagpt/test_incremental_dev.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/01/03
@Author : mannaandpoem
@File : test_incremental_dev.py
"""
import os
import shutil
import subprocess
import time
import pytest
from typer.testing import CliRunner
from metagpt.const import TEST_DATA_PATH
from metagpt.logs import logger
from metagpt.software_company import app
runner = CliRunner()

# Incremental requirements; IDEAS[i] is applied to the project PROJECT_NAMES[i].
IDEAS = [
    "Add subtraction, multiplication and division operations to the calculator. The current calculator can only perform basic addition operations, and it is necessary to introduce subtraction, multiplication, division operation into the calculator",
    "Adding graphical interface functionality to enhance the user experience in the number-guessing game. The existing number-guessing game currently relies on command-line input for numbers. The goal is to introduce a graphical interface to improve the game's usability and visual appeal",
    "Add a feature to remove deprecated words from the word cloud. The current word cloud generator does not support removing deprecated words. Now, The word cloud generator should support removing deprecated words. Customize deactivated words to exclude them from word cloud. Let users see all the words in the text file, and allow users to select the words they want to remove.",
    "Add an AI opponent with fixed difficulty levels. Currently, the game only allows players to compete against themselves. Implement an AI algorithm that can playing with player. This will provide a more engaging and challenging experience for players.",
    "Add functionality to view the history of scores. The original dice rolling game could only display the current game result, but the new requirement allows players to view the history of scores",
    "Add functionality to view the history of scores and perform statistical analysis on them. The original dice rolling game could only display the current game result, but the new requirement allows players to view the history of scores and display the statistical analysis results of the current score",
    "Changed score target for 2048 game from 2048 to 4096. Please change the game's score target from 2048 to 4096, and change the interface size from 4*4 to 8*8",
    "Display the history score of the player in the 2048 game. Add a record board that can display players' historical score records so that players can trace their scores",
    "Incremental Idea Gradually increase the speed of the snake as the game progresses. In the current version of the game, the snake’s speed remains constant throughout the gameplay. Implement a feature where the snake’s speed gradually increases over time, making the game more challenging and intense as the player progresses.",
    "Introduce power-ups and obstacles to the game. The current version of the game only involves eating food and growing the snake. Add new elements such as power-ups that can enhance the snake’s speed or make it invincible for a short duration. At the same time, introduce obstacles like walls or enemies that the snake must avoid or overcome to continue growing.",
]

# Fixture project names aligned index-by-index with IDEAS; duplicates are
# intentional (the same project receives several successive refinements).
PROJECT_NAMES = [
    "simple_add_calculator",
    "number_guessing_game",
    "word_cloud",
    "Gomoku",
    "dice_simulator_new",
    "dice_simulator_new",
    "pygame_2048",
    "pygame_2048",
    "snake_game",
    "snake_game",
]
@pytest.mark.skip
def test_simple_add_calculator():
    """Refine the calculator fixture with extra arithmetic operations."""
    result = get_incremental_dev_result(IDEAS[0], PROJECT_NAMES[0])
    log_and_check_result(result)
@pytest.mark.skip
def test_number_guessing_game():
    """Refine the number-guessing fixture with a graphical interface."""
    result = get_incremental_dev_result(IDEAS[1], PROJECT_NAMES[1])
    log_and_check_result(result)
@pytest.mark.skip
def test_word_cloud():
    """Refine the word-cloud fixture with deprecated-word removal."""
    result = get_incremental_dev_result(IDEAS[2], PROJECT_NAMES[2])
    log_and_check_result(result)
@pytest.mark.skip
def test_gomoku():
    """Refine the Gomoku fixture with an AI opponent."""
    result = get_incremental_dev_result(IDEAS[3], PROJECT_NAMES[3])
    log_and_check_result(result)
@pytest.mark.skip
def test_dice_simulator_new():
    """Apply two successive refinements to the dice-simulator fixture."""
    for i, (idea, project_name) in enumerate(zip(IDEAS[4:6], PROJECT_NAMES[4:6]), start=1):
        result = get_incremental_dev_result(idea, project_name)
        log_and_check_result(result, "refine_" + str(i))
@pytest.mark.skip
def test_refined_pygame_2048():
    """Apply two successive refinements to the 2048 fixture."""
    for i, (idea, project_name) in enumerate(zip(IDEAS[6:8], PROJECT_NAMES[6:8]), start=1):
        result = get_incremental_dev_result(idea, project_name)
        log_and_check_result(result, "refine_" + str(i))
@pytest.mark.skip
def test_refined_snake_game():
    """Apply two successive refinements to the snake-game fixture."""
    for i, (idea, project_name) in enumerate(zip(IDEAS[8:10], PROJECT_NAMES[8:10]), start=1):
        result = get_incremental_dev_result(idea, project_name)
        log_and_check_result(result, "refine_" + str(i))
def log_and_check_result(result, tag_name="refine"):
    """Assert the CLI run succeeded and tag the resulting commit.

    Fails when the CLI aborted, or when no new commit was created (HEAD is
    still described as the ``base`` tag). On success, creates a git tag
    (made unique with a timestamp if it already exists) so the run's output
    can be inspected later.

    Args:
        result: The CliRunner invocation result to inspect.
        tag_name: Base name for the git tag marking this run.
    """
    logger.info(result)
    logger.info(result.output)
    # The CLI prints "Aborting" on failure; surface that directly instead of
    # the former `if ...: assert False` / dead `assert True` pattern.
    assert "Aborting" not in result.output
    # After a successful run there must be a new commit, so `git describe`
    # no longer resolves to the original "base" tag.
    cur_tag = subprocess.run(["git", "describe", "--tags"], capture_output=True, text=True).stdout.strip()
    assert cur_tag != "base"
    # Make the tag name unique if a tag with this name already exists.
    if subprocess.run(["git", "show-ref", "--verify", "--quiet", f"refs/tags/{tag_name}"]).returncode == 0:
        tag_name += str(int(time.time()))
    try:
        subprocess.run(["git", "tag", tag_name], check=True)
    except subprocess.CalledProcessError as e:
        raise e
def get_incremental_dev_result(idea, project_name, use_review=True):
    """Run the incremental-dev CLI flow against a fixture project.

    Extracts ``<project_name>.zip`` from the test data directory on first use,
    ensures the repo has a ``base`` tag, then invokes the ``metagpt`` CLI app
    with the incremental (``--inc``) flag.

    Args:
        idea: Requirement text passed to the CLI.
        project_name: Fixture project directory name under the test data path.
        use_review: When False, append ``--no-code-review`` to the CLI args.

    Returns:
        The CliRunner invocation result.
    """
    project_path = TEST_DATA_PATH / "incremental_dev_project" / project_name
    # Check if the project path exists
    if not project_path.exists():
        # If the project does not exist, extract the project file
        try:
            if shutil.which("unzip"):
                subprocess.run(["unzip", f"{project_path}.zip", "-d", str(project_path.parent)], check=True)
            elif shutil.which("tar"):
                subprocess.run(["tar", "-xf", f"{project_path}.zip", "-C", str(project_path.parent)], check=True)
            else:
                # Previously this case fell through silently and the test ran
                # against a missing project; fail fast instead.
                raise FileNotFoundError("no archiver available")
            logger.info(f"Extracted project {project_name} successfully.")
        except FileNotFoundError as e:
            raise FileNotFoundError(f"Neither 'unzip' nor 'tar' command found. Error: {e}")
        except subprocess.CalledProcessError as e:
            raise Exception(f"Failed to extract project {project_name}. Error: {e}")

    check_or_create_base_tag(project_path)
    args = [idea, "--inc", "--project-path", project_path, "--n-round", "20"]
    if not use_review:
        args.append("--no-code-review")
    result = runner.invoke(app, args)
    return result
def check_or_create_base_tag(project_path):
    """Ensure ``project_path`` is a git repo checked out at a ``base`` tag.

    If the tag already exists, local changes are discarded and the tagged
    state is checked out; otherwise the current tree is committed and tagged
    as ``base``. Note: changes the process working directory as a side effect.
    """
    # Work inside the fixture project for all subsequent git commands.
    os.chdir(project_path)
    # Initialize a Git repository (no-op if one already exists).
    subprocess.run(["git", "init"], check=True)
    # NOTE(review): this overwrites .gitignore unconditionally via shell
    # redirection, despite the apparent intent to only create it when missing.
    subprocess.run(f"echo # Ignore these files or directories > {'.gitignore'}", shell=True)
    # Check if the 'base' tag exists
    check_base_tag_cmd = ["git", "show-ref", "--verify", "--quiet", "refs/tags/base"]
    if subprocess.run(check_base_tag_cmd).returncode == 0:
        has_base_tag = True
    else:
        has_base_tag = False
    if has_base_tag:
        logger.info("Base tag exists")
        # Drop untracked files, then force-checkout the tagged state.
        try:
            status = subprocess.run(["git", "status", "-s"], capture_output=True, text=True).stdout.strip()
            if status:
                subprocess.run(["git", "clean", "-df"])
            subprocess.run(["git", "checkout", "-f", "base"], check=True)
            logger.info("Switched to base branch")
        except Exception as e:
            logger.error("Failed to switch to base branch")
            raise e
    else:
        logger.info("Base tag doesn't exist.")
        # Add and commit the current code since the 'base' tag doesn't exist.
        add_cmd = ["git", "add", "."]
        try:
            subprocess.run(add_cmd, check=True)
            logger.info("Files added successfully.")
        except subprocess.CalledProcessError as e:
            logger.error(f"Failed to add files: {e}")
        commit_cmd = ["git", "commit", "-m", "Initial commit"]
        try:
            subprocess.run(commit_cmd, check=True)
            logger.info("Committed all files with the message 'Initial commit'.")
        except subprocess.CalledProcessError as e:
            logger.error(f"Failed to commit: {e.stderr}")
        # Tag the committed snapshot so later runs can compare against it.
        add_base_tag_cmd = ["git", "tag", "base"]
        # Surface tagging failures: they would invalidate later comparisons.
        try:
            subprocess.run(add_base_tag_cmd, check=True)
            logger.info("Added 'base' tag.")
        except Exception as e:
            logger.error("Failed to add 'base' tag.")
            raise e
# Allow running this test module directly with `python test_incremental_dev.py`.
if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/test_software_company.py | tests/metagpt/test_software_company.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/15 11:40
@Author : alexanderwu
@File : test_software_company.py
"""
import pytest
from typer.testing import CliRunner
from metagpt.logs import logger
from metagpt.software_company import app
from metagpt.team import Team
# Shared Typer CLI runner used by all tests in this module.
runner = CliRunner()
@pytest.mark.asyncio
async def test_empty_team(new_filename):
    """Run a Team with no hired roles; it should complete without errors."""
    # FIXME: we're now using "metagpt" cli, so the entrance should be replaced instead.
    team = Team()
    run_history = await team.run(idea="Build a simple search system. I will upload my files later.")
    logger.info(run_history)
def test_software_company(new_filename):
    """Drive the CLI end to end with a single idea argument."""
    cli_result = runner.invoke(app, ["Make a cli snake game"])
    logger.info(cli_result)
    logger.info(cli_result.output)
def test_software_company_with_run_tests():
    """Drive the CLI with test execution enabled for a short run."""
    cli_result = runner.invoke(app, ["Make a cli snake game", "--run-tests", "--n-round=8"])
    logger.info(cli_result.output)
    # assert "unittest" in cli_result.output.lower() or "pytest" in cli_result.output.lower()
# Allow running this test module directly with `python test_software_company.py`.
if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/test_environment.py | tests/metagpt/test_environment.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/12 00:47
@Author : alexanderwu
@File : test_environment.py
"""
from pathlib import Path
import pytest
from metagpt.actions import UserRequirement
from metagpt.actions.prepare_documents import PrepareDocuments
from metagpt.context import Context
from metagpt.environment import Environment
from metagpt.logs import logger
from metagpt.roles import (
Architect,
Engineer,
ProductManager,
ProjectManager,
QaEngineer,
Role,
)
from metagpt.schema import Message, UserMessage
from metagpt.utils.common import any_to_str, is_send_to
# Fixture directory for serialization/deserialization test artifacts.
serdeser_path = Path(__file__).absolute().parent.joinpath("../data/serdeser_storage")
class MockEnv(Environment):
    """Environment that logs deliveries and checks message-routing invariants."""

    def publish_message(self, message: Message, peekable: bool = True) -> bool:
        logger.info(f"{message.metadata}:{message.content}")
        consumers = []
        # Deliver to every member whose address set matches the message.
        for role, addrs in self.member_addrs.items():
            if is_send_to(message, addrs):
                role.put_message(message)
                consumers.append(role)
        if not consumers:
            logger.warning(f"Message no recipients: {message.dump()}")
        # User requirements and document-preparation results must be routed
        # to exactly one recipient.
        if message.cause_by in [any_to_str(UserRequirement), any_to_str(PrepareDocuments)]:
            assert len(consumers) == 1
        return True
@pytest.fixture
def env():
    """Provide a MockEnv whose context is tagged with this test module."""
    context = Context()
    context.kwargs.tag = __file__
    return MockEnv(context=context)
def test_add_role(env: Environment):
    """A role added to the environment is retrievable by its setting string."""
    pm = ProductManager(
        name="Alice", profile="product manager", goal="create a new product", constraints="limited resources"
    )
    env.add_role(pm)
    assert env.get_role(str(pm._setting)) == pm
def test_get_roles(env: Environment):
    """get_roles maps each profile to its registered role."""
    alice = Role(name="Alice", profile="product manager", goal="create a new product", constraints="limited resources")
    bob = Role(name="Bob", profile="engineer", goal="develop the new product", constraints="short deadline")
    env.add_role(alice)
    env.add_role(bob)
    assert env.get_roles() == {alice.profile: alice, bob.profile: bob}
@pytest.mark.asyncio
async def test_publish_and_process_message(env: Environment):
    """Publishing a user requirement drives two rounds without persisting history."""
    # Start from a clean slate: drop any repo left over from earlier tests.
    if env.context.git_repo:
        env.context.git_repo.delete_repository()
        env.context.git_repo = None
    product_manager = ProductManager(name="Alice", profile="Product Manager", goal="做AI Native产品", constraints="资源有限")
    architect = Architect(
        name="Bob", profile="Architect", goal="设计一个可用、高效、较低成本的系统,包括数据结构与接口", constraints="资源有限,需要节省成本"
    )
    env.add_roles([product_manager, architect])
    env.publish_message(UserMessage(content="需要一个基于LLM做总结的搜索引擎", cause_by=UserRequirement, send_to=product_manager))
    await env.run(k=2)
    logger.info(f"{env.history}")
    assert len(env.history.storage) == 0
@pytest.mark.skip
@pytest.mark.asyncio
@pytest.mark.parametrize(
    ("content", "send_to"),
    [
        ("snake game", any_to_str(ProductManager)),
        (
            "Rewrite the PRD file of the project at '/Users/iorishinier/github/MetaGPT/workspace/snake_game', add 'moving enemy' to the original requirement",
            any_to_str(ProductManager),
        ),
        (
            "Add 'random moving enemy, and dispears after 10 seconds' design to the project at '/Users/iorishinier/github/MetaGPT/workspace/snake_game'",
            any_to_str(Architect),
        ),
        (
            'Rewrite the tasks file of the project at "/Users/iorishinier/github/MetaGPT/workspace/snake_game"',
            any_to_str(ProjectManager),
        ),
        (
            "src filename is 'game.py', Uncaught SyntaxError: Identifier 'Position' has already been declared (at game.js:1:1), the project at '/Users/iorishinier/github/bak/MetaGPT/workspace/snake_game'",
            any_to_str(Engineer),
        ),
        (
            "Rewrite the unit test of 'main.py' at '/Users/iorishinier/github/MetaGPT/workspace/snake_game'",
            any_to_str(QaEngineer),
        ),
    ],
)
async def test_env(content, send_to):
    """End-to-end routing smoke test (skipped: depends on local project paths)."""
    context = Context()
    env = MockEnv(context=context)
    env.add_roles(
        [
            ProductManager(context=context),
            Architect(context=context),
            ProjectManager(context=context),
            Engineer(n_borg=5, use_code_review=True, context=context),
            QaEngineer(context=context, test_round_allowed=2),
        ]
    )
    msg = UserMessage(content=content, send_to=send_to)
    env.publish_message(msg)
    # Keep stepping the environment until every role is idle.
    while not env.is_idle:
        await env.run()
    pass
# Allow running this test module directly with `python test_environment.py`.
if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/test_config.py | tests/metagpt/test_config.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/1/9 15:57
@Author : alexanderwu
@File : test_config.py
"""
from metagpt.config2 import Config
from metagpt.configs.llm_config import LLMType
from tests.metagpt.provider.mock_llm_config import mock_llm_config
def test_config_1():
    """The default config exposes an OpenAI LLM when configured for OpenAI."""
    cfg = Config.default()
    openai_llm = cfg.get_openai_llm()
    if cfg.llm.api_type == LLMType.OPENAI:
        assert openai_llm is not None
def test_config_from_dict():
    """A Config built from an in-memory LLM config keeps the provided fields."""
    config = Config(llm=mock_llm_config)
    assert config
    assert config.llm.api_key == "mock_api_key"
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/test_team.py | tests/metagpt/test_team.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : unittest of team
from metagpt.roles.project_manager import ProjectManager
from metagpt.team import Team
def test_team():
    """Hiring a single role registers exactly one role in the team env."""
    team = Team()
    team.hire([ProjectManager()])
    assert len(team.env.roles) == 1
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/test_role.py | tests/metagpt/test_role.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/11 14:44
@Author : alexanderwu
@File : test_role.py
@Modified By: mashenquan, 2023-11-1. In line with Chapter 2.2.1 and 2.2.2 of RFC 116, introduce unit tests for
the utilization of the new message distribution feature in message handling.
@Modified By: mashenquan, 2023-11-4. According to the routing feature plan in Chapter 2.2.3.2 of RFC 113, the routing
functionality is to be consolidated into the `Environment` class.
"""
import uuid
from unittest.mock import MagicMock
import pytest
from pydantic import BaseModel
from metagpt.actions import Action, ActionOutput, UserRequirement
from metagpt.environment import Environment
from metagpt.provider.base_llm import BaseLLM
from metagpt.roles import Role
from metagpt.schema import Message
from metagpt.utils.common import any_to_name, any_to_str
class MockAction(Action):
    """Echo action: wraps the latest message into an ActionOutput."""

    async def run(self, messages, *args, **kwargs):
        assert messages
        latest = messages[-1]
        # TODO to check instruct_content as Message
        return ActionOutput(content=latest.content, instruct_content=latest.instruct_content)
class MockRole(Role):
    """Role preloaded with a single MockAction."""

    def __init__(self, name="", profile="", goal="", constraints="", desc=""):
        super().__init__(name=name, profile=profile, goal=goal, constraints=constraints, desc=desc)
        self.set_actions([MockAction()])
def test_basic():
    """Addresses default to the class path and include the name when given."""
    anonymous = MockRole()
    assert anonymous.addresses == {"tests.metagpt.test_role.MockRole"}
    assert anonymous.rc.watch == {"metagpt.actions.add_requirement.UserRequirement"}
    named = MockRole(name="mock_role")
    assert named.addresses == {"tests.metagpt.test_role.MockRole", "mock_role"}
@pytest.mark.asyncio
async def test_react():
    """A role reacts to messages routed either by address or by cause_by."""

    class Input(BaseModel):
        name: str
        profile: str
        goal: str
        constraints: str
        desc: str
        address: str

    inputs = [
        {
            "name": "A",
            "profile": "Tester",
            "goal": "Test",
            "constraints": "constraints",
            "desc": "desc",
            "address": "start",
        }
    ]
    for i in inputs:
        seed = Input(**i)
        role = MockRole(
            name=seed.name, profile=seed.profile, goal=seed.goal, constraints=seed.constraints, desc=seed.desc
        )
        role.set_addresses({seed.address})
        # Constructor arguments must be reflected on the role as-is.
        assert role.rc.watch == {any_to_str(UserRequirement)}
        assert role.name == seed.name
        assert role.profile == seed.profile
        assert role.goal == seed.goal
        assert role.constraints == seed.constraints
        assert role.desc == seed.desc
        assert role.is_idle
        env = Environment()
        env.add_role(role)
        assert env.get_addresses(role) == {seed.address}
        # Route by explicit address: the role wakes, then goes idle again.
        env.publish_message(Message(content="test", msg_to=seed.address))
        assert not role.is_idle
        while not env.is_idle:
            await env.run()
        assert role.is_idle
        # Route by cause_by matching the watched address: same wake/idle cycle.
        env.publish_message(Message(content="test", cause_by=seed.address))
        assert not role.is_idle
        while not env.is_idle:
            await env.run()
        assert role.is_idle
        # Re-addressing the role is reflected in the environment's registry.
        tag = uuid.uuid4().hex
        role.set_addresses({tag})
        assert env.get_addresses(role) == {tag}
@pytest.mark.asyncio
async def test_send_to():
    """send_to accepts list/set/tuple of mixed str/class items, normalized to a str set."""
    expected = {"a", any_to_str(MockRole), any_to_str(Message)}
    assert Message(content="a", send_to=["a", MockRole, Message]).send_to == expected
    assert Message(content="a", cause_by=MockAction, send_to={"a", MockRole, Message}).send_to == expected
    assert Message(content="a", send_to=("a", MockRole, Message)).send_to == expected
def test_init_action():
    """set_actions accepts duplicate action classes and keeps both instances."""
    agent = Role()
    agent.set_actions([MockAction, MockAction])
    assert len(agent.actions) == 2
@pytest.mark.asyncio
async def test_recover():
    """A recovered role resumes from its saved state and re-runs the pending action."""
    # Mock LLM actions
    mock_llm = MagicMock(spec=BaseLLM)
    mock_llm.aask.side_effect = ["1"]
    role = Role()
    assert role.is_watch(any_to_str(UserRequirement))
    # put_message/publish_message must tolerate None payloads without raising.
    role.put_message(None)
    role.publish_message(None)
    role.llm = mock_llm
    role.set_actions([MockAction, MockAction])
    # Simulate a restored-from-disk role: recovered flag, last observed
    # message, and an action index already selected.
    role.recovered = True
    role.latest_observed_msg = Message(content="recover_test")
    role.rc.state = 0
    assert role.action_description == any_to_name(MockAction)
    rsp = await role.run()
    assert rsp.cause_by == any_to_str(MockAction)
@pytest.mark.asyncio
async def test_think_act():
    """think() selects the only action; act() echoes the memorized message."""
    # Mock LLM actions
    mock_llm = MagicMock(spec=BaseLLM)
    mock_llm.aask.side_effect = ["ok"]
    role = Role()
    role.set_actions([MockAction])
    await role.think()
    role.rc.memory.add(Message("run"))
    assert len(role.get_memories()) == 1
    rsp = await role.act()
    assert rsp
    assert isinstance(rsp, ActionOutput)
    # MockAction returns the latest message content verbatim.
    assert rsp.content == "run"
# Allow running this test module directly with `python test_role.py`.
if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/test_prompt.py | tests/metagpt/test_prompt.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/11 14:45
@Author : alexanderwu
@File : test_llm.py
"""
import pytest
from metagpt.llm import LLM
CODE_REVIEW_SMALLEST_CONTEXT = """
## game.js
```Code
// game.js
class Game {
constructor() {
this.board = this.createEmptyBoard();
this.score = 0;
this.bestScore = 0;
}
createEmptyBoard() {
const board = [];
for (let i = 0; i < 4; i++) {
board[i] = [0, 0, 0, 0];
}
return board;
}
startGame() {
this.board = this.createEmptyBoard();
this.score = 0;
this.addRandomTile();
this.addRandomTile();
}
addRandomTile() {
let emptyCells = [];
for (let r = 0; r < 4; r++) {
for (let c = 0; c < 4; c++) {
if (this.board[r][c] === 0) {
emptyCells.push({ r, c });
}
}
}
if (emptyCells.length > 0) {
let randomCell = emptyCells[Math.floor(Math.random() * emptyCells.length)];
this.board[randomCell.r][randomCell.c] = Math.random() < 0.9 ? 2 : 4;
}
}
move(direction) {
// This function will handle the logic for moving tiles
// in the specified direction and merging them
// It will also update the score and add a new random tile if the move is successful
// The actual implementation of this function is complex and would require
// a significant amount of code to handle all the cases for moving and merging tiles
// For the purposes of this example, we will not implement the full logic
// Instead, we will just call addRandomTile to simulate a move
this.addRandomTile();
}
getBoard() {
return this.board;
}
getScore() {
return this.score;
}
getBestScore() {
return this.bestScore;
}
setBestScore(score) {
this.bestScore = score;
}
}
```
"""
MOVE_DRAFT = """
## move function draft
```javascript
move(direction) {
let moved = false;
switch (direction) {
case 'up':
for (let c = 0; c < 4; c++) {
for (let r = 1; r < 4; r++) {
if (this.board[r][c] !== 0) {
let row = r;
while (row > 0 && this.board[row - 1][c] === 0) {
this.board[row - 1][c] = this.board[row][c];
this.board[row][c] = 0;
row--;
moved = true;
}
if (row > 0 && this.board[row - 1][c] === this.board[row][c]) {
this.board[row - 1][c] *= 2;
this.board[row][c] = 0;
this.score += this.board[row - 1][c];
moved = true;
}
}
}
}
break;
case 'down':
// Implement logic for moving tiles down
// Similar to the 'up' case but iterating in reverse order
// and checking for merging in the opposite direction
break;
case 'left':
// Implement logic for moving tiles left
// Similar to the 'up' case but iterating over columns first
// and checking for merging in the opposite direction
break;
case 'right':
// Implement logic for moving tiles right
// Similar to the 'up' case but iterating over columns in reverse order
// and checking for merging in the opposite direction
break;
}
if (moved) {
this.addRandomTile();
}
}
```
"""
FUNCTION_TO_MERMAID_CLASS = """
## context
```
class UIDesign(Action):
#Class representing the UI Design action.
def __init__(self, name, context=None, llm=None):
super().__init__(name, context, llm) # 需要调用LLM进一步丰富UI设计的prompt
@parse
def parse_requirement(self, context: str):
#Parse UI Design draft from the context using regex.
pattern = r"## UI Design draft.*?\n(.*?)## Anything UNCLEAR"
return context, pattern
@parse
def parse_ui_elements(self, context: str):
#Parse Selected Elements from the context using regex.
pattern = r"## Selected Elements.*?\n(.*?)## HTML Layout"
return context, pattern
@parse
def parse_css_code(self, context: str):
pattern = r"```css.*?\n(.*?)## Anything UNCLEAR"
return context, pattern
@parse
def parse_html_code(self, context: str):
pattern = r"```html.*?\n(.*?)```"
return context, pattern
async def draw_icons(self, context, *args, **kwargs):
#Draw icons using SDEngine.
engine = SDEngine()
icon_prompts = self.parse_ui_elements(context)
icons = icon_prompts.split("\n")
icons = [s for s in icons if len(s.strip()) > 0]
prompts_batch = []
for icon_prompt in icons:
# fixme: 添加icon lora
prompt = engine.construct_payload(icon_prompt + ".<lora:WZ0710_AW81e-3_30e3b128d64T32_goon0.5>")
prompts_batch.append(prompt)
await engine.run_t2i(prompts_batch)
logger.info("Finish icon design using StableDiffusion API")
async def _save(self, css_content, html_content):
save_dir = CONFIG.workspace_path / "resources" / "codes"
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
# Save CSS and HTML content to files
css_file_path = save_dir / "ui_design.css"
html_file_path = save_dir / "ui_design.html"
with open(css_file_path, "w") as css_file:
css_file.write(css_content)
with open(html_file_path, "w") as html_file:
html_file.write(html_content)
async def run(self, requirements: list[Message], *args, **kwargs) -> ActionOutput:
#Run the UI Design action.
# fixme: update prompt (根据需求细化prompt)
context = requirements[-1].content
ui_design_draft = self.parse_requirement(context=context)
# todo: parse requirements str
prompt = PROMPT_TEMPLATE.format(context=ui_design_draft, format_example=FORMAT_EXAMPLE)
logger.info(prompt)
ui_describe = await self._aask_v1(prompt, "ui_design", OUTPUT_MAPPING)
logger.info(ui_describe.content)
logger.info(ui_describe.instruct_content)
css = self.parse_css_code(context=ui_describe.content)
html = self.parse_html_code(context=ui_describe.content)
await self._save(css_content=css, html_content=html)
await self.draw_icons(ui_describe.content)
return ui_describe
```
-----
## format example
[CONTENT]
{
"ClassView": "classDiagram\n class A {\n -int x\n +int y\n -int speed\n -int direction\n +__init__(x: int, y: int, speed: int, direction: int)\n +change_direction(new_direction: int) None\n +move() None\n }\n "
}
[/CONTENT]
## nodes: "<node>: <type> # <comment>"
- ClassView: <class 'str'> # Generate the mermaid class diagram corresponding to source code in "context."
## constraint
- Language: Please use the same language as the user input.
- Format: output wrapped inside [CONTENT][/CONTENT] as format example, nothing else.
## action
Fill in the above nodes(ClassView) based on the format example.
"""
# Prompt constant: a complete 2048 `move` implementation covering all four
# directions; used as context for the code-review prompt in the test below.
# The string body is runtime data and must not be reformatted.
MOVE_FUNCTION = """
## move function implementation
```javascript
move(direction) {
let moved = false;
switch (direction) {
case 'up':
for (let c = 0; c < 4; c++) {
for (let r = 1; r < 4; r++) {
if (this.board[r][c] !== 0) {
let row = r;
while (row > 0 && this.board[row - 1][c] === 0) {
this.board[row - 1][c] = this.board[row][c];
this.board[row][c] = 0;
row--;
moved = true;
}
if (row > 0 && this.board[row - 1][c] === this.board[row][c]) {
this.board[row - 1][c] *= 2;
this.board[row][c] = 0;
this.score += this.board[row - 1][c];
moved = true;
}
}
}
}
break;
case 'down':
for (let c = 0; c < 4; c++) {
for (let r = 2; r >= 0; r--) {
if (this.board[r][c] !== 0) {
let row = r;
while (row < 3 && this.board[row + 1][c] === 0) {
this.board[row + 1][c] = this.board[row][c];
this.board[row][c] = 0;
row++;
moved = true;
}
if (row < 3 && this.board[row + 1][c] === this.board[row][c]) {
this.board[row + 1][c] *= 2;
this.board[row][c] = 0;
this.score += this.board[row + 1][c];
moved = true;
}
}
}
}
break;
case 'left':
for (let r = 0; r < 4; r++) {
for (let c = 1; c < 4; c++) {
if (this.board[r][c] !== 0) {
let col = c;
while (col > 0 && this.board[r][col - 1] === 0) {
this.board[r][col - 1] = this.board[r][col];
this.board[r][col] = 0;
col--;
moved = true;
}
if (col > 0 && this.board[r][col - 1] === this.board[r][col]) {
this.board[r][col - 1] *= 2;
this.board[r][col] = 0;
this.score += this.board[r][col - 1];
moved = true;
}
}
}
}
break;
case 'right':
for (let r = 0; r < 4; r++) {
for (let c = 2; c >= 0; c--) {
if (this.board[r][c] !== 0) {
let col = c;
while (col < 3 && this.board[r][col + 1] === 0) {
this.board[r][col + 1] = this.board[r][col];
this.board[r][col] = 0;
col++;
moved = true;
}
if (col < 3 && this.board[r][col + 1] === this.board[r][col]) {
this.board[r][col + 1] *= 2;
this.board[r][col] = 0;
this.score += this.board[r][col + 1];
moved = true;
}
}
}
}
break;
}
if (moved) {
this.addRandomTile();
}
}
```
"""
@pytest.fixture()
def llm():
    """Provide a default-configured LLM client for the tests in this module."""
    return LLM()
@pytest.mark.asyncio
async def test_llm_code_review(llm):
    """Smoke-test llm.aask with the function-to-mermaid prompt.

    Fix: the original assigned `prompt` from CODE_REVIEW_SMALLEST_CONTEXT and
    immediately overwrote it with FUNCTION_TO_MERMAID_CLASS, leaving a dead
    store. The unused variants are kept as comments for manual experimentation.
    """
    choices = [
        "Please review the move function code above. Should it be refactor?",
        "Please implement the move function",
        "Please write a draft for the move function in order to implement it",
    ]
    # Alternative prompts, kept for manual experimentation:
    # prompt = CODE_REVIEW_SMALLEST_CONTEXT + "\n\n" + MOVE_DRAFT + "\n\n" + choices[1]
    # prompt = CODE_REVIEW_SMALLEST_CONTEXT + "\n\n" + MOVE_FUNCTION + "\n\n" + choices[0]
    prompt = FUNCTION_TO_MERMAID_CLASS
    _ = await llm.aask(prompt)
# if __name__ == "__main__":
# pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/test_context.py | tests/metagpt/test_context.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/1/9 13:52
@Author : alexanderwu
@File : test_context.py
"""
from metagpt.configs.llm_config import LLMType
from metagpt.context import AttrDict, Context
def test_attr_dict_1():
    """Constructor kwargs become attributes; unknown attributes read as None."""
    attrs = AttrDict(name="John", age=30)
    assert attrs.height is None
    assert attrs.name == "John"
    assert attrs.age == 30
def test_attr_dict_2():
    """New attributes can be assigned after construction."""
    attrs = AttrDict(name="John", age=30)
    attrs.height = 180
    assert attrs.height == 180
def test_attr_dict_3():
    """After deletion, reading the attribute yields None instead of raising."""
    attrs = AttrDict(name="John", age=30)
    del attrs.age
    assert attrs.age is None
def test_attr_dict_4():
    """Deleting a missing attribute must raise AttributeError with a clear message.

    Fix: the original try/except silently passed when no exception was raised,
    so the test could never fail on a regression. An `else` branch now makes
    the absence of the exception an explicit failure (no pytest import needed).
    """
    ad = AttrDict(name="John", age=30)
    try:
        del ad.weight
    except AttributeError as e:
        assert str(e) == "No such attribute: weight"
    else:
        raise AssertionError("deleting a missing attribute should raise AttributeError")
def test_attr_dict_5():
    """AttrDict supports pydantic-style construction from a plain dict."""
    attrs = AttrDict.model_validate({"name": "John", "age": 30})
    assert attrs.age == 30
    assert attrs.name == "John"
def test_context_1():
    """A fresh Context has a config and cost manager but no repo or workspace."""
    context = Context()
    assert context.config is not None
    assert context.cost_manager is not None
    assert context.git_repo is None
    assert context.src_workspace is None
def test_context_2():
    """Context.kwargs behaves as a writable attribute bag."""
    context = Context()
    openai_llm = context.config.get_openai_llm()
    # The OpenAI accessor is only guaranteed when the configured provider is OpenAI.
    if context.config.llm.api_type == LLMType.OPENAI:
        assert openai_llm is not None
    extras = context.kwargs
    assert extras is not None
    extras.test_key = "test_value"
    assert extras.test_key == "test_value"
def test_context_3():
    # Placeholder: the use_llm/llm accessors exercised below were retired;
    # the calls are kept as a reference for a future rewrite of this test.
    # ctx = Context()
    # ctx.use_llm(provider=LLMType.OPENAI)
    # assert ctx._llm_config is not None
    # assert ctx._llm_config.api_type == LLMType.OPENAI
    # assert ctx.llm() is not None
    # assert "gpt" in ctx.llm().model
    pass
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/test_context_mixin.py | tests/metagpt/test_context_mixin.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/1/11 19:24
@Author : alexanderwu
@File : test_context_mixin.py
"""
from pathlib import Path
import pytest
from pydantic import BaseModel
from metagpt.actions import Action
from metagpt.config2 import Config
from metagpt.const import CONFIG_ROOT
from metagpt.context_mixin import ContextMixin
from metagpt.environment import Environment
from metagpt.roles import Role
from metagpt.team import Team
from tests.metagpt.provider.mock_llm_config import (
mock_llm_config,
mock_llm_config_proxy,
mock_llm_config_zhipu,
)
class ModelX(ContextMixin, BaseModel):
    """Minimal pydantic model mixing in ContextMixin; test fixture only."""

    # Plain string fields with defaults, used to verify fields survive mixins.
    a: str = "a"
    b: str = "b"
class WTFMixin(BaseModel):
    """Extra plain-field mixin used to exercise multiple inheritance."""

    c: str = "c"
    d: str = "d"
class ModelY(WTFMixin, ModelX):
    """Combines WTFMixin and ModelX to test config handling under a deeper MRO."""

    pass
def test_config_mixin_1():
    """ModelX keeps its plain pydantic field defaults."""
    model = ModelX()
    assert (model.a, model.b) == ("a", "b")
def test_config_mixin_2():
    """set_config without override is a no-op when a config already exists."""
    first_cfg = Config(llm=mock_llm_config)
    second_cfg = Config(llm=mock_llm_config_proxy)
    model = ModelX(config=first_cfg)
    assert model.config == first_cfg
    assert model.config.llm == mock_llm_config
    model.set_config(second_cfg)
    # The original config must still be in place.
    assert model.config == first_cfg
def test_config_mixin_3_multi_inheritance_not_override_config():
    """Multiple inheritance keeps all fields; set_config stays a no-op without override."""
    base_cfg = Config(llm=mock_llm_config)
    other_cfg = Config(llm=mock_llm_config_proxy)
    model = ModelY(config=base_cfg)
    assert model.config == base_cfg
    assert model.config.llm == mock_llm_config
    model.set_config(other_cfg)
    # Config already present, so the second one is ignored.
    assert model.config == base_cfg
    assert model.config.llm == mock_llm_config
    for field, expected in (("a", "a"), ("b", "b"), ("c", "c"), ("d", "d")):
        assert getattr(model, field) == expected
    print(model.__dict__.keys())
    assert "private_config" in model.__dict__.keys()
def test_config_mixin_4_multi_inheritance_override_config():
    """With override=True, set_config replaces the existing config."""
    base_cfg = Config(llm=mock_llm_config)
    zhipu_cfg = Config(llm=mock_llm_config_zhipu)
    model = ModelY(config=base_cfg)
    assert model.config == base_cfg
    assert model.config.llm == mock_llm_config
    model.set_config(zhipu_cfg, override=True)
    # The new config wins this time.
    assert model.config == zhipu_cfg
    assert model.config.llm == mock_llm_config_zhipu
    for field, expected in (("a", "a"), ("b", "b"), ("c", "c"), ("d", "d")):
        assert getattr(model, field) == expected
    print(model.__dict__.keys())
    assert "private_config" in model.__dict__.keys()
    assert model.config.llm.model == "mock_zhipu_model"
@pytest.mark.asyncio
async def test_config_priority():
    """If action's config is set, then its llm will be set, otherwise, it will use the role's llm.

    Fix: the original final check was `assert a1.llm.model == "gpt-4-turbo" if
    cond else "gpt-4-0613"`, which parses as `(assert-expr) if cond else
    "gpt-4-0613"` — on the else branch it asserted a non-empty string, so it
    could never fail. The expected model is now computed first.
    """
    home_dir = Path.home() / CONFIG_ROOT
    gpt4t = Config.from_home("gpt-4-turbo.yaml")
    if not home_dir.exists():
        assert gpt4t is None
    gpt35 = Config.default()
    gpt35.llm.model = "gpt-4-turbo"
    gpt4 = Config.default()
    gpt4.llm.model = "gpt-4-0613"

    a1 = Action(config=gpt4t, name="Say", instruction="Say your opinion with emotion and don't repeat it")
    a2 = Action(name="Say", instruction="Say your opinion with emotion and don't repeat it")
    a3 = Action(name="Vote", instruction="Vote for the candidate, and say why you vote for him/her")

    # it will not work for a1 because the config is already set
    A = Role(name="A", profile="Democratic candidate", goal="Win the election", actions=[a1], watch=[a2], config=gpt4)
    # it will work for a2 because the config is not set
    B = Role(name="B", profile="Republican candidate", goal="Win the election", actions=[a2], watch=[a1], config=gpt4)
    # ditto
    C = Role(name="C", profile="Voter", goal="Vote for the candidate", actions=[a3], watch=[a1, a2], config=gpt35)
    env = Environment(desc="US election live broadcast")
    Team(investment=10.0, env=env, roles=[A, B, C])

    expected_a1_model = "gpt-4-turbo" if (home_dir / "gpt-4-turbo.yaml").exists() else "gpt-4-0613"
    assert a1.llm.model == expected_a1_model
    assert a2.llm.model == "gpt-4-0613"
    assert a3.llm.model == "gpt-4-turbo"
    # history = await team.run(idea="Topic: climate change. Under 80 words per message.", send_to="a1", n_round=3)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/__init__.py | tests/metagpt/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/4/29 16:01
@Author : alexanderwu
@File : __init__.py
"""
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/test_message.py | tests/metagpt/test_message.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/16 10:57
@Author : alexanderwu
@File : test_message.py
@Modified By: mashenquan, 2023-11-1. Modify coding style.
"""
import pytest
from metagpt.schema import AIMessage, Message, SystemMessage, UserMessage
def test_message():
    """The role survives both dict serialization and string rendering."""
    message = Message(role="User", content="WTF")
    as_dict = message.to_dict()
    assert as_dict["role"] == "User"
    assert "User" in str(message)
def test_all_messages():
    """Every message flavor stores the content it was constructed with."""
    payload = "test_message"
    flavors = (
        UserMessage(payload),
        SystemMessage(payload),
        AIMessage(payload),
        Message(content=payload, role="QA"),
    )
    assert all(message.content == payload for message in flavors)
# Allow running this module's tests directly without the pytest CLI.
if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/test_subscription.py | tests/metagpt/test_subscription.py | import asyncio
import pytest
from metagpt.roles import Role
from metagpt.schema import Message
from metagpt.subscription import SubscriptionRunner
@pytest.mark.asyncio
async def test_subscription_run():
    """SubscriptionRunner delivers trigger messages to each role's callback
    and removes a role's task after unsubscribe."""
    callback_done = 0

    async def trigger():
        # Emit one message, then effectively idle for the rest of the test.
        while True:
            yield Message(content="the latest news about OpenAI")
            await asyncio.sleep(3600 * 24)

    class MockRole(Role):
        async def run(self, message=None):
            return Message(content="")

    async def callback(message):
        nonlocal callback_done
        callback_done += 1

    runner = SubscriptionRunner()
    roles = []
    for _ in range(2):
        role = MockRole()
        roles.append(role)
        await runner.subscribe(role, trigger(), callback)
    task = asyncio.get_running_loop().create_task(runner.run())

    # Yield control up to 10 times so both callbacks can fire.
    for _ in range(10):
        if callback_done == 2:
            break
        await asyncio.sleep(0)
    else:
        raise TimeoutError("callback not call")

    role = roles[0]
    assert role in runner.tasks
    await runner.unsubscribe(roles[0])

    # After unsubscribe the role's task should disappear from the runner.
    for _ in range(10):
        if role not in runner.tasks:
            break
        await asyncio.sleep(0)
    else:
        raise TimeoutError("callback not call")

    task.cancel()
    for i in runner.tasks.values():
        i.cancel()
@pytest.mark.asyncio
async def test_subscription_run_error(loguru_caplog):
    """runner.run() propagates a role's exception by default; with
    raise_exception=False it logs the error and drops the finished task."""

    async def trigger1():
        while True:
            yield Message(content="the latest news about OpenAI")
            await asyncio.sleep(3600 * 24)

    async def trigger2():
        # Finite trigger: the subscription completes after a single message.
        yield Message(content="the latest news about OpenAI")

    class MockRole1(Role):
        async def run(self, message=None):
            raise RuntimeError

    class MockRole2(Role):
        async def run(self, message=None):
            return Message(content="")

    async def callback(msg: Message):
        print(msg)

    runner = SubscriptionRunner()
    await runner.subscribe(MockRole1(), trigger1(), callback)
    with pytest.raises(RuntimeError):
        await runner.run()

    await runner.subscribe(MockRole2(), trigger2(), callback)
    task = asyncio.get_running_loop().create_task(runner.run(False))
    # Yield control until the completed subscription has been cleaned up.
    for _ in range(10):
        if not runner.tasks:
            break
        await asyncio.sleep(0)
    else:
        raise TimeoutError("wait runner tasks empty timeout")
    task.cancel()
    for i in runner.tasks.values():
        i.cancel()

    # Both the failure and the completion should have been logged.
    assert len(loguru_caplog.records) >= 2
    logs = "".join(loguru_caplog.messages)
    assert "run error" in logs
    assert "has completed" in logs
# Allow running this module's tests directly without the pytest CLI.
if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/test_repo_parser.py | tests/metagpt/test_repo_parser.py | from pathlib import Path
from pprint import pformat
import pytest
from metagpt.const import METAGPT_ROOT
from metagpt.logs import logger
from metagpt.repo_parser import DotClassAttribute, DotClassMethod, DotReturn, RepoParser
def test_repo_parser():
    """Symbols and structure files are generated for the strategy package."""
    parser = RepoParser(base_directory=METAGPT_ROOT / "metagpt" / "strategy")
    symbols = parser.generate_symbols()
    logger.info(pformat(symbols))
    assert "tot_schema.py" in str(symbols)
    # Both output modes must produce a file on disk.
    for mode in ("json", "csv"):
        assert parser.generate_structure(mode=mode).exists()
def test_error():
    """_parse_file should return empty list when file not existed"""
    missing = Path("test_not_existed_file.py")
    assert RepoParser._parse_file(missing) == []
@pytest.mark.parametrize(
    ("v", "name", "type_", "default_", "compositions"),
    [
        # (raw dot-format attribute line, expected name/type/default/compositions)
        ("children : dict[str, 'ActionNode']", "children", "dict[str,ActionNode]", "", ["ActionNode"]),
        ("context : str", "context", "str", "", []),
        ("example", "example", "", "", []),
        ("expected_type : Type", "expected_type", "Type", "", ["Type"]),
        ("args : Optional[Dict]", "args", "Optional[Dict]", "", []),
        ("rsp : Optional[Message] = Message.Default", "rsp", "Optional[Message]", "Message.Default", ["Message"]),
        (
            "browser : Literal['chrome', 'firefox', 'edge', 'ie']",
            "browser",
            "Literal['chrome','firefox','edge','ie']",
            "",
            [],
        ),
        (
            "browser : Dict[ Message, Literal['chrome', 'firefox', 'edge', 'ie'] ]",
            "browser",
            "Dict[Message,Literal['chrome','firefox','edge','ie']]",
            "",
            ["Message"],
        ),
        ("attributes : List[ClassAttribute]", "attributes", "List[ClassAttribute]", "", ["ClassAttribute"]),
        ("attributes = []", "attributes", "", "[]", []),
        (
            "request_timeout: Optional[Union[float, Tuple[float, float]]]",
            "request_timeout",
            "Optional[Union[float,Tuple[float,float]]]",
            "",
            [],
        ),
    ],
)
def test_parse_member(v, name, type_, default_, compositions):
    """DotClassAttribute.parse decomposes a dot-format attribute line and
    the result survives a JSON round-trip."""
    attr = DotClassAttribute.parse(v)
    assert name == attr.name
    assert type_ == attr.type_
    assert default_ == attr.default_
    assert compositions == attr.compositions
    assert v == attr.description
    # Round-trip through JSON must reproduce an equal object.
    json_data = attr.model_dump_json()
    v = DotClassAttribute.model_validate_json(json_data)
    assert v == attr
@pytest.mark.parametrize(
    ("line", "package_name", "info"),
    [
        # (raw Graphviz dot class line, expected package name, expected label info)
        (
            '"metagpt.roles.architect.Architect" [color="black", fontcolor="black", label=<{Architect|constraints : str<br ALIGN="LEFT"/>goal : str<br ALIGN="LEFT"/>name : str<br ALIGN="LEFT"/>profile : str<br ALIGN="LEFT"/>|}>, shape="record", style="solid"];',
            "metagpt.roles.architect.Architect",
            "Architect|constraints : str\ngoal : str\nname : str\nprofile : str\n|",
        ),
        (
            '"metagpt.actions.skill_action.ArgumentsParingAction" [color="black", fontcolor="black", label=<{ArgumentsParingAction|args : Optional[Dict]<br ALIGN="LEFT"/>ask : str<br ALIGN="LEFT"/>prompt<br ALIGN="LEFT"/>rsp : Optional[Message]<br ALIGN="LEFT"/>skill<br ALIGN="LEFT"/>|parse_arguments(skill_name, txt): dict<br ALIGN="LEFT"/>run(with_message): Message<br ALIGN="LEFT"/>}>, shape="record", style="solid"];',
            "metagpt.actions.skill_action.ArgumentsParingAction",
            "ArgumentsParingAction|args : Optional[Dict]\nask : str\nprompt\nrsp : Optional[Message]\nskill\n|parse_arguments(skill_name, txt): dict\nrun(with_message): Message\n",
        ),
        (
            '"metagpt.strategy.base.BaseEvaluator" [color="black", fontcolor="black", label=<{BaseEvaluator|<br ALIGN="LEFT"/>|<I>status_verify</I>()<br ALIGN="LEFT"/>}>, shape="record", style="solid"];',
            "metagpt.strategy.base.BaseEvaluator",
            "BaseEvaluator|\n|<I>status_verify</I>()\n",
        ),
        (
            '"metagpt.configs.browser_config.BrowserConfig" [color="black", fontcolor="black", label=<{BrowserConfig|browser : Literal[\'chrome\', \'firefox\', \'edge\', \'ie\']<br ALIGN="LEFT"/>driver : Literal[\'chromium\', \'firefox\', \'webkit\']<br ALIGN="LEFT"/>engine<br ALIGN="LEFT"/>path : str<br ALIGN="LEFT"/>|}>, shape="record", style="solid"];',
            "metagpt.configs.browser_config.BrowserConfig",
            "BrowserConfig|browser : Literal['chrome', 'firefox', 'edge', 'ie']\ndriver : Literal['chromium', 'firefox', 'webkit']\nengine\npath : str\n|",
        ),
        (
            '"metagpt.tools.search_engine_serpapi.SerpAPIWrapper" [color="black", fontcolor="black", label=<{SerpAPIWrapper|aiosession : Optional[aiohttp.ClientSession]<br ALIGN="LEFT"/>model_config<br ALIGN="LEFT"/>params : dict<br ALIGN="LEFT"/>search_engine : Optional[Any]<br ALIGN="LEFT"/>serpapi_api_key : Optional[str]<br ALIGN="LEFT"/>|check_serpapi_api_key(val: str)<br ALIGN="LEFT"/>get_params(query: str): Dict[str, str]<br ALIGN="LEFT"/>results(query: str, max_results: int): dict<br ALIGN="LEFT"/>run(query, max_results: int, as_string: bool): str<br ALIGN="LEFT"/>}>, shape="record", style="solid"];',
            "metagpt.tools.search_engine_serpapi.SerpAPIWrapper",
            "SerpAPIWrapper|aiosession : Optional[aiohttp.ClientSession]\nmodel_config\nparams : dict\nsearch_engine : Optional[Any]\nserpapi_api_key : Optional[str]\n|check_serpapi_api_key(val: str)\nget_params(query: str): Dict[str, str]\nresults(query: str, max_results: int): dict\nrun(query, max_results: int, as_string: bool): str\n",
        ),
    ],
)
def test_split_class_line(line, package_name, info):
    """_split_class_line separates a dot class line into package path and label body."""
    p, i = RepoParser._split_class_line(line)
    assert p == package_name
    assert i == info
@pytest.mark.parametrize(
    ("v", "name", "args", "return_args"),
    [
        # (raw dot-format method signature, expected name, parsed args, parsed return)
        (
            "<I>arequest</I>(method, url, params, headers, files, stream: Literal[True], request_id: Optional[str], request_timeout: Optional[Union[float, Tuple[float, float]]]): Tuple[AsyncGenerator[OpenAIResponse, None], bool, str]",
            "arequest",
            [
                DotClassAttribute(name="method", description="method"),
                DotClassAttribute(name="url", description="url"),
                DotClassAttribute(name="params", description="params"),
                DotClassAttribute(name="headers", description="headers"),
                DotClassAttribute(name="files", description="files"),
                DotClassAttribute(name="stream", type_="Literal[True]", description="stream: Literal[True]"),
                DotClassAttribute(name="request_id", type_="Optional[str]", description="request_id: Optional[str]"),
                DotClassAttribute(
                    name="request_timeout",
                    type_="Optional[Union[float,Tuple[float,float]]]",
                    description="request_timeout: Optional[Union[float, Tuple[float, float]]]",
                ),
            ],
            DotReturn(
                type_="Tuple[AsyncGenerator[OpenAIResponse,None],bool,str]",
                compositions=["AsyncGenerator", "OpenAIResponse"],
                description="Tuple[AsyncGenerator[OpenAIResponse, None], bool, str]",
            ),
        ),
        (
            "<I>update</I>(subject: str, predicate: str, object_: str)",
            "update",
            [
                DotClassAttribute(name="subject", type_="str", description="subject: str"),
                DotClassAttribute(name="predicate", type_="str", description="predicate: str"),
                DotClassAttribute(name="object_", type_="str", description="object_: str"),
            ],
            DotReturn(description=""),
        ),
    ],
)
def test_parse_method(v, name, args, return_args):
    """DotClassMethod.parse decomposes a dot-format method signature and
    the result survives a JSON round-trip."""
    method = DotClassMethod.parse(v)
    assert method.name == name
    assert method.args == args
    assert method.return_args == return_args
    assert method.description == v
    # Round-trip through JSON must reproduce an equal object.
    json_data = method.model_dump_json()
    v = DotClassMethod.model_validate_json(json_data)
    assert v == method
# Allow running this module's tests directly without the pytest CLI.
if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/test_document.py | tests/metagpt/test_document.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/1/2 21:00
@Author : alexanderwu
@File : test_document.py
"""
from metagpt.config2 import config
from metagpt.document import Repo
from metagpt.logs import logger
def set_existing_repo(path):
    """Create a repo at *path* and seed it with one doc and one code file."""
    repo = Repo.from_path(path)
    repo.set("doc/wtf_file.md", "wtf content")
    repo.set("code/wtf_file.py", "def hello():\n    print('hello')")
    logger.info(repo)  # check doc
def load_existing_repo(path):
    """Reload the repo at *path* and verify the seeded documents round-trip."""
    repo = Repo.from_path(path)
    logger.info(repo)
    logger.info(repo.eda())
    assert repo
    expected = {
        "doc/wtf_file.md": "wtf content",
        "code/wtf_file.py": "def hello():\n    print('hello')",
    }
    for filename, content in expected.items():
        assert repo.get(filename).content == content
def test_repo_set_load():
    """End-to-end: documents written via one Repo instance are readable by another."""
    target = config.workspace.path / "test_repo"
    set_existing_repo(target)
    load_existing_repo(target)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/test_schema.py | tests/metagpt/test_schema.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/20 10:40
@Author : alexanderwu
@File : test_schema.py
@Modified By: mashenquan, 2023-11-1. In line with Chapter 2.2.1 and 2.2.2 of RFC 116, introduce unit tests for
the utilization of the new feature of `Message` class.
"""
import json
from typing import Annotated
import pytest
from pydantic import BaseModel, Field
from metagpt.actions import Action
from metagpt.actions.action_node import ActionNode
from metagpt.actions.write_code import WriteCode
from metagpt.const import SERDESER_PATH, SYSTEM_DESIGN_FILE_REPO, TASK_FILE_REPO
from metagpt.schema import (
AIMessage,
CodeSummarizeContext,
Document,
Message,
MessageQueue,
Plan,
SerializationMixin,
SystemMessage,
Task,
UMLClassAttribute,
UMLClassMethod,
UMLClassView,
UserMessage,
)
from metagpt.utils.common import any_to_str
def test_messages():
    """str() of a message list mentions each message's role."""
    payload = "test_message"
    rendered = str(
        [
            UserMessage(content=payload),
            SystemMessage(content=payload),
            AIMessage(content=payload),
            Message(content=payload, role="QA"),
        ]
    )
    for role in ("user", "system", "assistant", "QA"):
        assert role in rendered
def test_message():
    """Message round-trips through dump/load and normalizes cause_by/send_to."""
    # Positional content plus keyword role is accepted.
    Message("a", role="v1")
    m = Message(content="a", role="v1")
    v = m.dump()
    d = json.loads(v)
    assert d
    assert d.get("content") == "a"
    assert d.get("role") == "v1"
    m.role = "v2"
    v = m.dump()
    assert v
    # load() must reflect the mutation made before the dump.
    m = Message.load(v)
    assert m.content == "a"
    assert m.role == "v2"

    # Unknown kwargs (x="d") are tolerated; send_to is normalized to a set.
    m = Message(content="a", role="b", cause_by="c", x="d", send_to="c")
    assert m.content == "a"
    assert m.role == "b"
    assert m.send_to == {"c"}
    assert m.cause_by == "c"
    m.sent_from = "e"
    assert m.sent_from == "e"
    # cause_by accepts a plain string, a class, or an instance;
    # classes/instances collapse to the dotted import path.
    m.cause_by = "Message"
    assert m.cause_by == "Message"
    m.cause_by = Action
    assert m.cause_by == any_to_str(Action)
    m.cause_by = Action()
    assert m.cause_by == any_to_str(Action)
    m.content = "b"
    assert m.content == "b"
def test_routes():
    """send_to accepts a single address or a mixed set, normalizing to a str set."""
    msg = Message(content="a", role="b", cause_by="c", x="d", send_to="c")
    msg.send_to = "b"
    assert msg.send_to == {"b"}
    msg.send_to = {"e", Action}
    assert msg.send_to == {any_to_str(Action), "e"}
def test_message_serdeser():
    """Messages with dynamic instruct_content serialize to a dict and back."""
    out_mapping = {"field3": (str, ...), "field4": (list[str], ...)}
    out_data = {"field3": "field3 value3", "field4": ["field4 value1", "field4 value2"]}
    # Dynamically build the pydantic model class that backs instruct_content.
    ic_obj = ActionNode.create_model_class("code", out_mapping)
    message = Message(content="code", instruct_content=ic_obj(**out_data), role="engineer", cause_by=WriteCode)
    message_dict = message.model_dump()
    assert message_dict["cause_by"] == "metagpt.actions.write_code.WriteCode"
    # The dynamic model is serialized as class name + field mapping + values.
    assert message_dict["instruct_content"] == {
        "class": "code",
        "mapping": {"field3": "(<class 'str'>, Ellipsis)", "field4": "(list[str], Ellipsis)"},
        "value": {"field3": "field3 value3", "field4": ["field4 value1", "field4 value2"]},
    }
    new_message = Message.model_validate(message_dict)
    assert new_message.content == message.content
    assert new_message.instruct_content.model_dump() == message.instruct_content.model_dump()
    assert new_message.instruct_content == message.instruct_content  # TODO
    assert new_message.cause_by == message.cause_by
    assert new_message.instruct_content.field3 == out_data["field3"]

    # Defaults: no instruct_content, cause_by falls back to UserRequirement.
    message = Message(content="code")
    message_dict = message.model_dump()
    new_message = Message(**message_dict)
    assert new_message.instruct_content is None
    assert new_message.cause_by == "metagpt.actions.add_requirement.UserRequirement"
    # Malformed JSON must not raise; load() returns a falsy value.
    assert not Message.load("{")
def test_document():
    """get_meta() keeps the path fields but drops the content."""
    document = Document(root_path="a", filename="b", content="c")
    meta = document.get_meta()
    assert (meta.root_path, meta.filename) == (document.root_path, document.filename)
    assert meta.content == ""
@pytest.mark.asyncio
async def test_message_queue():
    """MessageQueue dumps to a JSON string and reloads to an equivalent queue."""
    mq = MessageQueue()
    # An empty queue serializes to an empty JSON list.
    val = await mq.dump()
    assert val == "[]"
    mq.push(Message(content="1"))
    mq.push(Message(content="2中文测试aaa"))  # non-ASCII content must survive the round-trip
    msg = mq.pop()
    assert msg.content == "1"
    val = await mq.dump()
    assert val
    new_mq = MessageQueue.load(val)
    # Remaining contents must match after the round-trip.
    assert new_mq.pop_all() == mq.pop_all()
@pytest.mark.parametrize(
    ("file_list", "want"),
    [
        # loads() picks out the design and task files from a mixed file list.
        (
            [f"{SYSTEM_DESIGN_FILE_REPO}/a.txt", f"{TASK_FILE_REPO}/b.txt"],
            CodeSummarizeContext(
                design_filename=f"{SYSTEM_DESIGN_FILE_REPO}/a.txt", task_filename=f"{TASK_FILE_REPO}/b.txt"
            ),
        )
    ],
)
def test_CodeSummarizeContext(file_list, want):
    """CodeSummarizeContext.loads builds an equal, hashable context from file paths."""
    ctx = CodeSummarizeContext.loads(file_list)
    assert ctx == want
    # The context must be usable as a dict key (hash consistent with equality).
    m = {ctx: ctx}
    assert want in m
def test_class_view():
    """UML attribute/method/class views render the expected mermaid snippets."""
    attr_a = UMLClassAttribute(name="a", value_type="int", default_value="0", visibility="+")
    # align controls the number of leading tabs in the rendered line.
    assert attr_a.get_mermaid(align=1) == "\t+int a=0"
    attr_b = UMLClassAttribute(name="b", value_type="str", default_value="0", visibility="#")
    # String-typed defaults are quoted in the mermaid output.
    assert attr_b.get_mermaid(align=0) == '#str b="0"'
    class_view = UMLClassView(name="A")
    class_view.attributes = [attr_a, attr_b]

    method_a = UMLClassMethod(name="run", visibility="+")
    assert method_a.get_mermaid(align=1) == "\t+run()"
    method_b = UMLClassMethod(
        name="_test",
        visibility="#",
        args=[UMLClassAttribute(name="a", value_type="str"), UMLClassAttribute(name="b", value_type="int")],
        return_type="str",
    )
    assert method_b.get_mermaid(align=0) == "#_test(str a,int b) str"
    class_view.methods = [method_a, method_b]
    # The class view combines attributes and methods into one mermaid block.
    assert (
        class_view.get_mermaid(align=0)
        == 'class A{\n\t+int a=0\n\t#str b="0"\n\t+run()\n\t#_test(str a,int b) str\n}\n'
    )
class TestPlan:
def test_add_tasks_ordering(self):
    """add_tasks topologically sorts tasks by their dependencies."""
    plan = Plan(goal="")
    tasks = [
        Task(task_id="1", dependent_task_ids=["2", "3"], instruction="Third"),
        Task(task_id="2", instruction="First"),
        Task(task_id="3", dependent_task_ids=["2"], instruction="Second"),
    ]  # 2 -> 3 -> 1
    plan.add_tasks(tasks)
    assert [task.task_id for task in plan.tasks] == ["2", "3", "1"]
def test_add_tasks_to_existing_no_common_prefix(self):
    """With no common prefix, add_tasks replaces the whole plan with the new tasks."""
    plan = Plan(goal="")
    tasks = [
        Task(task_id="1", dependent_task_ids=["2", "3"], instruction="Third"),
        Task(task_id="2", instruction="First"),
        Task(task_id="3", dependent_task_ids=["2"], instruction="Second", is_finished=True),
    ]  # 2 -> 3 -> 1
    plan.add_tasks(tasks)
    new_tasks = [Task(task_id="3", instruction="")]
    plan.add_tasks(new_tasks)
    assert [task.task_id for task in plan.tasks] == ["3"]
    assert not plan.tasks[0].is_finished  # must be the new unfinished task
def test_add_tasks_to_existing_with_common_prefix(self):
    """Tasks in the common prefix keep their finished state; the rest are appended."""
    plan = Plan(goal="")
    tasks = [
        Task(task_id="1", dependent_task_ids=["2", "3"], instruction="Third"),
        Task(task_id="2", instruction="First"),
        Task(task_id="3", dependent_task_ids=["2"], instruction="Second"),
    ]  # 2 -> 3 -> 1
    plan.add_tasks(tasks)
    plan.finish_current_task()  # finish 2
    plan.finish_current_task()  # finish 3
    new_tasks = [
        Task(task_id="4", dependent_task_ids=["3"], instruction="Third"),
        Task(task_id="2", instruction="First"),
        Task(task_id="3", dependent_task_ids=["2"], instruction="Second"),
    ]  # 2 -> 3 -> 4, so the common prefix is 2 -> 3, and these two should be obtained from the existing tasks
    plan.add_tasks(new_tasks)
    assert [task.task_id for task in plan.tasks] == ["2", "3", "4"]
    assert (
        plan.tasks[0].is_finished and plan.tasks[1].is_finished
    )  # "2" and "3" should be the original finished one
    assert plan.current_task_id == "4"
def test_current_task(self):
    """current_task is the first unfinished task in dependency order."""
    plan = Plan(goal="")
    tasks = [
        Task(task_id="1", dependent_task_ids=["2"], instruction="Second"),
        Task(task_id="2", instruction="First"),
    ]
    plan.add_tasks(tasks)
    assert plan.current_task.task_id == "2"
def test_finish_task(self):
    """finish_current_task advances current_task to the next pending task."""
    plan = Plan(goal="")
    tasks = [
        Task(task_id="1", instruction="First"),
        Task(task_id="2", dependent_task_ids=["1"], instruction="Second"),
    ]
    plan.add_tasks(tasks)
    plan.finish_current_task()
    assert plan.current_task.task_id == "2"
def test_finished_tasks(self):
    """get_finished_tasks returns exactly the tasks finished so far."""
    plan = Plan(goal="")
    tasks = [
        Task(task_id="1", instruction="First"),
        Task(task_id="2", dependent_task_ids=["1"], instruction="Second"),
    ]
    plan.add_tasks(tasks)
    plan.finish_current_task()
    finished_tasks = plan.get_finished_tasks()
    assert len(finished_tasks) == 1
    assert finished_tasks[0].task_id == "1"
def test_reset_task_existing(self):
    """reset_task clears code, result, and the finished flag of an existing task."""
    plan = Plan(goal="")
    # NOTE(review): constructed with finished=True but checked via is_finished
    # below — confirm the Task model maps/aliases these field names.
    task = Task(task_id="1", instruction="Do something", code="print('Hello')", result="Hello", finished=True)
    plan.add_tasks([task])
    plan.reset_task("1")
    reset_task = plan.task_map["1"]
    assert reset_task.code == ""
    assert reset_task.result == ""
    assert not reset_task.is_finished
def test_reset_task_non_existing(self):
    """reset_task on an unknown id is a no-op and does not create a task."""
    plan = Plan(goal="")
    task = Task(task_id="1", instruction="Do something", code="print('Hello')", result="Hello", finished=True)
    plan.add_tasks([task])
    plan.reset_task("2")  # Task with ID 2 does not exist
    assert "1" in plan.task_map
    assert "2" not in plan.task_map
def test_replace_task_with_dependents(self):
plan = Plan(goal="")
tasks = [
Task(task_id="1", instruction="First Task", finished=True),
Task(task_id="2", instruction="Second Task", dependent_task_ids=["1"], finished=True),
]
plan.add_tasks(tasks)
new_task = Task(task_id="1", instruction="Updated First Task")
plan.replace_task(new_task)
assert plan.task_map["1"].instruction == "Updated First Task"
assert not plan.task_map["2"].is_finished # Dependent task should be reset
assert plan.task_map["2"].code == ""
assert plan.task_map["2"].result == ""
def test_replace_task_non_existing(self):
plan = Plan(goal="")
task = Task(task_id="1", instruction="First Task")
plan.add_tasks([task])
new_task = Task(task_id="2", instruction="New Task")
with pytest.raises(AssertionError):
plan.replace_task(new_task) # Task with ID 2 does not exist in plan
assert "1" in plan.task_map
assert "2" not in plan.task_map
def test_append_task_with_valid_dependencies(self):
plan = Plan(goal="Test")
existing_task = [Task(task_id="1")]
plan.add_tasks(existing_task)
new_task = Task(task_id="2", dependent_task_ids=["1"])
plan.append_task(new_task)
assert plan.tasks[-1].task_id == "2"
assert plan.task_map["2"] == new_task
def test_append_task_with_invalid_dependencies(self):
new_task = Task(task_id="2", dependent_task_ids=["3"])
plan = Plan(goal="Test")
with pytest.raises(AssertionError):
plan.append_task(new_task)
def test_append_task_without_dependencies(self):
plan = Plan(goal="Test")
existing_task = [Task(task_id="1")]
plan.add_tasks(existing_task)
new_task = Task(task_id="2")
plan.append_task(new_task)
assert len(plan.tasks) == 2
assert plan.current_task_id == "1"
def test_append_task_updates_current_task(self):
finished_task = Task(task_id="1", is_finished=True)
new_task = Task(task_id="2")
plan = Plan(goal="Test", tasks=[finished_task])
plan.append_task(new_task)
assert plan.current_task_id == "2"
def test_update_current_task(self):
task1 = Task(task_id="1", is_finished=True)
task2 = Task(task_id="2")
plan = Plan(goal="Test", tasks=[task1, task2])
plan._update_current_task()
assert plan.current_task_id == "2"
@pytest.mark.parametrize(
    ("content", "key_descriptions"),
    [
        # Case 1: extract structured error info from a Python traceback.
        (
            """
Traceback (most recent call last):
  File "/Users/iorishinier/github/MetaGPT/workspace/game_2048_1/game_2048/main.py", line 38, in <module>
    Main().main()
  File "/Users/iorishinier/github/MetaGPT/workspace/game_2048_1/game_2048/main.py", line 28, in main
    self.user_interface.draw()
  File "/Users/iorishinier/github/MetaGPT/workspace/game_2048_1/game_2048/user_interface.py", line 16, in draw
    if grid[i][j] != 0:
TypeError: 'Grid' object is not subscriptable
""",
            {
                "filename": "the string type of the path name of the source code where the bug resides",
                "line": "the integer type of the line error occurs",
                "function_name": "the string type of the function name the error occurs in",
                "code": "the string type of the codes where the error occurs at",
                "info": "the string type of the error information",
            },
        ),
        # Case 2: extract GitHub PR parameters from a Chinese natural-language request.
        (
            "将代码提交到github上的iorisa/repo1的branch1分支,发起pull request ,合并到master分支。",
            {
                "repo_name": "the string type of github repo to create pull",
                "head": "the string type of github branch to be pushed",
                "base": "the string type of github branch to merge the changes into",
            },
        ),
    ],
)
async def test_parse_resources(context, content: str, key_descriptions):
    """Message.parse_resources should extract every described key plus a resources list.

    NOTE(review): requires a live LLM via the `context` fixture — not hermetic.
    """
    msg = Message(content=content)
    llm = context.llm_with_cost_manager_from_llm_config(context.config.llm)
    result = await msg.parse_resources(llm=llm, key_descriptions=key_descriptions)
    # every requested key and an overall "resources" field must be present
    assert result
    assert result.get("resources")
    for k in key_descriptions.keys():
        assert k in result
@pytest.mark.parametrize(("name", "value"), [("c1", {"age": 10, "name": "Alice"}), ("", {"path": __file__})])
def test_create_instruct_value(name, value):
    """A dynamically created instruct value should dump back to the source mapping."""
    instruct = Message.create_instruct_value(kvs=value, class_name=name)
    assert instruct.model_dump() == value
class TestUserModel(SerializationMixin, BaseModel):
    """Minimal serializable model used as a fixture by TestSerializationMixin."""

    # plain fields: both participate in serialization
    name: str
    value: int
class TestUserModelWithExclude(TestUserModel):
    """Fixture model with a field excluded from serialization via Field(exclude=True)."""

    # `age` must NOT appear in the serialized dump
    age: Annotated[int, Field(exclude=True)]
class TestSerializationMixin:
    """Tests for SerializationMixin's file-based (de)serialization helpers."""

    @pytest.fixture
    def mock_write_json_file(self, mocker):
        # patch at metagpt.schema, where the mixin looks the helper up
        return mocker.patch("metagpt.schema.write_json_file")

    @pytest.fixture
    def mock_read_json_file(self, mocker):
        return mocker.patch("metagpt.schema.read_json_file")

    @pytest.fixture
    def mock_user_model(self):
        return TestUserModel(name="test", value=42)

    def test_serialize(self, mock_write_json_file, mock_user_model):
        """serialize() writes the full model dump to the given path."""
        file_path = "test.json"
        mock_user_model.serialize(file_path)
        mock_write_json_file.assert_called_once_with(file_path, mock_user_model.model_dump())

    def test_deserialize(self, mock_read_json_file):
        """deserialize() reads the path and reconstructs an equal model."""
        file_path = "test.json"
        data = {"name": "test", "value": 42}
        mock_read_json_file.return_value = data
        model = TestUserModel.deserialize(file_path)
        mock_read_json_file.assert_called_once_with(file_path)
        assert model == TestUserModel(**data)

    def test_serialize_with_exclude(self, mock_write_json_file):
        """Fields marked Field(exclude=True) are left out of the serialized dump."""
        model = TestUserModelWithExclude(name="test", value=42, age=10)
        file_path = "test.json"
        model.serialize(file_path)
        # `age` is excluded; the fully-qualified class name is recorded for reload
        expected_data = {
            "name": "test",
            "value": 42,
            "__module_class_name": "tests.metagpt.test_schema.TestUserModelWithExclude",
        }
        mock_write_json_file.assert_called_once_with(file_path, expected_data)

    def test_get_serialization_path(self):
        """The default serialization path is SERDESER_PATH/<ClassName>.json."""
        expected_path = str(SERDESER_PATH / "TestUserModel.json")
        assert TestUserModel.get_serialization_path() == expected_path
if __name__ == "__main__":
    # allow running this test module directly, outside a pytest session
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/management/test_skill_manager.py | tests/metagpt/management/test_skill_manager.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/6/6 12:38
@Author : alexanderwu
@File : test_skill_manager.py
"""
from metagpt.actions import WritePRD, WriteTest
from metagpt.logs import logger
from metagpt.management.skill_manager import SkillManager
def test_skill_manager():
    """Smoke test: register two skills, then fetch and retrieve them by name/description."""
    manager = SkillManager()
    logger.info(manager._store)

    write_prd = WritePRD(name="WritePRD")
    write_prd.desc = "基于老板或其他人的需求进行PRD的撰写,包括用户故事、需求分解等"
    write_test = WriteTest(name="WriteTest")
    write_test.desc = "进行测试用例的撰写"
    manager.add_skill(write_prd)
    manager.add_skill(write_test)

    logger.info(manager.get_skill("WriteTest"))

    # exact-name retrieval
    prd_hits = manager.retrieve_skill("WritePRD")
    logger.info(prd_hits)
    assert prd_hits[0] == "WritePRD"

    # semantic retrieval by description (Chinese query)
    test_hits = manager.retrieve_skill("写测试用例")
    logger.info(test_hits)
    assert test_hits[0] == "WriteTest"

    logger.info(manager.retrieve_skill_scored("写PRD"))
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/management/__init__.py | tests/metagpt/management/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/6/6 12:38
@Author : alexanderwu
@File : __init__.py
"""
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/test_web_browser_engine.py | tests/metagpt/tools/test_web_browser_engine.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from metagpt.tools import WebBrowserEngineType, web_browser_engine
from metagpt.utils.parse_html import WebPage
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "browser_type",
    [
        WebBrowserEngineType.PLAYWRIGHT,
        WebBrowserEngineType.SELENIUM,
    ],
    ids=["playwright", "selenium"],
)
async def test_scrape_web_page(browser_type, http_server):
    """Scrape a local test page with each engine, both single-URL and batched."""
    server, url = await http_server()
    extra_urls = [url, url, url]
    browser = web_browser_engine.WebBrowserEngine(engine=browser_type)

    # single URL -> one WebPage
    page = await browser.run(url)
    assert isinstance(page, WebPage)
    assert "MetaGPT" in page.inner_text

    # several URLs -> a list with one entry per requested page
    if extra_urls:
        pages = await browser.run(url, *extra_urls)
        assert isinstance(pages, list)
        assert len(pages) == len(extra_urls) + 1
        assert all("MetaGPT" in p.inner_text for p in pages)

    await server.stop()


if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/test_web_browser_engine_playwright.py | tests/metagpt/tools/test_web_browser_engine_playwright.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from metagpt.tools import web_browser_engine_playwright
from metagpt.utils.parse_html import WebPage
@pytest.mark.asyncio
@pytest.mark.parametrize(
    # fix: parameter was misspelled "kwagrs" in both the argnames string and
    # the function signature; renamed to "kwargs" (pytest-internal name only)
    "browser_type, use_proxy, kwargs",
    [
        ("chromium", {"proxy": True}, {}),
        (
            "firefox",
            {},
            {"ignore_https_errors": True},
        ),
        (
            "webkit",
            {},
            {"ignore_https_errors": True},
        ),
    ],
    ids=["chromium-normal", "firefox-normal", "webkit-normal"],
)
async def test_scrape_web_page(browser_type, use_proxy, kwargs, proxy, capfd, http_server):
    """Scrape a local page with each Playwright browser, optionally via the test proxy.

    Args:
        browser_type: Playwright browser name ("chromium", "firefox", "webkit").
        use_proxy: Truthy to route traffic through the local proxy fixture.
        kwargs: Extra keyword arguments forwarded to PlaywrightWrapper.
    """
    server, url = await http_server()
    urls = [url, url, url]
    proxy_url = None
    if use_proxy:
        proxy_server, proxy_url = await proxy()
    browser = web_browser_engine_playwright.PlaywrightWrapper(browser_type=browser_type, proxy=proxy_url, **kwargs)

    # single URL -> one WebPage
    result = await browser.run(url)
    assert isinstance(result, WebPage)
    assert "MetaGPT" in result.inner_text

    # several URLs -> a list with one entry per requested page
    if urls:
        results = await browser.run(url, *urls)
        assert isinstance(results, list)
        assert len(results) == len(urls) + 1
        assert all(("MetaGPT" in i.inner_text) for i in results)

    if use_proxy:
        proxy_server.close()
        await proxy_server.wait_closed()
        # the proxy fixture prints "Proxy:" for each forwarded request
        assert "Proxy:" in capfd.readouterr().out

    await server.stop()


if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/test_prompt_writer.py | tests/metagpt/tools/test_prompt_writer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/2 17:46
@Author : alexanderwu
@File : test_prompt_writer.py
"""
import pytest
from metagpt.logs import logger
from metagpt.tools.prompt_writer import (
BEAGECTemplate,
EnronTemplate,
GPTPromptGenerator,
WikiHowTemplate,
)
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_api")
async def test_gpt_prompt_generator(llm_api):
    """GPTPromptGenerator should produce prompts the LLM can answer in batch."""
    example = (
        "商品名称:WonderLab 新肌果味代餐奶昔 小胖瓶 胶原蛋白升级版 饱腹代餐粉6瓶 75g/瓶(6瓶/盒) 店铺名称:金力宁食品专营店 " "品牌:WonderLab 保质期:1年 产地:中国 净含量:450g"
    )
    prompts = GPTPromptGenerator().gen(example)
    answers = await llm_api.aask_batch(prompts)
    logger.info(answers)
    assert len(answers) > 0
@pytest.mark.usefixtures("llm_api")
def test_wikihow_template(llm_api):
    """WikiHowTemplate should embed both the question and step count in a prompt."""
    prompts = WikiHowTemplate().gen("learn Python", 5)
    assert len(prompts) > 0
    assert any("Give me 5 steps to learn Python." in p for p in prompts)
@pytest.mark.usefixtures("llm_api")
def test_enron_template(llm_api):
    """EnronTemplate should embed the subject into an email-writing prompt."""
    prompts = EnronTemplate().gen("Meeting Agenda")
    assert len(prompts) > 0
    assert any('Write an email with the subject "Meeting Agenda".' in p for p in prompts)
def test_beagec_template():
    """BEAGECTemplate should produce document-revision prompts."""
    prompts = BEAGECTemplate().gen()
    assert len(prompts) > 0
    assert any(
        "Edit and revise this document to improve its grammar, vocabulary, spelling, and style." in p
        for p in prompts
    )


if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/test_iflytek_tts.py | tests/metagpt/tools/test_iflytek_tts.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/12/26
@Author : mashenquan
@File : test_iflytek_tts.py
"""
import pytest
from metagpt.config2 import config
from metagpt.tools.iflytek_tts import IFlyTekTTS, oas3_iflytek_tts
@pytest.mark.asyncio
async def test_iflytek_tts(mocker):
    """oas3_iflytek_tts should synthesize speech and return an audio payload.

    The iFlyTek synthesis call and the result-file read are both mocked, so
    only the wiring around them is exercised.
    """
    # mock: disable Azure TTS config so the iFlyTek path is used, stub the
    # synthesis call, and make aiofiles.open yield canned audio bytes
    config.azure_tts_subscription_key = None
    config.azure_tts_region = None
    mocker.patch.object(IFlyTekTTS, "synthesize_speech", return_value=None)
    mock_data = mocker.AsyncMock()
    mock_data.read.return_value = b"mock iflytek"
    mock_reader = mocker.patch("aiofiles.open")
    mock_reader.return_value.__aenter__.return_value = mock_data

    # Prerequisites: iFlyTek credentials must be configured
    assert config.iflytek_app_id
    assert config.iflytek_api_key
    assert config.iflytek_api_secret

    result = await oas3_iflytek_tts(
        text="你好,hello",
        app_id=config.iflytek_app_id,
        api_key=config.iflytek_api_key,
        api_secret=config.iflytek_api_secret,
    )
    assert result


if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/test_azure_tts.py | tests/metagpt/tools/test_azure_tts.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/7/1 22:50
@Author : alexanderwu
@File : test_azure_tts.py
@Modified By: mashenquan, 2023-8-9, add more text formatting options
@Modified By: mashenquan, 2023-8-17, move to `tools` folder.
"""
from pathlib import Path
import pytest
from azure.cognitiveservices.speech import ResultReason, SpeechSynthesizer
from metagpt.config2 import config
from metagpt.tools.azure_tts import AzureTTS
@pytest.mark.asyncio
async def test_azure_tts(mocker):
    """AzureTTS.synthesize_speech should produce audio for mixed-role SSML input.

    The Azure Speech SDK call and the output-file existence check are mocked;
    the test verifies the wrapper's plumbing and the returned result object.
    """
    # mock: make speak_ssml_async return a completed-synthesis result
    mock_result = mocker.Mock()
    mock_result.audio_data = b"mock audio data"
    mock_result.reason = ResultReason.SynthesizingAudioCompleted
    mock_data = mocker.Mock()
    mock_data.get.return_value = mock_result
    mocker.patch.object(SpeechSynthesizer, "speak_ssml_async", return_value=mock_data)
    mocker.patch.object(Path, "exists", return_value=True)

    # Prerequisites: real-looking Azure TTS credentials must be configured
    assert config.azure_tts_subscription_key and config.azure_tts_subscription_key != "YOUR_API_KEY"
    assert config.azure_tts_region

    azure_tts = AzureTTS(subscription_key=config.azure_tts_subscription_key, region=config.azure_tts_region)
    # SSML snippet mixing two speaking roles (daughter / father)
    text = """
女儿看见父亲走了进来,问道:
<mstts:express-as role="YoungAdultFemale" style="calm">
“您来的挺快的,怎么过来的?”
</mstts:express-as>
父亲放下手提包,说:
<mstts:express-as role="OlderAdultMale" style="calm">
“Writing a binary file in Python is similar to writing a regular text file, but you'll work with bytes instead of strings.”
</mstts:express-as>
"""
    path = config.workspace.path / "tts"
    path.mkdir(exist_ok=True, parents=True)
    filename = path / "girl.wav"
    filename.unlink(missing_ok=True)
    result = await azure_tts.synthesize_speech(
        lang="zh-CN", voice="zh-CN-XiaomoNeural", text=text, output_file=str(filename)
    )
    print(result)

    assert result
    assert result.audio_data
    assert result.reason == ResultReason.SynthesizingAudioCompleted
    assert filename.exists()


if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/test_summarize.py | tests/metagpt/tools/test_summarize.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/2 17:46
@Author : alexanderwu
@File : test_summarize.py
"""
import pytest
CASES = [
"""# 上下文
[{'title': '抗痘 / 控油 / 毛孔調理 臉部保養 商品 | 屈臣氏 Watsons', 'href': 'https://www.watsons.com.tw/%E8%87%89%E9%83%A8%E4%BF%9D%E9%A4%8A/%E6%8A%97%E7%97%98-%E6%8E%A7%E6%B2%B9-%E6%AF%9B%E5%AD%94%E8%AA%BF%E7%90%86/c/10410601', 'body': '抗痘 / 控油 / 毛孔調理等臉部保養用品盡在屈臣氏,多樣抗痘 / 控油 / 毛孔調理商品全面符合您的需求。3M, 3M Nexcare, ARIN, Biore 蜜妮, CEZANNE等眾多推薦品牌快來屈臣氏選購。'}, {'title': '有哪些祛痘印产品曾惊艳过你? - 知乎', 'href': 'https://www.zhihu.com/question/380098171', 'body': '有哪些祛痘印产品曾惊艳过你? ... 素姬水杨酸精华 祛痘产品里绝对不能少了水杨酸这个成分!用这个品牌主要是信赖它的温和性,而且价格便宜,去粉刺痘痘效果又好,对闭口和黑头都有效果。 ... 购买比较方便,我在屈臣氏买的,50RMB. 西班牙IFC duo祛痘凝露 ...'}, {'title': '屈臣氏祛痘系列_百度知道', 'href': 'https://zhidao.baidu.com/question/581355167.html', 'body': '2014-08-28 屈臣氏里有哪些祛痘效果好的产品? 26 2007-08-25 屈臣氏有卖哪些祛痘产品 61 2019-05-27 屈臣氏有哪些祛痘产品 什么方法会比较好?? 2015-09-27 屈臣氏白金祛痘系列的使用顺序 30 2014-11-03 屈臣氏卖的祛痘产品叫什么 1 2011-05-24 屈臣氏的祛痘好用的产品有那些 ...'}, {'title': '屈臣氏里有哪些祛痘效果好的产品? - 百度知道', 'href': 'https://zhidao.baidu.com/question/360679400530686652.html', 'body': '阿达帕林是一款医药系列的祛痘产品,它里面蕴含了非常丰富的甲酸类化合物,涂抹在皮肤上会有很好的消炎效果,对于粉刺、闭口、痘痘等痤疮系列的皮肤问题也有很好的修复,可以让毛囊上的皮肤细胞正常分化。. 用户实测评分:9.663分. 实验室效果评测:9. ...'}, {'title': '33款屈臣氏最值得买的好物! - 知乎 - 知乎专栏', 'href': 'https://zhuanlan.zhihu.com/p/31366278', 'body': '屈臣氏深层卸妆棉. 19.9元/25*2. 一般出差不想带很多瓶瓶罐罐就会带卸妆棉,当时是买一送一,就觉得超划算。. 棉质很好,很舒服,厚度适中,温和不刺激,淡淡的香味,卸得很舒心,卸得也很干净。. 眼妆也可以用这个卸,因为它不含酒精,所以一点也不辣 ...'}, {'title': '屈臣氏官网 - Watsons', 'href': 'https://www.watsons.com.cn/', 'body': '屈臣氏百年正品口碑,现金优惠多多多,2小时闪电送到家,还能屈臣氏门店自提。美妆洗护,口腔保健,日用百货,男士护理,更便捷的操作,满足你更多。屈臣氏始创于1841年,线下门店覆盖全球12个国家地区,超过5500家门店。在中国,400多个城市已超过3000家门店,6000万名会员与你一起放心买好货!'}, {'title': '15款日本最具口碑的祛痘神器! - 知乎 - 知乎专栏', 'href': 'https://zhuanlan.zhihu.com/p/63349036', 'body': '乐敦. Acnes药用祛痘抗痘粉尘暗疮药膏. 药用抗痘药膏清爽啫哩質地,维生素E衍生物,维生素B6组合,膏体不腻,轻透很好吸收,淡淡清香味主要针对红肿且疼痛的大颗痘痘,排出脓液、杀灭细菌、消除红肿,第二天就会有效果。. DHC. 祛痘净痘调理精华. 含有o-Cymen ...'}, {'title': '请问屈臣氏什么产品可以去痘疤的 - Sina', 'href': 'https://iask.sina.com.cn/b/1STygN4RT2wZ.html', 'body': '请问屈臣氏什么产品可以去痘疤的本人很少长痘痘,偶尔冒几颗。脸颊上的痘痘来的快去的快,不怎么留疤,就是额头和下巴嘴角边的痘痘感觉超级敏感,一挤就留疤,苦恼! ... 想问下屈臣氏有什么产品能去痘疤的,要有效哦~谢谢各位了! 
...'}, {'title': '屈臣氏祛痘凝胶新款 - 屈臣氏祛痘凝胶2021年新款 - 京东', 'href': 'https://www.jd.com/xinkuan/16729c68245569aae4c3.html', 'body': '屈臣氏芦荟凝胶清凉滋润舒缓祛痘印痘坑痘疤补水保湿晒后修复凝胶 【保湿芦荟凝胶】3瓶900g. 2+ 条评论. 屈臣氏 Leaf Simple简单叶子水杨酸祛痘凝胶去痘印粉刺闭口淡化痘坑研春堂收缩毛孔改善粉刺 两支. 4+ 条评论. 屈臣氏 Leaf Simple简单叶子水杨酸祛痘凝胶去痘印 ...'}]
# 用户搜索请求
屈臣氏有什么产品可以去痘?
# 要求
你是专业管家团队的一员,会给出有帮助的建议
1. 请根据上下文,对用户搜索请求进行总结性回答,不要包括与请求无关的文本
2. 以 [正文](引用链接) markdown形式在正文中**自然标注**~5个文本(如商品词或类似文本段),以便跳转
3. 回复优雅、清晰,**绝不重复文本**,行文流畅,长度居中""",
"""# 上下文
[{'title': '去厦门 有哪些推荐的美食? - 知乎', 'href': 'https://www.zhihu.com/question/286901854', 'body': '知乎,中文互联网高质量的问答社区和创作者聚集的原创内容平台,于 2011 年 1 月正式上线,以「让人们更好的分享知识、经验和见解,找到自己的解答」为品牌使命。知乎凭借认真、专业、友善的社区氛围、独特的产品机制以及结构化和易获得的优质内容,聚集了中文互联网科技、商业、影视 ...'}, {'title': '厦门到底有哪些真正值得吃的美食? - 知乎', 'href': 'https://www.zhihu.com/question/38012322', 'body': '有几个特色菜在别处不太能吃到,值得一试~常点的有西多士、沙茶肉串、咕老肉(个人认为还是良山排档的更炉火纯青~),因为爱吃芋泥,每次还会点一个芋泥鸭~人均50元左右. 潮福城. 厦门这两年经营港式茶点的店越来越多,但是最经典的还是潮福城的茶点 ...'}, {'title': '超全厦门美食攻略,好吃不贵不踩雷 - 知乎 - 知乎专栏', 'href': 'https://zhuanlan.zhihu.com/p/347055615', 'body': '厦门老字号店铺,味道卫生都有保障,喜欢吃芒果的,不要错过芒果牛奶绵绵冰. 285蚝味馆 70/人. 上过《舌尖上的中国》味道不用多说,想吃地道的海鲜烧烤就来这里. 堂宴.老厦门私房菜 80/人. 非常多的明星打卡过,上过《十二道锋味》,吃厦门传统菜的好去处 ...'}, {'title': '福建名小吃||寻味厦门,十大特色名小吃,你都吃过哪几样? - 知乎', 'href': 'https://zhuanlan.zhihu.com/p/375781836', 'body': '第一期,分享厦门的特色美食。 厦门是一个风景旅游城市,许多人来到厦门,除了游览厦门独特的风景之外,最难忘的应该是厦门的特色小吃。厦门小吃多种多样,有到厦门必吃的沙茶面、米线糊、蚵仔煎、土笋冻等非常之多。那么,厦门的名小吃有哪些呢?'}, {'title': '大家如果去厦门旅游的话,好吃的有很多,但... 来自庄时利和 - 微博', 'href': 'https://weibo.com/1728715190/MEAwzscRT', 'body': '大家如果去厦门旅游的话,好吃的有很多,但如果只选一样的话,我个人会选择莲花煎蟹。 靠海吃海,吃蟹对于闽南人来说是很平常的一件事。 厦门传统的做法多是清蒸或水煮,上世纪八十年代有一同安人在厦门的莲花公园旁,摆摊做起了煎蟹的生意。'}, {'title': '厦门美食,厦门美食攻略,厦门旅游美食攻略 - 马蜂窝', 'href': 'https://www.mafengwo.cn/cy/10132/gonglve.html', 'body': '醉壹号海鲜大排档 (厦门美食地标店) No.3. 哆啦Eanny 的最新点评:. 环境 挺复古的闽南风情,花砖地板,一楼有海鲜自己点菜,二楼室内位置,三楼露天位置,环境挺不错的。. 苦螺汤,看起来挺清的,螺肉吃起来很脆。. 姜... 5.0 分. 482 条用户点评.'}, {'title': '厦门超强中山路小吃合集,29家本地人推荐的正宗美食 - 马蜂窝', 'href': 'https://www.mafengwo.cn/gonglve/ziyouxing/176485.html', 'body': '莲欢海蛎煎. 提到厦门就想到海蛎煎,而这家位于中山路局口街的莲欢海蛎煎是实打实的好吃!. ·局口街老巷之中,全室外环境,吃的就是这种感觉。. ·取名"莲欢",是希望妻子每天开心。. 新鲜的食材,实在的用料,这样的用心也定能讨食客欢心。. ·海蛎又 ...'}, {'title': '厦门市 10 大餐厅- Tripadvisor', 'href': 'https://cn.tripadvisor.com/Restaurants-g297407-Xiamen_Fujian.html', 'body': '厦门市餐厅:在Tripadvisor查看中国厦门市餐厅的点评,并以价格、地点及更多选项进行搜索。 ... "牛排太好吃了啊啊啊" ... "厦门地区最老品牌最有口碑的潮州菜餐厅" ...'}, {'title': '#福建10条美食街简直不要太好吃#每到一... 
来自新浪厦门 - 微博', 'href': 'https://weibo.com/1740522895/MF1lY7W4n', 'body': '福建的这10条美食街,你一定不能错过!福州师大学生街、福州达明路美食街、厦门八市、漳州古城老街、宁德老南门电影院美食集市、龙岩中山路美食街、三明龙岗夜市、莆田金鼎夜市、莆田玉湖夜市、南平嘉禾美食街。世间万事皆难,唯有美食可以治愈一切。'}, {'title': '厦门这50家餐厅最值得吃 - 腾讯新闻', 'href': 'https://new.qq.com/rain/a/20200114A09HJT00', 'body': '没有什么事是一顿辣解决不了的! 创意辣、川湘辣、温柔辣、异域辣,芙蓉涧的菜能把辣椒玩出花来! ... 早在2005年,这家老牌的东南亚餐厅就开在厦门莲花了,在许多老厦门的心中,都觉得这里有全厦门最好吃的咖喱呢。 ...'}, {'title': '好听的美食?又好听又好吃的食物有什么? - 哔哩哔哩', 'href': 'https://www.bilibili.com/read/cv23430069/', 'body': '专栏 / 好听的美食?又好听又好吃的食物有什么? 又好听又好吃的食物有什么? 2023-05-02 18:01 --阅读 · --喜欢 · --评论'}]
# 用户搜索请求
厦门有什么好吃的?
# 要求
你是专业管家团队的一员,会给出有帮助的建议
1. 请根据上下文,对用户搜索请求进行总结性回答,不要包括与请求无关的文本
2. 以 [正文](引用链接) markdown形式在正文中**自然标注**3-5个文本(如商品词或类似文本段),以便跳转
3. 回复优雅、清晰,**绝不重复文本**,行文流畅,长度居中""",
]
@pytest.mark.usefixtures("llm_api")
def test_summarize(llm_api):
    # TODO: placeholder — summarization assertions over CASES are not implemented yet
    pass
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/test_search_engine_meilisearch.py | tests/metagpt/tools/test_search_engine_meilisearch.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/27 22:18
@Author : alexanderwu
@File : test_search_engine_meilisearch.py
"""
import subprocess
import time
import pytest
from metagpt.logs import logger
from metagpt.tools.search_engine_meilisearch import DataSource, MeilisearchEngine
MASTER_KEY = "116Qavl2qpCYNEJNv5-e0RC9kncev1nr1gt7ybEGVLk"
@pytest.fixture()
def search_engine_server():
    """Run a local meilisearch server for the duration of a test.

    Prerequisite (https://www.meilisearch.com/docs/learn/getting_started/installation):
        brew update && brew install meilisearch
    """
    proc = subprocess.Popen(["meilisearch", "--master-key", f"{MASTER_KEY}"], stdout=subprocess.PIPE)
    time.sleep(3)  # give the server a moment to come up
    yield
    proc.terminate()
    proc.wait()
@pytest.mark.skip
def test_meilisearch(search_engine_server):
    """Index a small document set and run a search against a local meilisearch.

    Prerequisite (https://www.meilisearch.com/docs/learn/getting_started/installation):
        brew update && brew install meilisearch
    """
    engine = MeilisearchEngine(url="http://localhost:7700", token=MASTER_KEY)

    # a data source named "books" owning the documents to be indexed
    books_source = DataSource(name="books", url="https://example.com/books")
    documents = [
        {"id": 1, "title": "Book 1", "content": "This is the content of Book 1."},
        {"id": 2, "title": "Book 2", "content": "This is the content of Book 2."},
        {"id": 3, "title": "Book 1", "content": "This is the content of Book 1."},
        {"id": 4, "title": "Book 2", "content": "This is the content of Book 2."},
        {"id": 5, "title": "Book 1", "content": "This is the content of Book 1."},
        {"id": 6, "title": "Book 2", "content": "This is the content of Book 2."},
    ]

    # index the documents, then query them back
    engine.add_documents(books_source, documents)
    logger.info(engine.search("Book 1"))


if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/test_translate.py | tests/metagpt/tools/test_translate.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/2 17:46
@Author : alexanderwu
@File : test_translate.py
"""
import pytest
from metagpt.logs import logger
from metagpt.tools.translator import Translator
@pytest.mark.asyncio
@pytest.mark.usefixtures("llm_api")
async def test_translate(llm_api):
    """Each translated poetry line should contain its expected keyword."""
    cases = [
        ("Let life be beautiful like summer flowers", "花"),
        ("The ancient Chinese poetries are all songs.", "中国"),
    ]
    for source_text, expected_keyword in cases:
        reply = await llm_api.aask(Translator.translate_prompt(source_text))
        logger.info(reply)
        assert expected_keyword in reply
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/test_tool_recommend.py | tests/metagpt/tools/test_tool_recommend.py | import pytest
from metagpt.schema import Plan, Task
from metagpt.tools import TOOL_REGISTRY
from metagpt.tools.tool_recommend import (
BM25ToolRecommender,
ToolRecommender,
TypeMatchToolRecommender,
)
@pytest.fixture
def mock_plan(mocker):
    """A plan holding a single current feature-engineering task."""
    task = Task(
        task_id="1",
        instruction="conduct feature engineering, add new features on the dataset",
        task_type="feature engineering",
    )
    return Plan(
        goal="test requirement",
        tasks=[task],
        task_map={"1": task},
        current_task_id="1",
    )
@pytest.fixture
def mock_bm25_tr(mocker):
    """A BM25 recommender over two data-prep tools plus the web-scraping tag."""
    return BM25ToolRecommender(tools=["FillMissingValue", "PolynomialExpansion", "web scraping"])
def test_tr_init():
    """Names and tags resolve to registered tools; unknown names are dropped."""
    recommender = ToolRecommender(tools=["FillMissingValue", "PolynomialExpansion", "web scraping", "non-existing tool"])
    # "web scraping" is a tag expanding to scrape_web_playwright;
    # "non-existing tool" is silently ignored
    assert list(recommender.tools.keys()) == [
        "FillMissingValue",
        "PolynomialExpansion",
        "scrape_web_playwright",
    ]
def test_tr_init_default_tools_value():
    """With no tools argument, the recommender starts empty."""
    assert ToolRecommender().tools == {}
def test_tr_init_tools_all():
    """The special "<all>" entry loads every tool in the registry."""
    recommender = ToolRecommender(tools=["<all>"])
    assert list(recommender.tools.keys()) == list(TOOL_REGISTRY.get_all_tools().keys())
@pytest.mark.asyncio
async def test_bm25_tr_recall_with_plan(mock_plan, mock_bm25_tr):
    """Recall driven by the plan's current task ranks the matching tool first."""
    recalled = await mock_bm25_tr.recall_tools(plan=mock_plan)
    assert len(recalled) == 3
    assert recalled[0].name == "PolynomialExpansion"
@pytest.mark.asyncio
async def test_bm25_tr_recall_no_plan(mock_bm25_tr):
    """Recall from a raw context string (plan=None) ranks the matching tool first.

    Fix: dropped the unused `mock_plan` fixture parameter — it only triggered
    needless fixture setup and was never referenced in the body.
    """
    result = await mock_bm25_tr.recall_tools(
        context="conduct feature engineering, add new features on the dataset", plan=None
    )
    assert len(result) == 3
    assert result[0].name == "PolynomialExpansion"
@pytest.mark.asyncio
async def test_bm25_recommend_tools(mock_bm25_tr):
    """Ranking should filter out the irrelevant web-scraping tool recalled by BM25."""
    recommended = await mock_bm25_tr.recommend_tools(
        context="conduct feature engineering, add new features on the dataset"
    )
    assert len(recommended) == 2
    assert recommended[0].name == "PolynomialExpansion"
@pytest.mark.asyncio
async def test_get_recommended_tool_info(mock_plan, mock_bm25_tr):
    """The recommended-tool summary is rendered as a plain string."""
    info = await mock_bm25_tr.get_recommended_tool_info(plan=mock_plan)
    assert isinstance(info, str)
@pytest.mark.asyncio
async def test_tm_tr_recall_with_plan(mock_plan):
    """Type-match recall returns only tools whose tag matches the task type.

    Fix: dropped the unused `mock_bm25_tr` fixture parameter — this test builds
    its own TypeMatchToolRecommender and never used the BM25 fixture.
    """
    tr = TypeMatchToolRecommender(tools=["FillMissingValue", "PolynomialExpansion", "web scraping"])
    result = await tr.recall_tools(plan=mock_plan)
    assert len(result) == 1
    assert result[0].name == "PolynomialExpansion"
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/test_moderation.py | tests/metagpt/tools/test_moderation.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/9/26 14:46
@Author : zhanglei
@File : test_moderation.py
"""
import pytest
from metagpt.config2 import config
from metagpt.llm import LLM
from metagpt.tools.moderation import Moderation
@pytest.mark.asyncio
@pytest.mark.parametrize(
    ("content",),
    [
        [
            # one batch of three strings: two violent, one benign
            ["I will kill you", "The weather is really nice today", "I want to hit you"],
        ]
    ],
)
async def test_amoderation(content):
    """Both moderation endpoints should return one result per input string.

    NOTE(review): hits the real moderation API — requires a configured OpenAI LLM.
    """
    # Prerequisites
    assert config.get_openai_llm()

    moderation = Moderation(LLM())
    results = await moderation.amoderation(content=content)
    assert isinstance(results, list)
    assert len(results) == len(content)

    results = await moderation.amoderation_with_categories(content=content)
    assert isinstance(results, list)
    assert results
    for m in results:
        # each entry carries the flagged verdict plus the categories that fired
        assert "flagged" in m
        assert "true_categories" in m


if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/test_tool_convert.py | tests/metagpt/tools/test_tool_convert.py | from typing import Literal, Union
import pandas as pd
from metagpt.tools.tool_convert import (
convert_code_to_tool_schema,
convert_code_to_tool_schema_ast,
)
class DummyClass:
    """
    Completing missing values with simple strategies.
    """

    # NOTE: fixture class for the convert_code_to_tool_schema tests below; its
    # docstrings are compared verbatim against expected schemas — do not reword them.
    def __init__(self, features: list, strategy: str = "mean", fill_value=None):
        """
        Initialize self.

        Args:
            features (list): Columns to be processed.
            strategy (str, optional): The imputation strategy, notice 'mean' and 'median' can only
                be used for numeric features. Enum: ['mean', 'median', 'most_frequent', 'constant']. Defaults to 'mean'.
            fill_value (int, optional): Fill_value is used to replace all occurrences of missing_values.
                Defaults to None.
        """
        pass

    def fit(self, df: pd.DataFrame):
        """
        Fit the FillMissingValue model.

        Args:
            df (pd.DataFrame): The input DataFrame.
        """
        pass

    def transform(self, df: pd.DataFrame) -> pd.DataFrame:
        """
        Transform the input DataFrame with the fitted model.

        Args:
            df (pd.DataFrame): The input DataFrame.

        Returns:
            pd.DataFrame: The transformed DataFrame.
        """
        pass
class DummySubClass(DummyClass):
    """sub class docstring"""

    # fixture: verifies that inherited methods also appear in the converted schema
    def sub_method(self, df: pd.DataFrame):
        """sub method"""
        pass
# Fixture function: its signature and docstring are asserted verbatim by
# test_convert_code_to_tool_schema_function — do not reword either.
def dummy_fn(
    df: pd.DataFrame,
    s: str,
    k: int = 5,
    type: Literal["a", "b", "c"] = "a",
    test_dict: dict[str, int] = None,
    test_union: Union[str, list[str]] = "",
) -> dict:
    """
    Analyzes a DataFrame and categorizes its columns based on data types.

    Args:
        df: The DataFrame to be analyzed.
            Another line for df.
        s (str): Some test string param.
            Another line for s.
        k (int, optional): Some test integer param. Defaults to 5.
        type (Literal["a", "b", "c"], optional): Some test type. Defaults to 'a'.
        more_args: will be omitted here for testing

    Returns:
        dict: A dictionary with four keys ('Category', 'Numeric', 'Datetime', 'Others').
        Each key corresponds to a list of column names belonging to that category.
    """
    pass
# Fixture: an async function whose converted schema type must be "async_function".
async def dummy_async_fn(df: pd.DataFrame) -> dict:
    """
    A dummy async function for test

    Args:
        df (pd.DataFrame): test args.

    Returns:
        dict: test returns.
    """
    pass
def test_convert_code_to_tool_schema_class():
    """Class conversion captures the class docstring plus every public method."""
    # expected strings are whitespace-collapsed versions of DummyClass's docstrings
    expected = {
        "type": "class",
        "description": "Completing missing values with simple strategies.",
        "methods": {
            "__init__": {
                "type": "function",
                "description": "Initialize self. ",
                "signature": "(self, features: list, strategy: str = 'mean', fill_value=None)",
                "parameters": "Args: features (list): Columns to be processed. strategy (str, optional): The imputation strategy, notice 'mean' and 'median' can only be used for numeric features. Enum: ['mean', 'median', 'most_frequent', 'constant']. Defaults to 'mean'. fill_value (int, optional): Fill_value is used to replace all occurrences of missing_values. Defaults to None.",
            },
            "fit": {
                "type": "function",
                "description": "Fit the FillMissingValue model. ",
                "signature": "(self, df: pandas.core.frame.DataFrame)",
                "parameters": "Args: df (pd.DataFrame): The input DataFrame.",
            },
            "transform": {
                "type": "function",
                "description": "Transform the input DataFrame with the fitted model. ",
                "signature": "(self, df: pandas.core.frame.DataFrame) -> pandas.core.frame.DataFrame",
                "parameters": "Args: df (pd.DataFrame): The input DataFrame. Returns: pd.DataFrame: The transformed DataFrame.",
            },
        },
    }
    schema = convert_code_to_tool_schema(DummyClass)
    assert schema == expected
def test_convert_code_to_tool_schema_subclass():
    """Subclass schemas include both own and inherited methods."""
    methods = convert_code_to_tool_schema(DummySubClass)["methods"]
    assert "sub_method" in methods  # defined on the subclass itself
    assert "fit" in methods  # inherited from DummyClass
def test_convert_code_to_tool_schema_include():
    """The include filter keeps only the listed methods in the schema."""
    methods = convert_code_to_tool_schema(DummyClass, include=["fit"])["methods"]
    assert "fit" in methods
    assert "transform" not in methods
def test_convert_code_to_tool_schema_function():
    """Function conversion captures signature, description and collapsed Args text."""
    # expected strings are whitespace-collapsed versions of dummy_fn's docstring
    expected = {
        "type": "function",
        "description": "Analyzes a DataFrame and categorizes its columns based on data types. ",
        "signature": "(df: pandas.core.frame.DataFrame, s: str, k: int = 5, type: Literal['a', 'b', 'c'] = 'a', test_dict: dict[str, int] = None, test_union: Union[str, list[str]] = '') -> dict",
        "parameters": "Args: df: The DataFrame to be analyzed. Another line for df. s (str): Some test string param. Another line for s. k (int, optional): Some test integer param. Defaults to 5. type (Literal[\"a\", \"b\", \"c\"], optional): Some test type. Defaults to 'a'. more_args: will be omitted here for testing Returns: dict: A dictionary with four keys ('Category', 'Numeric', 'Datetime', 'Others'). Each key corresponds to a list of column names belonging to that category.",
    }
    schema = convert_code_to_tool_schema(dummy_fn)
    assert schema == expected
def test_convert_code_to_tool_schema_async_function():
    """Coroutine functions must be tagged 'async_function' in the schema."""
    assert convert_code_to_tool_schema(dummy_async_fn).get("type") == "async_function"
TEST_CODE_FILE_TEXT = '''
import pandas as pd # imported obj should not be parsed
from some_module1 import some_imported_function, SomeImportedClass # imported obj should not be parsed
from ..some_module2 import some_imported_function2 # relative import should not result in an error
class MyClass:
"""This is a MyClass docstring."""
def __init__(self, arg1):
"""This is the constructor docstring."""
self.arg1 = arg1
def my_method(self, arg2: Union[list[str], str], arg3: pd.DataFrame, arg4: int = 1, arg5: Literal["a","b","c"] = "a") -> Tuple[int, str]:
"""
This is a method docstring.
Args:
arg2 (Union[list[str], str]): A union of a list of strings and a string.
...
Returns:
Tuple[int, str]: A tuple of an integer and a string.
"""
return self.arg4 + arg5
async def my_async_method(self, some_arg) -> str:
return "hi"
def _private_method(self): # private should not be parsed
return "private"
def my_function(arg1, arg2) -> dict:
"""This is a function docstring."""
return arg1 + arg2
def my_async_function(arg1, arg2) -> dict:
return arg1 + arg2
def _private_function(): # private should not be parsed
return "private"
'''
def test_convert_code_to_tool_schema_ast():
    """AST-based conversion of raw source text.

    Imports and private members are skipped, docstring metadata is extracted,
    and the raw source of each item is attached under "code".
    """
    expected = {
        "MyClass": {
            "type": "class",
            "description": "This is a MyClass docstring.",
            "methods": {
                "__init__": {
                    "type": "function",
                    "description": "This is the constructor docstring.",
                    "signature": "(self, arg1)",
                    "parameters": "",
                },
                "my_method": {
                    "type": "function",
                    "description": "This is a method docstring. ",
                    "signature": "(self, arg2: Union[list[str], str], arg3: pd.DataFrame, arg4: int = 1, arg5: Literal['a', 'b', 'c'] = 'a') -> Tuple[int, str]",
                    "parameters": "Args: arg2 (Union[list[str], str]): A union of a list of strings and a string. ... Returns: Tuple[int, str]: A tuple of an integer and a string.",
                },
                "my_async_method": {
                    "type": "async_function",
                    "description": "",
                    "signature": "(self, some_arg) -> str",
                    "parameters": "",
                },
            },
            # Raw class source, private method included in "code" but not in "methods".
            "code": 'class MyClass:\n    """This is a MyClass docstring."""\n    def __init__(self, arg1):\n        """This is the constructor docstring."""\n        self.arg1 = arg1\n\n    def my_method(self, arg2: Union[list[str], str], arg3: pd.DataFrame, arg4: int = 1, arg5: Literal["a","b","c"] = "a") -> Tuple[int, str]:\n        """\n        This is a method docstring.\n        \n        Args:\n            arg2 (Union[list[str], str]): A union of a list of strings and a string.\n            ...\n        \n        Returns:\n            Tuple[int, str]: A tuple of an integer and a string.\n        """\n        return self.arg4 + arg5\n    \n    async def my_async_method(self, some_arg) -> str:\n        return "hi"\n    \n    def _private_method(self): # private should not be parsed\n        return "private"',
        },
        "my_function": {
            "type": "function",
            "description": "This is a function docstring.",
            "signature": "(arg1, arg2) -> dict",
            "parameters": "",
            "code": 'def my_function(arg1, arg2) -> dict:\n    """This is a function docstring."""\n    return arg1 + arg2',
        },
        "my_async_function": {
            # NOTE(review): AST path reports plain "function" for async defs at
            # module level, unlike the reflective path above — confirm intended.
            "type": "function",
            "description": "",
            "signature": "(arg1, arg2) -> dict",
            "parameters": "",
            "code": "def my_async_function(arg1, arg2) -> dict:\n    return arg1 + arg2",
        },
    }
    schemas = convert_code_to_tool_schema_ast(TEST_CODE_FILE_TEXT)
    assert schemas == expected
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/test_ut_writer.py | tests/metagpt/tools/test_ut_writer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/4/30 21:44
@Author : alexanderwu
@File : test_ut_writer.py
"""
from pathlib import Path
import pytest
from openai.resources.chat.completions import AsyncCompletions
from openai.types import CompletionUsage
from openai.types.chat.chat_completion import (
ChatCompletion,
ChatCompletionMessage,
Choice,
)
from openai.types.chat.chat_completion_message_tool_call import (
ChatCompletionMessageToolCall,
Function,
)
from metagpt.config2 import config
from metagpt.const import API_QUESTIONS_PATH, UT_PY_PATH
from metagpt.tools.ut_writer import YFT_PROMPT_PREFIX, UTGenerator
class TestUTWriter:
    @pytest.mark.asyncio
    async def test_api_to_ut_sample(self, mocker):
        """Generate unit tests from a swagger spec with the OpenAI call mocked out."""

        async def mock_create(*args, **kwargs):
            # Canned chat-completion reply carrying a single tool call, shaped
            # like a real OpenAI response so UTGenerator can parse it.
            return ChatCompletion(
                id="chatcmpl-8n5fAd21w2J1IIFkI4qxWlNfM7QRC",
                choices=[
                    Choice(
                        finish_reason="stop",
                        index=0,
                        logprobs=None,
                        message=ChatCompletionMessage(
                            content=None,
                            role="assistant",
                            function_call=None,
                            tool_calls=[
                                ChatCompletionMessageToolCall(
                                    id="call_EjjmIY7GMspHu3r9mx8gPA2k",
                                    function=Function(
                                        arguments='{"code":"import string\\nimport random\\n\\ndef random_string'
                                        "(length=10):\\n return ''.join(random.choice(string.ascii_"
                                        'lowercase) for i in range(length))"}',
                                        name="execute",
                                    ),
                                    type="function",
                                )
                            ],
                        ),
                    )
                ],
                created=1706710532,
                model="gpt-4-turbo",
                object="chat.completion",
                system_fingerprint="fp_04f9a1eebf",
                usage=CompletionUsage(completion_tokens=35, prompt_tokens=1982, total_tokens=2017),
            )

        mocker.patch.object(AsyncCompletions, "create", mock_create)

        # Prerequisites
        swagger_file = Path(__file__).parent / "../../data/ut_writer/yft_swaggerApi.json"
        assert swagger_file.exists()
        assert config.get_openai_llm()

        tags = ["测试", "作业"]
        # Two APIs carrying these test tags were added to the swagger file by hand.
        utg = UTGenerator(
            swagger_file=str(swagger_file),
            ut_py_path=UT_PY_PATH,
            questions_path=API_QUESTIONS_PATH,
            template_prefix=YFT_PROMPT_PREFIX,
        )
        ret = await utg.generate_ut(include_tags=tags)
        # TODO: also verify the number and content of the generated files.
        assert ret
if __name__ == "__main__":
    # Allow running this test module directly.
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/test_openapi_v3_hello.py | tests/metagpt/tools/test_openapi_v3_hello.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/12/26
@Author : mashenquan
@File : test_openapi_v3_hello.py
"""
import asyncio
import subprocess
from pathlib import Path
import pytest
import requests
@pytest.mark.asyncio
async def test_hello(context):
    """Boot the hello OpenAPI service as a subprocess and hit its greeting endpoint.

    Fix: the child process was terminated but never reaped, leaving a zombie
    for the rest of the test session; it is now waited on (with a kill fallback).
    """
    workdir = Path(__file__).parent.parent.parent.parent
    script_pathname = workdir / "metagpt/tools/openapi_v3_hello.py"
    env = context.new_environ()
    env["PYTHONPATH"] = str(workdir) + ":" + env.get("PYTHONPATH", "")
    process = subprocess.Popen(["python", str(script_pathname)], cwd=workdir, env=env)
    await asyncio.sleep(5)  # crude startup wait; assumes the service is up within 5s
    try:
        url = "http://localhost:8082/openapi/greeting/dave"
        headers = {"accept": "text/plain", "Content-Type": "application/json"}
        data = {}
        response = requests.post(url, headers=headers, json=data)
        assert response.text == "Hello dave\n"
    finally:
        process.terminate()
        try:
            process.wait(timeout=10)  # reap the child so it does not linger as a zombie
        except subprocess.TimeoutExpired:
            process.kill()
            process.wait()
if __name__ == "__main__":
    # Allow running this test module directly.
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/test_web_browser_engine_selenium.py | tests/metagpt/tools/test_web_browser_engine_selenium.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import browsers
import pytest
from metagpt.tools import web_browser_engine_selenium
from metagpt.utils.parse_html import WebPage
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "browser_type, use_proxy,",
    [
        pytest.param(
            "chrome",
            False,
            marks=pytest.mark.skipif(not browsers.get("chrome"), reason="chrome browser not found"),
        ),
        pytest.param(
            "firefox",
            False,
            marks=pytest.mark.skipif(not browsers.get("firefox"), reason="firefox browser not found"),
        ),
        pytest.param(
            "edge",
            False,
            marks=pytest.mark.skipif(not browsers.get("msedge"), reason="edge browser not found"),
        ),
    ],
    ids=["chrome-normal", "firefox-normal", "edge-normal"],
)
async def test_scrape_web_page(browser_type, use_proxy, proxy, capfd, http_server):
    """Scrape one and many URLs with SeleniumWrapper across locally installed browsers."""
    # Prerequisites
    # firefox, chrome, Microsoft Edge
    server, url = await http_server()
    urls = [url, url, url]
    proxy_url = None
    if use_proxy:  # currently always False in the parametrize table above
        proxy_server, proxy_url = await proxy()
    browser = web_browser_engine_selenium.SeleniumWrapper(browser_type=browser_type, proxy=proxy_url)
    result = await browser.run(url)
    assert isinstance(result, WebPage)
    assert "MetaGPT" in result.inner_text
    # Batch mode: first positional url plus *urls, hence len(urls) + 1 results.
    results = await browser.run(url, *urls)
    assert isinstance(results, list)
    assert len(results) == len(urls) + 1
    assert all(("MetaGPT" in i.inner_text) for i in results)
    if use_proxy:
        proxy_server.close()
        await proxy_server.wait_closed()
        assert "Proxy: localhost" in capfd.readouterr().out
    await server.stop()
if __name__ == "__main__":
    # Allow running this test module directly.
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/test_metagpt_text_to_image.py | tests/metagpt/tools/test_metagpt_text_to_image.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/12/26
@Author : mashenquan
@File : test_metagpt_text_to_image.py
"""
import base64
from unittest.mock import AsyncMock
import pytest
from metagpt.config2 import config
from metagpt.tools.metagpt_text_to_image import oas3_metagpt_text_to_image
@pytest.mark.asyncio
async def test_draw(mocker):
    """Text-to-image call with the HTTP POST mocked to return a canned payload."""
    # mock: aiohttp POST returns a 200 with one base64-encoded image
    mock_post = mocker.patch("aiohttp.ClientSession.post")
    mock_response = AsyncMock()
    mock_response.status = 200
    mock_response.json.return_value = {"images": [base64.b64encode(b"success")], "parameters": {"size": 1110}}
    mock_post.return_value.__aenter__.return_value = mock_response

    # Prerequisites
    assert config.metagpt_tti_url

    binary_data = await oas3_metagpt_text_to_image("Panda emoji")
    assert binary_data
if __name__ == "__main__":
    # Allow running this test module directly.
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/__init__.py | tests/metagpt/tools/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/4/29 16:27
@Author : alexanderwu
@File : __init__.py
"""
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/test_openai_text_to_embedding.py | tests/metagpt/tools/test_openai_text_to_embedding.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/12/26
@Author : mashenquan
@File : test_openai_text_to_embedding.py
"""
import json
from pathlib import Path
import pytest
from metagpt.config2 import config
from metagpt.tools.openai_text_to_embedding import oas3_openai_text_to_embedding
from metagpt.utils.common import aread
@pytest.mark.asyncio
async def test_embedding(mocker):
    """Embed text via the OpenAI-compatible endpoint with HTTP mocked out."""
    # mock: aiohttp POST returns the recorded embedding fixture
    mock_post = mocker.patch("aiohttp.ClientSession.post")
    mock_response = mocker.AsyncMock()
    mock_response.status = 200
    data = await aread(Path(__file__).parent / "../../data/openai/embedding.json")
    mock_response.json.return_value = json.loads(data)
    mock_post.return_value.__aenter__.return_value = mock_response
    # Force a proxy value so the proxy code path is exercised.
    type(config.get_openai_llm()).proxy = mocker.PropertyMock(return_value="http://mock.proxy")

    # Prerequisites
    llm_config = config.get_openai_llm()
    assert llm_config
    assert llm_config.proxy

    result = await oas3_openai_text_to_embedding(
        "Panda emoji", openai_api_key=llm_config.api_key, proxy=llm_config.proxy
    )
    assert result
    assert result.model
    assert len(result.data) > 0
    assert len(result.data[0].embedding) > 0
if __name__ == "__main__":
    # Allow running this test module directly.
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/test_openai_text_to_image.py | tests/metagpt/tools/test_openai_text_to_image.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/12/26
@Author : mashenquan
@File : test_openai_text_to_image.py
"""
import base64
import openai
import pytest
from pydantic import BaseModel
from metagpt.config2 import config
from metagpt.llm import LLM
from metagpt.tools.openai_text_to_image import (
OpenAIText2Image,
oas3_openai_text_to_image,
)
from metagpt.utils.s3 import S3
@pytest.mark.asyncio
async def test_draw(mocker):
    """Image generation with the OpenAI SDK, image download, and S3 cache all mocked."""
    # mock: SDK returns one object whose .url() is a fake image location
    mock_url = mocker.Mock()
    mock_url.url.return_value = "http://mock.com/0.png"

    class _MockData(BaseModel):
        data: list

    mock_data = _MockData(data=[mock_url])
    mocker.patch.object(openai.resources.images.AsyncImages, "generate", return_value=mock_data)
    # mock: downloading the image returns base64 bytes
    mock_post = mocker.patch("aiohttp.ClientSession.get")
    mock_response = mocker.AsyncMock()
    mock_response.status = 200
    mock_response.read.return_value = base64.b64encode(b"success")
    mock_post.return_value.__aenter__.return_value = mock_response
    mocker.patch.object(S3, "cache", return_value="http://mock.s3.com/0.png")

    # Prerequisites
    llm_config = config.get_openai_llm()
    assert llm_config

    binary_data = await oas3_openai_text_to_image("Panda emoji", llm=LLM(llm_config=llm_config))
    assert binary_data
@pytest.mark.asyncio
async def test_get_image():
    """Fetch real image bytes from a public URL via OpenAIText2Image.get_image_data."""
    # NOTE(review): hits the live network; will be flaky offline.
    image_url = "https://www.baidu.com/img/PCtm_d9c8750bed0b3c7d089fa7d55720d6cf.png"
    data = await OpenAIText2Image.get_image_data(url=image_url)
    assert data
if __name__ == "__main__":
    # Allow running this test module directly.
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/test_metagpt_oas3_api_svc.py | tests/metagpt/tools/test_metagpt_oas3_api_svc.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/12/26
@Author : mashenquan
@File : test_metagpt_oas3_api_svc.py
"""
import asyncio
import subprocess
from pathlib import Path
import pytest
import requests
@pytest.mark.asyncio
async def test_oas2_svc(context):
    """Boot the OAS3 API service as a subprocess and hit its greeting endpoint.

    Fix: the child process was terminated but never reaped, leaving a zombie
    for the rest of the test session; it is now waited on (with a kill fallback).
    """
    workdir = Path(__file__).parent.parent.parent.parent
    script_pathname = workdir / "metagpt/tools/metagpt_oas3_api_svc.py"
    env = context.new_environ()
    env["PYTHONPATH"] = str(workdir) + ":" + env.get("PYTHONPATH", "")
    process = subprocess.Popen(["python", str(script_pathname)], cwd=str(workdir), env=env)
    await asyncio.sleep(5)  # crude startup wait; assumes the service is up within 5s
    try:
        url = "http://localhost:8080/openapi/greeting/dave"
        headers = {"accept": "text/plain", "Content-Type": "application/json"}
        data = {}
        response = requests.post(url, headers=headers, json=data)
        assert response.text == "Hello dave\n"
    finally:
        process.terminate()
        try:
            process.wait(timeout=10)  # reap the child so it does not linger as a zombie
        except subprocess.TimeoutExpired:
            process.kill()
            process.wait()
if __name__ == "__main__":
    # Allow running this test module directly.
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/test_tool_registry.py | tests/metagpt/tools/test_tool_registry.py | import pytest
from metagpt.tools.tool_registry import ToolRegistry
@pytest.fixture
def tool_registry():
    """Provide a fresh, empty ToolRegistry for each test."""
    registry = ToolRegistry()
    return registry
# Test Initialization
def test_initialization(tool_registry):
    """A brand-new registry is a ToolRegistry with no tools and no tag index."""
    assert isinstance(tool_registry, ToolRegistry)
    assert (tool_registry.tools, tool_registry.tools_by_tags) == ({}, {})
# Dummy registration target: register_tool lifts these docstrings into the
# generated tool schema, so they are intentionally short and left unchanged.
class TestClassTool:
    """test class"""

    def test_class_fn(self):
        """test class fn"""
        pass
# Dummy function tool; its docstring feeds the generated schema description.
def test_fn():
    """test function"""
    pass
# Test Tool Registration Class
def test_register_tool_class(tool_registry):
    """Registering a class stores it in the registry under its name."""
    name, path = "TestClassTool", "/path/to/tool"
    tool_registry.register_tool(name, path, tool_source_object=TestClassTool)
    assert name in tool_registry.tools
# Test Tool Registration Function
def test_register_tool_fn(tool_registry):
    """Registering a plain function stores it under its name."""
    name, path = "test_fn", "/path/to/tool"
    tool_registry.register_tool(name, path, tool_source_object=test_fn)
    assert name in tool_registry.tools
# Test Tool Existence Checks
def test_has_tool(tool_registry):
    """has_tool is True for registered names and False otherwise."""
    tool_registry.register_tool("TestClassTool", "/path/to/tool", tool_source_object=TestClassTool)
    assert tool_registry.has_tool("TestClassTool")
    assert not tool_registry.has_tool("NonexistentTool")
# Test Tool Retrieval
def test_get_tool(tool_registry):
    """get_tool returns the stored record with its name, path and schemas."""
    tool_registry.register_tool("TestClassTool", "/path/to/tool", tool_source_object=TestClassTool)
    tool = tool_registry.get_tool("TestClassTool")
    assert tool is not None
    assert (tool.name, tool.path) == ("TestClassTool", "/path/to/tool")
    assert "description" in tool.schemas
def test_has_tool_tag(tool_registry):
    """has_tool_tag matches registered tags and rejects unknown ones."""
    tags = ["machine learning", "test"]
    tool_registry.register_tool("TestClassTool", "/path/to/tool", tool_source_object=TestClassTool, tags=tags)
    assert tool_registry.has_tool_tag("test")
    assert not tool_registry.has_tool_tag("Non-existent tag")
def test_get_tools_by_tag(tool_registry):
    """get_tools_by_tag returns registered tools for a tag, falsy for unknown tags."""
    tag, name, path = "Test Tag", "TestTool", "/path/to/tool"
    tool_registry.register_tool(name, path, tags=[tag], tool_source_object=TestClassTool)

    tagged = tool_registry.get_tools_by_tag(tag)
    assert tagged is not None
    assert name in tagged

    assert not tool_registry.get_tools_by_tag("Non-existent Tag")
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/test_search_engine.py | tests/metagpt/tools/test_search_engine.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/2 17:46
@Author : alexanderwu
@File : test_search_engine.py
"""
from __future__ import annotations
from typing import Callable
import pytest
from metagpt.configs.search_config import SearchConfig
from metagpt.logs import logger
from metagpt.tools import SearchEngineType
from metagpt.tools.search_engine import SearchEngine
class MockSearchEnine:
    """In-memory stand-in for a search backend, used by the CUSTOM_ENGINE cases.

    The (misspelled) class name is kept because the parametrize table references it.
    """

    async def run(self, query: str, max_results: int = 8, as_string: bool = True) -> str | list[dict[str, str]]:
        """Return `max_results` fake hits for `query`.

        Fixes:
        - the URL template was missing the f-string prefix, so every hit shared
          the literal URL ".../mock/{i}";
        - `"\\n".join(rets)` raised TypeError when as_string=True because the
          entries are dicts; each entry is now stringified before joining.
        """
        rets = [
            {"url": f"https://metagpt.com/mock/{i}", "title": query, "snippet": query * i}
            for i in range(max_results)
        ]
        return "\n".join(str(ret) for ret in rets) if as_string else rets
@pytest.mark.asyncio
@pytest.mark.parametrize(
    ("search_engine_type", "run_func", "max_results", "as_string"),
    [
        (SearchEngineType.SERPAPI_GOOGLE, None, 8, True),
        (SearchEngineType.SERPAPI_GOOGLE, None, 4, False),
        (SearchEngineType.DIRECT_GOOGLE, None, 8, True),
        (SearchEngineType.DIRECT_GOOGLE, None, 6, False),
        (SearchEngineType.SERPER_GOOGLE, None, 8, True),
        (SearchEngineType.SERPER_GOOGLE, None, 6, False),
        (SearchEngineType.DUCK_DUCK_GO, None, 8, True),
        (SearchEngineType.DUCK_DUCK_GO, None, 6, False),
        (SearchEngineType.BING, None, 6, False),
        (SearchEngineType.CUSTOM_ENGINE, MockSearchEnine().run, 8, False),
        (SearchEngineType.CUSTOM_ENGINE, MockSearchEnine().run, 6, False),
    ],
)
async def test_search_engine(
    search_engine_type,
    run_func: Callable,
    max_results: int,
    as_string: bool,
    search_engine_mocker,
):
    """Exercise SearchEngine across engine types and its three construction paths."""
    # Prerequisites: each hosted engine needs its own mock credentials.
    search_engine_config = {"engine": search_engine_type, "run_func": run_func}

    if search_engine_type is SearchEngineType.SERPAPI_GOOGLE:
        search_engine_config["api_key"] = "mock-serpapi-key"
    elif search_engine_type is SearchEngineType.DIRECT_GOOGLE:
        search_engine_config["api_key"] = "mock-google-key"
        search_engine_config["cse_id"] = "mock-google-cse"
    elif search_engine_type is SearchEngineType.SERPER_GOOGLE:
        search_engine_config["api_key"] = "mock-serper-key"

    async def test(search_engine):
        # Shared assertions: string mode yields str; list mode honors max_results.
        rsp = await search_engine.run("metagpt", max_results, as_string)
        logger.info(rsp)
        if as_string:
            assert isinstance(rsp, str)
        else:
            assert isinstance(rsp, list)
            assert len(rsp) <= max_results

    await test(SearchEngine(**search_engine_config))
    # Re-key the same config for SearchConfig's field names.
    search_engine_config["api_type"] = search_engine_config.pop("engine")
    if run_func:
        await test(SearchEngine.from_search_func(run_func))
        search_engine_config["search_func"] = search_engine_config.pop("run_func")
    await test(SearchEngine.from_search_config(SearchConfig(**search_engine_config)))
if __name__ == "__main__":
    # Allow running this test module directly.
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/libs/test_env.py | tests/metagpt/tools/libs/test_env.py | import os
from unittest.mock import AsyncMock
import pytest
from metagpt.tools.libs.env import (
EnvKeyNotFoundError,
default_get_env_description,
get_env,
get_env_default,
set_get_env_entry,
)
@pytest.mark.asyncio
class TestEnv:
    @pytest.fixture(autouse=True)
    def setup_and_teardown(self):
        """Setup and teardown for environment variables."""
        # "TEST_APP-KEY" exercises the app-scoped lookup (app_name + "-" + key);
        # "TEST_APP_KEY" exercises the plain, un-scoped lookup.
        self.mock_os_env = {
            "TEST_APP-KEY": "value1",
            "TEST_APP_KEY": "value2",
        }
        os.environ.update(self.mock_os_env)
        yield
        # Clear added environment variables
        for key in self.mock_os_env.keys():
            del os.environ[key]

    async def test_get_env(self):
        """Test retrieving an environment variable."""
        result = await get_env("KEY", app_name="TEST_APP")
        assert result == "value1"

        # Unknown keys raise rather than returning None.
        with pytest.raises(EnvKeyNotFoundError):
            await get_env("NON_EXISTENT_KEY")

        # Using no app_name
        result = await get_env("TEST_APP_KEY")
        assert result == "value2"

    async def test_get_env_default(self):
        """Test retrieving environment variable with default value."""
        result = await get_env_default("NON_EXISTENT_KEY", app_name="TEST_APP", default_value="default")
        assert result == "default"

    async def test_get_env_description(self):
        """Test retrieving descriptions for environment variables."""
        descriptions = await default_get_env_description()
        assert 'await get_env(key="KEY", app_name="TEST_APP")' in descriptions
        assert (
            descriptions['await get_env(key="KEY", app_name="TEST_APP")']
            == "Return the value of environment variable `TEST_APP-KEY`."
        )

    async def test_set_get_env_entry(self):
        """Test overriding get_env functionality."""
        # After set_get_env_entry, every subsequent get_env call is routed
        # through the injected coroutine.
        mock_get_env_value = "mocked_value"
        mock_func = AsyncMock(return_value=mock_get_env_value)
        set_get_env_entry(mock_func, default_get_env_description)
        result = await get_env("set_get_env")
        assert result == mock_get_env_value
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/libs/test_gpt_v_generator.py | tests/metagpt/tools/libs/test_gpt_v_generator.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/01/15
@Author : mannaandpoem
@File : test_gpt_v_generator.py
"""
from pathlib import Path
import pytest
from metagpt import logs
from metagpt.const import METAGPT_ROOT
from metagpt.tools.libs.gpt_v_generator import GPTvGenerator
@pytest.fixture
def mock_webpage_filename_with_styles_and_scripts(mocker):
    """Patch the LLM to return webpage code referencing styles.css / scripts.js."""
    mock_data = """```html\n<html>\n<script src="scripts.js"></script>
<link rel="stylesheet" href="styles.css">\n</html>\n```\n
```css\n/* styles.css */\n```\n
```javascript\n// scripts.js\n```\n"""
    mocker.patch("metagpt.provider.base_llm.BaseLLM.aask", return_value=mock_data)
    return mocker
@pytest.fixture
def mock_webpage_filename_with_style_and_script(mocker):
    """Patch the LLM to return webpage code referencing singular style.css / script.js."""
    mock_data = """```html\n<html>\n<script src="script.js"></script>
<link rel="stylesheet" href="style.css">\n</html>\n```\n
```css\n/* style.css */\n```\n
```javascript\n// script.js\n```\n"""
    mocker.patch("metagpt.provider.base_llm.BaseLLM.aask", return_value=mock_data)
    return mocker
@pytest.fixture
def mock_image_layout(mocker):
    """Patch the LLM to return a canned layout description for any sketch."""
    layout_stub = "The layout information of the sketch image is ..."
    mocker.patch("metagpt.provider.base_llm.BaseLLM.aask", return_value=layout_stub)
    return mocker
@pytest.fixture
def image_path():
    """Path to a sample sketch image shipped with the repo docs."""
    sample = f"{METAGPT_ROOT}/docs/resources/workspace/content_rec_sys/resources/competitive_analysis.png"
    return sample
@pytest.mark.asyncio
async def test_generate_webpages(mock_webpage_filename_with_styles_and_scripts, image_path):
    """The generator's output should contain html, css and javascript sections."""
    rsp = await GPTvGenerator().generate_webpages(image_path=image_path)
    logs.logger.info(rsp)
    for section in ("html", "css", "javascript"):
        assert section in rsp
@pytest.mark.asyncio
async def test_save_webpages_with_styles_and_scripts(mock_webpage_filename_with_styles_and_scripts, image_path):
    """Plural asset names produce index.html, styles.css and scripts.js on disk."""
    gen = GPTvGenerator()
    pages = await gen.generate_webpages(image_path)
    out_dir = gen.save_webpages(webpages=pages, save_folder_name="test_1")
    logs.logger.info(out_dir)
    assert out_dir.exists()
    for fname in ("index.html", "styles.css", "scripts.js"):
        assert (out_dir / fname).exists()
@pytest.mark.asyncio
async def test_save_webpages_with_style_and_script(mock_webpage_filename_with_style_and_script, image_path):
    """Singular asset names produce index.html, style.css and script.js on disk."""
    gen = GPTvGenerator()
    pages = await gen.generate_webpages(image_path)
    out_dir = gen.save_webpages(webpages=pages, save_folder_name="test_2")
    logs.logger.info(out_dir)
    assert out_dir.exists()
    for fname in ("index.html", "style.css", "script.js"):
        assert (out_dir / fname).exists()
@pytest.mark.asyncio
async def test_analyze_layout(mock_image_layout, image_path):
    """analyze_layout should return the (mocked) textual layout description."""
    result = await GPTvGenerator().analyze_layout(Path(image_path))
    logs.logger.info(result)
    assert result
if __name__ == "__main__":
    # Allow running this test module directly.
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/libs/test_software_development.py | tests/metagpt/tools/libs/test_software_development.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from typing import Dict
import pytest
from metagpt.tools.libs.software_development import import_git_repo
async def get_env_description() -> Dict[str, str]:
    """Map a get_env call snippet to a human-readable description (github token)."""
    call_snippet = 'await get_env(key="access_token", app_name="github")'
    return {call_snippet: "get the access token for github authentication."}
@pytest.mark.skip
@pytest.mark.asyncio
async def test_import_repo():
    """Importing a public git repo should yield a local checkout path (needs network)."""
    repo_url = "https://github.com/spec-first/connexion.git"
    checkout_path = await import_git_repo(repo_url)
    assert checkout_path
if __name__ == "__main__":
    # Allow running this test module directly.
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/libs/test_data_preprocess.py | tests/metagpt/tools/libs/test_data_preprocess.py | from datetime import datetime
import numpy as np
import numpy.testing as npt
import pandas as pd
import pytest
from metagpt.tools.libs.data_preprocess import (
FillMissingValue,
LabelEncode,
MaxAbsScale,
MinMaxScale,
OneHotEncode,
OrdinalEncode,
RobustScale,
StandardScale,
get_column_info,
)
@pytest.fixture
def mock_datasets():
    """5-row frame: numeric num1 and categorical cat1 (each with one NaN), datetime date1."""
    return pd.DataFrame(
        {
            "num1": [1, 2, np.nan, 4, 5],
            "cat1": ["A", "B", np.nan, "D", "A"],
            "date1": [
                datetime(2020, 1, 1),
                datetime(2020, 1, 2),
                datetime(2020, 1, 3),
                datetime(2020, 1, 4),
                datetime(2020, 1, 5),
            ],
        }
    )
def test_fill_missing_value(mock_datasets):
    """Mean imputation must leave no NaNs in the target column."""
    imputer = FillMissingValue(features=["num1"], strategy="mean")
    out = imputer.fit_transform(mock_datasets.copy())
    assert out["num1"].isnull().sum() == 0
def test_min_max_scale(mock_datasets):
    """Min-max scaling maps the column onto [0, 1]."""
    scaled = MinMaxScale(features=["num1"]).fit_transform(mock_datasets.copy())
    npt.assert_allclose(scaled["num1"].min(), 0)
    npt.assert_allclose(scaled["num1"].max(), 1)
def test_standard_scale(mock_datasets):
    """Standardization yields (integer-truncated) zero mean and unit std."""
    scaled = StandardScale(features=["num1"]).fit_transform(mock_datasets.copy())
    assert int(scaled["num1"].mean()) == 0
    assert int(scaled["num1"].std()) == 1
def test_max_abs_scale(mock_datasets):
    """Max-abs scaling bounds the column's absolute values by 1."""
    scaled = MaxAbsScale(features=["num1"]).fit_transform(mock_datasets.copy())
    npt.assert_allclose(scaled["num1"].abs().max(), 1)
def test_robust_scale(mock_datasets):
    """Robust scaling centers the column on a zero median."""
    scaled = RobustScale(features=["num1"]).fit_transform(mock_datasets.copy())
    assert int(scaled["num1"].median()) == 0
def test_ordinal_encode(mock_datasets):
    """Three distinct categories encode to ordinals with max code 2."""
    encoded = OrdinalEncode(features=["cat1"]).fit_transform(mock_datasets.copy())
    assert encoded["cat1"].max() == 2
def test_one_hot_encode(mock_datasets):
    """One-hot encoding creates an indicator column per category value."""
    encoded = OneHotEncode(features=["cat1"]).fit_transform(mock_datasets.copy())
    assert encoded["cat1_A"].max() == 1
def test_label_encode(mock_datasets):
    """Label encoding works on fitted data and does not raise on unseen labels."""
    encoder = LabelEncode(features=["cat1"])
    fitted = encoder.fit_transform(mock_datasets.copy())
    assert fitted["cat1"].max() == 3

    # transform with unseen data: new categories must still get codes
    unseen = mock_datasets.copy()
    unseen["cat1"] = ["A", "B", "C", "D", "E"]
    assert encoder.transform(unseen)["cat1"].max() == 4
def test_get_column_info(mock_datasets):
    """Columns are bucketed into Category / Numeric / Datetime / Others."""
    info = get_column_info(mock_datasets)
    assert info == {
        "Category": ["cat1"],
        "Numeric": ["num1"],
        "Datetime": ["date1"],
        "Others": [],
    }
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/libs/test_git.py | tests/metagpt/tools/libs/test_git.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import annotations
import os
import uuid
import pytest
from github import Auth, Github
from pydantic import BaseModel
from metagpt.context import Context
from metagpt.roles.di.data_interpreter import DataInterpreter
from metagpt.schema import UserMessage
from metagpt.utils.common import awrite
from metagpt.utils.git_repository import GitRepository
# Minimal SWE-bench record used by these tests.
class SWEBenchItem(BaseModel):
    base_commit: str  # git SHA to check out
    repo: str  # repository identifier -- presumably "owner/name"; TODO confirm against callers
async def get_env(key: str, app_name: str = ""):
    """Test shim mirroring the metagpt get_env API: read `key` from os.environ.

    `app_name` is accepted for signature compatibility but not used here.
    """
    value = os.environ.get(key)
    return value
@pytest.mark.skip
@pytest.mark.asyncio
async def test_login():
    """Password login against GitHub; skipped because it needs live credentials."""
    auth = Auth.Login(await get_env("GITHUB_USER"), await get_env("GITHUB_PWD"))
    g = Github(auth=auth)
    repo = g.get_repo("geekan/MetaGPT")
    topics = repo.get_topics()
    assert topics
    # Paginated lazy list; materialize it before asserting non-emptiness.
    open_issues = repo.get_issues(state="open")
    issues = [i for i in open_issues]
    assert issues
@pytest.mark.skip
@pytest.mark.asyncio
async def test_new_issue():
    """Create a GitHub issue through GitRepository; skipped (needs a live token)."""
    token = await get_env(key="access_token", app_name="github")
    issue = await GitRepository.create_issue(
        repo_name="iorisa/MetaGPT",
        title="This is a new issue",
        body="This is the issue body",
        access_token=token,
    )
    print(issue)
    assert issue.number
@pytest.mark.skip
@pytest.mark.asyncio
async def test_new_pr():
    """Open a PR between branches of one repo; skipped (needs a live token)."""
    # PR body written in the ">>>" quoted template style.
    body = """
>>> SUMMARY
>>> Change HTTP library used to send requests
>>>
>>> TESTS
>>> - [x] Send 'GET' request
>>> - [x] Send 'POST' request with/without body
"""
    pr = await GitRepository.create_pull(
        base_repo_name="iorisa/MetaGPT",
        base="send18",
        head="fixbug/gbk",
        title="Test pr",
        body=body,
        access_token=await get_env(key="access_token", app_name="github"),
    )
    print(pr)
    assert pr
@pytest.mark.skip
@pytest.mark.asyncio
async def test_new_pr1():
    """Open a cross-repo (fork -> upstream) PR; skipped (needs a live token)."""
    body = """
>>> SUMMARY
>>> Change HTTP library used to send requests
>>>
>>> TESTS
>>> - [x] Send 'GET' request
>>> - [x] Send 'POST' request with/without body
"""
    pr = await GitRepository.create_pull(
        head_repo_name="iorisa/MetaGPT",
        head="fixbug/vscode",
        base_repo_name="send18/MetaGPT",
        base="dev",
        title="Test pr",
        body=body,
        access_token=await get_env(key="access_token", app_name="github"),
    )
    print(pr)
    assert pr
@pytest.mark.skip
@pytest.mark.asyncio
async def test_auth():
    """Token auth: list the authenticated user's public repos; skipped (live token)."""
    token = await get_env(key="access_token", app_name="github")
    gh = Github(auth=Auth.Token(token))
    repos = gh.get_user().get_repos(visibility="public")
    full_names = [repo.full_name for repo in repos]
    assert full_names
    print(full_names)
@pytest.mark.skip
@pytest.mark.asyncio
async def test_github(context):
    """Clone, edit README, push a feature branch, open a PR; skipped (live token)."""
    repo = await GitRepository.clone_from(url="https://github.com/iorisa/snake-game.git")
    content = uuid.uuid4().hex  # random payload so every run pushes a real change
    await awrite(filename=repo.workdir / "README.md", data=content)
    branch = await repo.push(
        new_branch=f"feature/{content[0:8]}", access_token=await get_env(key="access_token", app_name="github")
    )
    pr = await GitRepository.create_pull(
        base=branch.base,
        head=branch.head,
        base_repo_name=branch.repo_name,
        title=f"new pull {content[0:8]}",
        access_token=await get_env(key="access_token", app_name="github"),
    )
    assert pr
@pytest.mark.skip
@pytest.mark.asyncio
@pytest.mark.parametrize(
"content",
[
# "create a new issue to github repo 'iorisa/snake-game' :'The snake did not grow longer after eating'",
"Resolve the issue #1 'Snake not growing longer after eating' in the GitHub repository https://github.com/iorisa/snake-game.git', and create a new pull request about the issue"
],
)
async def test_git_create_issue(content: str):
context = Context()
di = DataInterpreter(context=context, tools=["<all>"])
prerequisite = "from metagpt.tools.libs import get_env"
await di.execute_code.run(code=prerequisite, language="python")
di.put_message(UserMessage(content=content))
while not di.is_idle:
await di.run()
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/libs/test_browser.py | tests/metagpt/tools/libs/test_browser.py | from unittest.mock import AsyncMock, patch
import pytest
import pytest_asyncio
from metagpt.const import TEST_DATA_PATH
from metagpt.tools.libs.browser import Browser
# Live documentation page used by navigation tests.
TEST_URL = "https://docs.deepwisdom.ai/main/en/guide/get_started/quickstart.html"
# Destination for screenshot output.
# NOTE(review): unused in the tests below — confirm before removing.
TEST_SCREENSHOT_PATH = TEST_DATA_PATH / "screenshot.png"
@pytest.mark.asyncio
class TestBrowser:
    """Tests for the Playwright-backed Browser tool.

    Only ``test_goto`` touches the network; element interactions are patched
    with ``AsyncMock`` so no live page is required for them.
    """

    # Live site used by the navigation test.
    test_url = "https://juejin.cn/"

    @pytest_asyncio.fixture(autouse=True)
    async def browser_client(self):
        """Setup before each test case."""
        print("browser_client")
        browser = await self.async_setup()
        yield browser
        # NOTE(review): test_start_and_stop also stops the browser itself, so this
        # teardown assumes Browser.stop() tolerates a second call — confirm.
        await self.browser.stop()

    async def async_setup(self):
        # Launch a headless browser and keep a handle on the test instance.
        self.browser = Browser(headless=True)
        await self.browser.start()
        return self.browser

    async def async_teardown(self):
        """Teardown after each test case."""
        await self.browser.stop()

    async def test_start_and_stop(self):
        """Test browser start and stop functionality."""
        assert self.browser.playwright is not None
        assert self.browser.browser_instance is not None
        assert self.browser.browser_ctx is not None
        assert self.browser.page is not None
        await self.async_teardown()
        # After stop(), all Playwright handles must have been released.
        assert self.browser.playwright is None
        assert self.browser.browser_instance is None
        assert self.browser.browser_ctx is None

    async def test_goto(self):
        """Test navigating to a URL (requires network access)."""
        mock_reporter = AsyncMock()
        self.browser.reporter = mock_reporter
        result = await self.browser.goto(self.test_url)
        assert "SUCCESS" in result
        assert self.test_url in self.browser.page.url

    @patch("metagpt.tools.libs.browser.click_element", new_callable=AsyncMock)
    async def test_click(self, mock_click_element):
        """Test clicking on an element."""
        # Minimal accessibility tree; index 1 resolves to the second node.
        self.browser.accessibility_tree = [
            {"nodeId": "1", "backendDOMNodeId": 101, "name": "Button"},
            {"nodeId": "2", "backendDOMNodeId": 102, "name": "Input"},
        ]
        self.browser.page = AsyncMock()
        await self.browser.click(1)
        mock_click_element.assert_called_once()

    @patch("metagpt.tools.libs.browser.click_element", new_callable=AsyncMock)
    @patch("metagpt.tools.libs.browser.type_text", new_callable=AsyncMock)
    async def test_type(self, mock_type_text, mock_click_element):
        """Test typing text into an input field."""
        content = "Hello, world!"
        self.browser.accessibility_tree = [
            {"nodeId": "1", "backendDOMNodeId": 101, "name": "Button"},
            {"nodeId": "2", "backendDOMNodeId": 102, "name": "Input"},
        ]
        self.browser.page = AsyncMock()
        await self.browser.type(1, content)
        # Typing first clicks the target element, then sends the text.
        mock_click_element.assert_called_once()
        mock_type_text.assert_called_once_with(self.browser.page, content)

    @patch("metagpt.tools.libs.browser.key_press", new_callable=AsyncMock)
    @patch("metagpt.tools.libs.browser.hover_element", new_callable=AsyncMock)
    async def test_hover_press(self, mock_hover_element, mock_key_press):
        """Test Hover and press key"""
        self.browser.accessibility_tree = [
            {"nodeId": "1", "backendDOMNodeId": 101, "name": "Button"},
            {"nodeId": "2", "backendDOMNodeId": 102, "name": "Input"},
        ]
        self.browser.page = AsyncMock()
        key_comb = "Enter"
        await self.browser.hover(1)
        await self.browser.press(key_comb)
        mock_hover_element.assert_called_once()
        mock_key_press.assert_called_once_with(self.browser.page, key_comb)

    async def test_scroll(self):
        """Scroll the page up or down."""
        await self.browser.scroll("down")
        await self.browser.scroll("up")

    async def test_go_back_and_forward(self):
        # History navigation must not raise even with no prior navigation.
        await self.browser.go_back()
        await self.browser.go_forward()

    async def test_tab_focus(self):
        # Focusing the only open tab (index 0) should succeed.
        await self.browser.tab_focus(0)

    async def test_close_tab(self):
        """Test closing a tab."""
        mock_close = AsyncMock()
        self.browser.page = AsyncMock()
        self.browser.page.close = mock_close
        await self.browser.close_tab()
        mock_close.assert_called_once()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/libs/test_linter.py | tests/metagpt/tools/libs/test_linter.py | import tempfile
from pathlib import Path
import pytest
from metagpt.tools.libs.linter import Linter, LintResult
def test_linter_initialization():
    """Linter stores encoding/root and registers a python handler."""
    linter = Linter(encoding="utf-8", root="/test/root")
    assert linter.encoding == "utf-8"
    assert linter.root == "/test/root"
    assert "python" in linter.languages
    assert callable(linter.languages["python"])


def test_get_abs_fname():
    """get_abs_fname of a bare filename round-trips through get_rel_fname."""
    linter = Linter(root="/test/root")
    abs_path = linter.get_abs_fname("test_file.py")
    # NOTE(review): asserting abs == rel looks odd; presumably both normalise a
    # bare name the same way under this root — confirm against Linter's API.
    assert abs_path == linter.get_rel_fname("test_file.py")


def test_py_lint():
    """py_lint returns None when nothing is wrong."""
    linter = Linter()
    code = "print('Hello, World!')"
    # NOTE(review): the fname arguments point at this test file while `code` is a
    # separate snippet — confirm which input py_lint actually lints.
    test_file_path = str(Path(__file__).resolve())
    result = linter.py_lint(test_file_path, test_file_path, code)
    assert result is None  # No errors expected for valid Python code


def test_lint_with_python_file():
    """lint() reports an IndentationError for a mis-indented .py file."""
    linter = Linter()
    with tempfile.NamedTemporaryFile(suffix=".py", delete=True) as temp_file:
        temp_file.write(b"def hello():\nprint('Hello')\n")  # IndentationError
        temp_file.flush()
        result = linter.lint(temp_file.name)
        assert isinstance(result, LintResult)
        assert "IndentationError" in result.text
        assert len(result.lines) > 0


def test_lint_with_unsupported_language():
    """lint() returns None for file types it has no handler for."""
    linter = Linter()
    with tempfile.NamedTemporaryFile(suffix=".unsupported", delete=True) as temp_file:
        temp_file.write(b"This is unsupported code.")
        temp_file.flush()
        result = linter.lint(temp_file.name)
        assert result is None  # Unsupported language should return None


def test_run_cmd():
    """run_cmd tolerates the external linter binary being absent."""
    linter = Linter()
    with tempfile.NamedTemporaryFile(suffix=".py", delete=True) as temp_file:
        temp_file.write(b"print('Hello, World!')\n")
        temp_file.flush()
        result = linter.run_cmd("flake8", temp_file.name, "print('Hello, World!')")
        # Since flake8 might not be installed in the test environment, we just ensure no exception is raised
        assert result is None or isinstance(result, LintResult)


# Allow running this test module directly via `python <file>`.
if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/libs/test_editor.py | tests/metagpt/tools/libs/test_editor.py | import os
import shutil
from pathlib import Path
import pytest
from metagpt.const import TEST_DATA_PATH
from metagpt.tools.libs.editor import Editor
from metagpt.tools.libs.index_repo import (
CHATS_INDEX_ROOT,
CHATS_ROOT,
DEFAULT_MIN_TOKEN_COUNT,
UPLOAD_ROOT,
IndexRepo,
)
from metagpt.utils.common import list_files
TEST_FILE_CONTENT = """
# this is line one
def test_function_for_fm():
"some docstring"
a = 1
b = 2
c = 3
# this is the 7th line
""".strip()
WINDOW = 200
@pytest.fixture
def temp_file_path(tmp_path):
    """Yield a path for a (not yet created) temp file; remove it on teardown."""
    assert tmp_path is not None
    temp_file_path = tmp_path / "a.txt"
    yield temp_file_path
    # missing_ok: don't fail teardown when a test never created (or already removed) the file.
    temp_file_path.unlink(missing_ok=True)


@pytest.fixture
def temp_py_file(tmp_path):
    """Yield a Python file pre-filled with TEST_FILE_CONTENT; remove it on teardown."""
    assert tmp_path is not None
    temp_file_path = tmp_path / "test_script_for_editor.py"
    temp_file_path.write_text(TEST_FILE_CONTENT)
    yield temp_file_path
    temp_file_path.unlink(missing_ok=True)


@pytest.fixture
def empty_file(tmp_path):
    """Yield an empty Python file; remove it on teardown."""
    assert tmp_path is not None
    temp_file_path = tmp_path / "test_script_empty_file_for_editor.py"
    temp_file_path.write_text("")
    yield temp_file_path
    temp_file_path.unlink(missing_ok=True)
# Expected file content after replacing lines 3-5 with two comment lines.
EXPECTED_CONTENT_AFTER_REPLACE = """
# this is line one
def test_function_for_fm():
    # This is the new line A replacing lines 3 to 5.
    # This is the new line B.
    c = 3
    # this is the 7th line
""".strip()


def test_replace_content(temp_py_file):
    """_edit_file_impl with is_insert/is_append False replaces the inclusive line range."""
    editor = Editor()
    editor._edit_file_impl(
        file_name=temp_py_file,
        start=3,
        end=5,
        content="    # This is the new line A replacing lines 3 to 5.\n    # This is the new line B.",
        is_insert=False,
        is_append=False,
    )
    with open(temp_py_file, "r") as f:
        new_content = f.read()
    assert new_content.strip() == EXPECTED_CONTENT_AFTER_REPLACE.strip()


# Expected file content after deleting lines 3-5 (replacement with empty content).
EXPECTED_CONTENT_AFTER_DELETE = """
# this is line one
def test_function_for_fm():
    c = 3
    # this is the 7th line
""".strip()


def test_delete_content(temp_py_file):
    """Replacing a range with empty content deletes those lines."""
    editor = Editor()
    editor._edit_file_impl(
        file_name=temp_py_file,
        start=3,
        end=5,
        content="",
        is_insert=False,
        is_append=False,
    )
    with open(temp_py_file, "r") as f:
        new_content = f.read()
    assert new_content.strip() == EXPECTED_CONTENT_AFTER_DELETE.strip()


# Expected file content after inserting a new line at line 3.
EXPECTED_CONTENT_AFTER_INSERT = """
# this is line one
def test_function_for_fm():
    # This is the new line to be inserted, at line 3
    "some docstring"
    a = 1
    b = 2
    c = 3
    # this is the 7th line
""".strip()


def test_insert_content(temp_py_file):
    """insert_content_at_line pushes existing lines down from the insertion point."""
    editor = Editor(enable_auto_lint=True)
    editor.insert_content_at_line(
        file_name=temp_py_file,
        line_number=3,
        insert_content="    # This is the new line to be inserted, at line 3",
    )
    with open(temp_py_file, "r") as f:
        new_content = f.read()
    assert new_content.strip() == EXPECTED_CONTENT_AFTER_INSERT.strip()
@pytest.mark.parametrize(
    "filename",
    [
        TEST_DATA_PATH / "output_parser/1.md",
        TEST_DATA_PATH / "search/serper-metagpt-8.json",
        TEST_DATA_PATH / "audio/hello.mp3",
        TEST_DATA_PATH / "code/python/1.py",
        TEST_DATA_PATH / "code/js/1.js",
        TEST_DATA_PATH / "ui/1b.png.html",
        TEST_DATA_PATH / "movie/trailer.mp4",
    ],
)
@pytest.mark.asyncio
async def test_read_files(filename):
    """Editor.read handles the fixture formats; text formats yield non-empty content."""
    editor = Editor()
    file_block = await editor.read(filename)
    assert file_block
    assert file_block.file_path
    # Binary media (.png/.mp3/.mp4) may legitimately produce empty block content.
    if filename.suffix not in [".png", ".mp3", ".mp4"]:
        assert file_block.block_content
def _numbered_test_lines(start, end) -> str:
return ("\n".join(f"{i}|" for i in range(start, end + 1))) + "\n"
def _generate_test_file_with_lines(temp_path, num_lines) -> str:
file_path = temp_path / "test_file.py"
file_path.write_text("\n" * num_lines)
return file_path
def _generate_ruby_test_file_with_lines(temp_path, num_lines) -> str:
file_path = temp_path / "test_file.rb"
file_path.write_text("\n" * num_lines)
return file_path
def _calculate_window_bounds(current_line, total_lines, window_size):
half_window = window_size // 2
if current_line - half_window < 0:
start = 1
end = window_size
else:
start = current_line - half_window
end = current_line + half_window
return start, end
def test_open_file_unexist_path():
    """open_file raises FileNotFoundError for a path that does not exist."""
    editor = Editor()
    with pytest.raises(FileNotFoundError):
        editor.open_file("/unexist/path/a.txt")


def test_open_file(temp_file_path):
    """A short file is rendered in full with 3-digit line numbers and begin/end markers."""
    editor = Editor()
    temp_file_path.write_text("Line 1\nLine 2\nLine 3\nLine 4\nLine 5")
    result = editor.open_file(str(temp_file_path))
    assert result is not None
    expected = (
        f"[File: {temp_file_path} (5 lines total)]\n"
        "(this is the beginning of the file)\n"
        "001|Line 1\n"
        "002|Line 2\n"
        "003|Line 3\n"
        "004|Line 4\n"
        "005|Line 5\n"
        "(this is the end of the file)"
    )
    assert result.split("\n") == expected.split("\n")


def test_open_file_with_indentation(temp_file_path):
    """Leading whitespace inside file lines is preserved in the rendered view."""
    editor = Editor()
    temp_file_path.write_text("Line 1\n    Line 2\nLine 3\nLine 4\nLine 5")
    result = editor.open_file(str(temp_file_path))
    assert result is not None
    expected = (
        f"[File: {temp_file_path} (5 lines total)]\n"
        "(this is the beginning of the file)\n"
        "001|Line 1\n"
        "002|    Line 2\n"
        "003|Line 3\n"
        "004|Line 4\n"
        "005|Line 5\n"
        "(this is the end of the file)"
    )
    assert result.split("\n") == expected.split("\n")


def test_open_file_long(temp_file_path):
    """Opening with an explicit window shows only that many lines plus a below-counter."""
    editor = Editor()
    content = "\n".join([f"Line {i}" for i in range(1, 1001)])
    temp_file_path.write_text(content)
    result = editor.open_file(str(temp_file_path), 1, 50)
    assert result is not None
    expected = f"[File: {temp_file_path} (1000 lines total)]\n"
    expected += "(this is the beginning of the file)\n"
    for i in range(1, 51):
        expected += f"{i:03d}|Line {i}\n"
    expected += "(950 more lines below)"
    assert result.split("\n") == expected.split("\n")
def test_open_file_long_with_lineno(temp_file_path):
    """Opening at a line deep in the file centres the default window on it."""
    editor = Editor()
    content = "\n".join([f"Line {i}" for i in range(1, 1001)])
    temp_file_path.write_text(content)
    cur_line = 300
    result = editor.open_file(str(temp_file_path), cur_line)
    assert result is not None
    # Rebuild the expected window with the same logic the Editor uses.
    expected = f"[File: {temp_file_path} (1000 lines total)]\n"
    start, end = _calculate_window_bounds(cur_line, 1000, WINDOW)
    if start == 1:
        expected += "(this is the beginning of the file)\n"
    else:
        expected += f"({start - 1} more lines above)\n"
    for i in range(start, end + 1):
        expected += f"{i:03d}|Line {i}\n"
    if end == 1000:
        expected += "(this is the end of the file)\n"
    else:
        expected += f"({1000 - end} more lines below)"
    assert result.split("\n") == expected.split("\n")


@pytest.mark.asyncio
async def test_create_file(temp_file_path):
    """create_file creates the file and reports the path."""
    editor = Editor()
    result = await editor.create_file(str(temp_file_path))
    expected = f"[File {temp_file_path} created.]"
    assert result.split("\n") == expected.split("\n")


def test_goto_line(temp_file_path):
    """goto_line re-renders the window centred on the requested line."""
    editor = Editor()
    total_lines = 1000
    content = "\n".join([f"Line {i}" for i in range(1, total_lines + 1)])
    temp_file_path.write_text(content)
    result = editor.open_file(str(temp_file_path))
    assert result is not None
    # Initial open shows the first WINDOW lines.
    expected = f"[File: {temp_file_path} ({total_lines} lines total)]\n"
    expected += "(this is the beginning of the file)\n"
    for i in range(1, WINDOW + 1):
        expected += f"{i:03d}|Line {i}\n"
    expected += f"({total_lines - WINDOW} more lines below)"
    assert result.split("\n") == expected.split("\n")
    result = editor.goto_line(500)
    assert result is not None
    cur_line = 500
    expected = f"[File: {temp_file_path} ({total_lines} lines total)]\n"
    start, end = _calculate_window_bounds(cur_line, total_lines, WINDOW)
    if start == 1:
        expected += "(this is the beginning of the file)\n"
    else:
        expected += f"({start - 1} more lines above)\n"
    for i in range(start, end + 1):
        expected += f"{i:03d}|Line {i}\n"
    if end == total_lines:
        expected += "(this is the end of the file)\n"
    else:
        expected += f"({total_lines - end} more lines below)"
    assert result.split("\n") == expected.split("\n")


def test_goto_line_negative(temp_file_path):
    """Negative line numbers are rejected."""
    editor = Editor()
    content = "\n".join([f"Line {i}" for i in range(1, 5)])
    temp_file_path.write_text(content)
    editor.open_file(str(temp_file_path))
    with pytest.raises(ValueError):
        editor.goto_line(-1)


def test_goto_line_out_of_bound(temp_file_path):
    """Line numbers past the end of the file are rejected."""
    editor = Editor()
    content = "\n".join([f"Line {i}" for i in range(1, 5)])
    temp_file_path.write_text(content)
    editor.open_file(str(temp_file_path))
    with pytest.raises(ValueError):
        editor.goto_line(100)
def test_scroll_down(temp_file_path):
    """scroll_down advances the view by WINDOW lines."""
    editor = Editor()
    total_lines = 1000
    content = "\n".join([f"Line {i}" for i in range(1, total_lines + 1)])
    temp_file_path.write_text(content)
    result = editor.open_file(str(temp_file_path))
    assert result is not None
    # Expected view after the initial open (window anchored at line 1).
    expected = f"[File: {temp_file_path} ({total_lines} lines total)]\n"
    start, end = _calculate_window_bounds(1, total_lines, WINDOW)
    if start == 1:
        expected += "(this is the beginning of the file)\n"
    else:
        expected += f"({start - 1} more lines above)\n"
    for i in range(start, end + 1):
        expected += f"{i:03d}|Line {i}\n"
    if end == total_lines:
        expected += "(this is the end of the file)"
    else:
        expected += f"({total_lines - end} more lines below)"
    assert result.split("\n") == expected.split("\n")
    result = editor.scroll_down()
    assert result is not None
    # Expected view after one scroll: window centred WINDOW lines further down.
    expected = f"[File: {temp_file_path} ({total_lines} lines total)]\n"
    start, end = _calculate_window_bounds(WINDOW + 1, total_lines, WINDOW)
    if start == 1:
        expected += "(this is the beginning of the file)\n"
    else:
        expected += f"({start - 1} more lines above)\n"
    for i in range(start, end + 1):
        expected += f"{i:03d}|Line {i}\n"
    if end == total_lines:
        expected += "(this is the end of the file)\n"
    else:
        expected += f"({total_lines - end} more lines below)"
    assert result.split("\n") == expected.split("\n")
def test_scroll_up(temp_file_path):
    """scroll_up from line 500 moves the window up by WINDOW lines."""
    editor = Editor()
    total_lines = 1000
    content = "\n".join([f"Line {i}" for i in range(1, total_lines + 1)])
    temp_file_path.write_text(content)
    cur_line = 500
    result = editor.open_file(str(temp_file_path), cur_line)
    assert result is not None
    # Expected view centred on line 500.
    expected = f"[File: {temp_file_path} ({total_lines} lines total)]\n"
    start, end = _calculate_window_bounds(cur_line, total_lines, WINDOW)
    if start == 1:
        expected += "(this is the beginning of the file)\n"
    else:
        expected += f"({start - 1} more lines above)\n"
    for i in range(start, end + 1):
        expected += f"{i:03d}|Line {i}\n"
    if end == total_lines:
        expected += "(this is the end of the file)\n"
    else:
        expected += f"({total_lines - end} more lines below)"
    assert result.split("\n") == expected.split("\n")
    result = editor.scroll_up()
    assert result is not None
    # After scrolling up, the window is centred WINDOW lines earlier.
    cur_line = cur_line - WINDOW
    expected = f"[File: {temp_file_path} ({total_lines} lines total)]\n"
    start, end = _calculate_window_bounds(cur_line, total_lines, WINDOW)
    if start == 1:
        expected += "(this is the beginning of the file)\n"
    else:
        expected += f"({start - 1} more lines above)\n"
    for i in range(start, end + 1):
        expected += f"{i:03d}|Line {i}\n"
    if end == total_lines:
        expected += "(this is the end of the file)\n"
    else:
        expected += f"({total_lines - end} more lines below)"
    # Leftover debug prints of result/expected removed.
    assert result.split("\n") == expected.split("\n")
def test_scroll_down_edge(temp_file_path):
    """scroll_down on a file smaller than the window keeps showing the whole file."""
    editor = Editor()
    content = "\n".join([f"Line {i}" for i in range(1, 10)])
    temp_file_path.write_text(content)
    result = editor.open_file(str(temp_file_path))
    assert result is not None
    expected = f"[File: {temp_file_path} (9 lines total)]\n"
    expected += "(this is the beginning of the file)\n"
    for i in range(1, 10):
        expected += f"{i:03d}|Line {i}\n"
    expected += "(this is the end of the file)"
    result = editor.scroll_down()
    assert result is not None
    assert result.split("\n") == expected.split("\n")


def test_print_window_internal(temp_file_path):
    """_print_window renders window//2 lines either side of the current line with counters."""
    editor = Editor()
    # NOTE(review): create_file is awaited elsewhere (async); here the returned
    # coroutine is discarded and the open() below actually creates the file — confirm.
    editor.create_file(str(temp_file_path))
    with open(temp_file_path, "w") as file:
        for i in range(1, 101):
            file.write(f"Line `{i}`\n")
    current_line = 50
    window = 2
    result = editor._print_window(temp_file_path, current_line, window)
    expected = "(48 more lines above)\n" "049|Line `49`\n" "050|Line `50`\n" "051|Line `51`\n" "(49 more lines below)"
    assert result == expected


def test_open_file_large_line_number(temp_file_path):
    """Wide windows deep in a file are rendered without zero-padded line numbers."""
    editor = Editor()
    # NOTE(review): same un-awaited create_file pattern as above — confirm.
    editor.create_file(str(temp_file_path))
    with open(temp_file_path, "w") as file:
        for i in range(1, 1000):
            file.write(f"Line `{i}`\n")
    current_line = 800
    window = 100
    result = editor.open_file(str(temp_file_path), current_line, window)
    expected = f"[File: {temp_file_path} (999 lines total)]\n"
    expected += "(749 more lines above)\n"
    for i in range(750, 850 + 1):
        expected += f"{i}|Line `{i}`\n"
    expected += "(149 more lines below)"
    assert result == expected


def test_open_file_large_line_number_consecutive_diff_window(temp_file_path):
    """Opening with a custom window then scrolling up falls back to the default WINDOW."""
    editor = Editor()
    editor.create_file(str(temp_file_path))
    total_lines = 1000
    with open(temp_file_path, "w") as file:
        for i in range(1, total_lines + 1):
            file.write(f"Line `{i}`\n")
    current_line = 800
    cur_window = 300
    result = editor.open_file(str(temp_file_path), current_line, cur_window)
    expected = f"[File: {temp_file_path} ({total_lines} lines total)]\n"
    start, end = _calculate_window_bounds(current_line, total_lines, cur_window)
    if start == 1:
        expected += "(this is the beginning of the file)\n"
    else:
        expected += f"({start - 1} more lines above)\n"
    for i in range(current_line - cur_window // 2, current_line + cur_window // 2 + 1):
        expected += f"{i}|Line `{i}`\n"
    if end == total_lines:
        expected += "(this is the end of the file)\n"
    else:
        expected += f"({total_lines - end} more lines below)"
    assert result == expected
    # The follow-up scroll uses the default WINDOW, not the custom cur_window.
    current_line = current_line - WINDOW
    result = editor.scroll_up()
    expected = f"[File: {temp_file_path} ({total_lines} lines total)]\n"
    start, end = _calculate_window_bounds(current_line, total_lines, WINDOW)
    if start == 1:
        expected += "(this is the beginning of the file)\n"
    else:
        expected += f"({start - 1} more lines above)\n"
    for i in range(start, end + 1):
        expected += f"{i}|Line `{i}`\n"
    if end == total_lines:
        expected += "(this is the end of the file)\n"
    else:
        expected += f"({total_lines - end} more lines below)"
    assert result.split("\n") == expected.split("\n")
# Expected file content after replacing line 5 ("    b = 2") with "    b = 9".
EXPECTED_CONTENT_AFTER_REPLACE_TEXT = """
# this is line one
def test_function_for_fm():
    "some docstring"
    a = 1
    b = 9
    c = 3
    # this is the 7th line
""".strip()


def test_edit_file_by_replace(temp_py_file):
    """edit_file_by_replace swaps the anchored line range for the new content."""
    editor = Editor()
    editor.edit_file_by_replace(
        file_name=str(temp_py_file),
        first_replaced_line_number=5,
        first_replaced_line_content="    b = 2",
        new_content="    b = 9",
        last_replaced_line_number=5,
        last_replaced_line_content="    b = 2",
    )
    with open(temp_py_file, "r") as f:
        new_content = f.read()
    assert new_content.strip() == EXPECTED_CONTENT_AFTER_REPLACE_TEXT.strip()


# Full diagnostic emitted when the anchor line contents don't match the file.
MISMATCH_ERROR = """
Error: The `first_replaced_line_number` does not match the `first_replaced_line_content`. Please correct the parameters.
The `first_replaced_line_number` is 5 and the corresponding content is "    b = 2".
But the `first_replaced_line_content ` is "".
The content around the specified line is:
The 002 line is "def test_function_for_fm():"
The 003 line is "    "some docstring""
The 004 line is "    a = 1"
The 005 line is "    b = 2"
The 006 line is "    c = 3"
The 007 line is "    # this is the 7th line"
Pay attention to the new content. Ensure that it aligns with the new parameters.
Error: The `last_replaced_line_number` does not match the `last_replaced_line_content`. Please correct the parameters.
The `last_replaced_line_number` is 5 and the corresponding content is "    b = 2".
But the `last_replaced_line_content ` is "".
The content around the specified line is:
The 002 line is "def test_function_for_fm():"
The 003 line is "    "some docstring""
The 004 line is "    a = 1"
The 005 line is "    b = 2"
The 006 line is "    c = 3"
The 007 line is "    # this is the 7th line"
Pay attention to the new content. Ensure that it aligns with the new parameters.
""".strip()


def test_edit_file_by_replace_mismatch(temp_py_file):
    """Mismatched anchor contents raise ValueError carrying the full diagnostic text."""
    editor = Editor()
    with pytest.raises(ValueError) as match_error:
        editor.edit_file_by_replace(
            file_name=str(temp_py_file),
            first_replaced_line_number=5,
            first_replaced_line_content="",
            new_content="    b = 9",
            last_replaced_line_number=5,
            last_replaced_line_content="",
        )
    assert str(match_error.value).strip() == MISMATCH_ERROR.strip()
def test_append_file(temp_file_path):
    """append_file appends text and returns a rendered view of the updated file."""
    editor = Editor()
    # Write the initial content
    initial_content = "Line 1\nLine 2\nLine 3\n"
    temp_file_path.write_text(initial_content)
    # Append content to the file
    append_content = "Line 4\nLine 5\n"
    result = editor.append_file(str(temp_file_path), append_content)
    # Expected file content
    expected_content = initial_content + append_content
    # Read the file back and assert it matches the expectation
    with open(temp_file_path, "r") as f:
        new_content = f.read()
    assert new_content == expected_content
    # Expected rendered view returned by append_file
    expected_output = (
        f"[File: {temp_file_path.resolve()} (5 lines total after edit)]\n"
        "(this is the beginning of the file)\n"
        "001|Line 1\n"
        "002|Line 2\n"
        "003|Line 3\n"
        "004|Line 4\n"
        "005|Line 5\n"
        "(this is the end of the file)\n"
        "[File updated (edited at line 3)]."
    )
    assert result.split("\n") == expected_output.split("\n")
def test_search_dir(tmp_path):
    """search_dir finds the term in files at any depth under the given directory."""
    editor = Editor()
    dir_path = tmp_path / "test_dir"
    dir_path.mkdir()
    # Create some files with specific content
    (dir_path / "file1.txt").write_text("This is a test file with some content.")
    (dir_path / "file2.txt").write_text("Another file with different content.")
    sub_dir = dir_path / "sub_dir"
    sub_dir.mkdir()
    (sub_dir / "file3.txt").write_text("This file is inside a sub directory with some content.")
    search_term = "some content"
    result = editor.search_dir(search_term, str(dir_path))
    assert "file1.txt" in result
    assert "file3.txt" in result
    assert "Another file with different content." not in result


def test_search_dir_in_default_dir(tmp_path):
    """search_dir without an explicit dir searches the editor's working_dir."""
    # NOTE(review): `tmp_path` is unused and the files created under
    # editor.working_dir are never cleaned up — confirm whether that is intended.
    editor = Editor()
    dir_path = editor.working_dir / "test_dir"
    dir_path.mkdir(exist_ok=True)
    # Create some files with specific content
    (dir_path / "file1.txt").write_text("This is a test file with some content.")
    (dir_path / "file2.txt").write_text("Another file with different content.")
    sub_dir = dir_path / "sub_dir"
    sub_dir.mkdir(exist_ok=True)
    (sub_dir / "file3.txt").write_text("This file is inside a sub directory with some content.")
    search_term = "some content"
    result = editor.search_dir(search_term)
    assert "file1.txt" in result
    assert "file3.txt" in result
    assert "Another file with different content." not in result


def test_search_file(temp_file_path):
    """search_file reports matching lines with their 1-based line numbers."""
    editor = Editor()
    file_path = temp_file_path
    file_path.write_text("This is a test file with some content.\nAnother line with more content.")
    search_term = "some content"
    result = editor.search_file(search_term, str(file_path))
    assert "Line 1: This is a test file with some content." in result
    assert "Line 2: Another line with more content." not in result


def test_find_file(tmp_path):
    """find_file matches exact file names, recursing into subdirectories."""
    editor = Editor()
    dir_path = tmp_path / "test_dir"
    dir_path.mkdir()
    # Create some files with specific names
    (dir_path / "file1.txt").write_text("Content of file 1.")
    (dir_path / "file2.txt").write_text("Content of file 2.")
    sub_dir = dir_path / "sub_dir"
    sub_dir.mkdir()
    (sub_dir / "file3.txt").write_text("Content of file 3.")
    file_name = "file1.txt"
    result = editor.find_file(file_name, str(dir_path))
    assert "file1.txt" in result
    assert "file2.txt" not in result
    assert "file3.txt" not in result
# Test data for _append_impl method
TEST_LINES = ["First line\n", "Second line\n", "Third line\n"]
NEW_CONTENT = "Appended line\n"
EXPECTED_APPEND_NON_EMPTY_FILE = ["First line\n", "Second line\n", "Third line\n", "Appended line\n"]
EXPECTED_APPEND_EMPTY_FILE = ["Appended line\n"]


def test_append_non_empty_file():
    """_append_impl appends one line after existing lines."""
    editor = Editor()
    lines = TEST_LINES.copy()
    content, n_added_lines = editor._append_impl(lines, NEW_CONTENT)
    assert content.splitlines(keepends=True) == EXPECTED_APPEND_NON_EMPTY_FILE
    assert n_added_lines == 1


def test_append_empty_file():
    """_append_impl on no lines yields just the appended content."""
    editor = Editor()
    lines = []
    content, n_added_lines = editor._append_impl(lines, NEW_CONTENT)
    assert content.splitlines(keepends=True) == EXPECTED_APPEND_EMPTY_FILE
    assert n_added_lines == 1


def test_append_to_single_empty_line_file():
    """A file containing one empty line behaves like an empty file for appends."""
    editor = Editor()
    lines = [""]
    content, n_added_lines = editor._append_impl(lines, NEW_CONTENT)
    assert content.splitlines(keepends=True) == EXPECTED_APPEND_EMPTY_FILE
    assert n_added_lines == 1
async def mock_index_repo():
    """Copy the sample "requirements" docs into the chat and upload roots and index the chat copy.

    Returns:
        (chat_path, upload_root, src_path) for use by similarity-search tests.
    """
    chat_id = "1"
    chat_path = Path(CHATS_ROOT) / chat_id
    chat_path.mkdir(parents=True, exist_ok=True)
    src_path = TEST_DATA_PATH / "requirements"
    # shutil.copytree instead of `os.system("cp -rf ...")`: portable, no shell,
    # and safe with paths containing spaces or non-ASCII characters.
    shutil.copytree(src_path, chat_path / src_path.name, dirs_exist_ok=True)
    filenames = list_files(chat_path)
    chat_files = [i for i in filenames if Path(i).suffix in {".md", ".txt", ".json", ".pdf"}]
    chat_repo = IndexRepo(
        persist_path=str(Path(CHATS_INDEX_ROOT) / chat_id), root_path=str(chat_path), min_token_count=0
    )
    await chat_repo.add(chat_files)
    assert chat_files

    Path(UPLOAD_ROOT).mkdir(parents=True, exist_ok=True)
    shutil.copytree(src_path, Path(UPLOAD_ROOT) / src_path.name, dirs_exist_ok=True)
    filenames = list_files(UPLOAD_ROOT)
    uploads_files = [i for i in filenames if Path(i).suffix in {".md", ".txt", ".json", ".pdf"}]
    assert uploads_files

    filenames = list_files(src_path)
    other_files = [i for i in filenames if Path(i).suffix in {".md", ".txt", ".json", ".pdf"}]
    assert other_files
    return chat_path, UPLOAD_ROOT, src_path
@pytest.mark.skip
@pytest.mark.asyncio
async def test_index_repo():
    """similarity_search resolves through chat, upload and raw source paths (skipped by default)."""
    # mock data
    chat_path, upload_path, src_path = await mock_index_repo()
    editor = Editor()
    rsp = await editor.similarity_search(query="业务线", path=chat_path)
    assert rsp
    rsp = await editor.similarity_search(query="业务线", path=upload_path)
    assert rsp
    rsp = await editor.similarity_search(query="业务线", path=src_path)
    assert rsp
    # Remove the fixture trees created by mock_index_repo.
    shutil.rmtree(CHATS_ROOT)
    shutil.rmtree(UPLOAD_ROOT)
@pytest.mark.skip
@pytest.mark.asyncio
@pytest.mark.parametrize(
    ("query", "filename"),
    [
        (
            "In this document, who are the legal representatives of both parties?",
            TEST_DATA_PATH / "pdf/20210709逗你学云豆付费课程协议.pdf",
        ),
        (
            "What is the short name of the company in this document?",
            TEST_DATA_PATH / "pdf/company_stock_code.pdf",
        ),
        ("平安创新推出中国版的什么模式,将差异化的医疗健康服务与作为支付方的金融业务无缝结合", TEST_DATA_PATH / "pdf/9112674.pdf"),
        (
            "What principle is introduced by the author to explain the conditions necessary for the emergence of complexity?",
            TEST_DATA_PATH / "pdf/9781444323498.ch2_1.pdf",
        ),
        ("行高的继承性的代码示例是?", TEST_DATA_PATH / "pdf/02-CSS.pdf"),
    ],
)
async def test_similarity_search(query, filename):
    """similarity_search over a single uploaded PDF returns a non-empty answer (skipped by default)."""
    filename = Path(filename)
    save_to = Path(UPLOAD_ROOT) / filename.name
    save_to.parent.mkdir(parents=True, exist_ok=True)
    # shutil.copyfile instead of `os.system("cp ...")`: portable, no shell, and
    # safe with the unquoted non-ASCII file names used in the parameter list.
    shutil.copyfile(filename, save_to)
    editor = Editor()
    rsp = await editor.similarity_search(query=query, path=save_to)
    assert rsp
    save_to.unlink(missing_ok=True)
@pytest.mark.skip
@pytest.mark.asyncio
async def test_read():
    """read() on a PDF larger than the token threshold returns guidance content (skipped by default)."""
    editor = Editor()
    filename = TEST_DATA_PATH / "pdf/9112674.pdf"
    content = await editor.read(str(filename))
    size = filename.stat().st_size
    # NOTE(review): expecting "similarity_search" inside the block suggests read()
    # returns a tool hint rather than raw text for oversized files — confirm.
    assert "similarity_search" in content.block_content and size > 5 * DEFAULT_MIN_TOKEN_COUNT


# Allow running this test module directly via `python <file>`.
if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/libs/test_sd_engine.py | tests/metagpt/tools/libs/test_sd_engine.py | # -*- coding: utf-8 -*-
# @Date : 1/10/2024 10:07 PM
# @Author : stellahong (stellahong@fuzhi.ai)
# @Desc :
import base64
import io
import json
import pytest
from PIL import Image, ImageDraw
from metagpt.tools.libs.sd_engine import SDEngine
def generate_mock_image_data():
    """Return a base64-encoded PNG of a small white placeholder image."""
    # Render a 100x100 white canvas with a text label.
    img = Image.new("RGB", (100, 100), color="white")
    ImageDraw.Draw(img).text((10, 10), "Mock Image", fill="black")
    # Serialize to PNG bytes in memory.
    buffer = io.BytesIO()
    img.save(buffer, format="PNG")
    raw = buffer.getvalue()
    # Base64-encode the binary image data for JSON transport.
    return base64.b64encode(raw).decode("utf-8")
def test_sd_tools(mocker):
    """simple_run_t2i works against a mocked Stable Diffusion webui txt2img endpoint."""
    mock_response = mocker.MagicMock()
    mock_response.json.return_value = {"images": [generate_mock_image_data()]}
    mocker.patch("requests.Session.post", return_value=mock_response)
    engine = SDEngine(sd_url="http://example_localhost:7860")
    prompt = "1boy, hansom"
    engine.construct_payload(prompt)
    engine.simple_run_t2i(engine.payload)


def test_sd_construct_payload():
    """construct_payload fills the request dict, including a negative prompt."""
    engine = SDEngine(sd_url="http://example_localhost:7860")
    prompt = "1boy, hansom"
    engine.construct_payload(prompt)
    assert "negative_prompt" in engine.payload


@pytest.mark.asyncio
async def test_sd_asyn_t2i(mocker):
    """run_t2i works against a mocked aiohttp POST."""
    mock_post = mocker.patch("aiohttp.ClientSession.post")
    mock_response = mocker.AsyncMock()
    mock_response.read.return_value = json.dumps({"images": [generate_mock_image_data()]})
    # The engine uses `async with session.post(...)`, so stub the context manager.
    mock_post.return_value.__aenter__.return_value = mock_response
    engine = SDEngine(sd_url="http://example_localhost:7860")
    prompt = "1boy, hansom"
    engine.construct_payload(prompt)
    await engine.run_t2i([engine.payload])
    assert "negative_prompt" in engine.payload
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/libs/test_web_scraping.py | tests/metagpt/tools/libs/test_web_scraping.py | import pytest
from metagpt.tools.libs.web_scraping import view_page_element_to_scrape
@pytest.mark.asyncio
async def test_view_page_element_to_scrape():
# Define the test URL and parameters
test_url = "https://docs.deepwisdom.ai/main/zh/"
test_requirement = "Retrieve all paragraph texts"
test_keep_links = True
test_page = await view_page_element_to_scrape(test_url, test_requirement, test_keep_links)
assert isinstance(test_page, str)
assert "html" in test_page
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/libs/__init__.py | tests/metagpt/tools/libs/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2024/1/11 16:14
# @Author : lidanyang
# @File : __init__.py
# @Desc :
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/libs/test_terminal.py | tests/metagpt/tools/libs/test_terminal.py | import pytest
from metagpt.const import DATA_PATH, METAGPT_ROOT
from metagpt.tools.libs.terminal import Terminal
@pytest.mark.asyncio
async def test_terminal():
terminal = Terminal()
await terminal.run_command(f"cd {METAGPT_ROOT}")
output = await terminal.run_command("pwd")
assert output.strip() == str(METAGPT_ROOT)
# pwd now should be METAGPT_ROOT, cd data should land in DATA_PATH
await terminal.run_command("cd data")
output = await terminal.run_command("pwd")
assert output.strip() == str(DATA_PATH)
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/libs/test_shell.py | tests/metagpt/tools/libs/test_shell.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from metagpt.tools.libs.shell import shell_execute
@pytest.mark.asyncio
@pytest.mark.parametrize(
["command", "expect_stdout", "expect_stderr"],
[
(["file", f"{__file__}"], "Python script text executable, ASCII text", ""),
(f"file {__file__}", "Python script text executable, ASCII text", ""),
],
)
async def test_shell(command, expect_stdout, expect_stderr):
stdout, stderr, returncode = await shell_execute(command)
assert returncode == 0
assert expect_stdout in stdout
assert stderr == expect_stderr
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/libs/test_index_repo.py | tests/metagpt/tools/libs/test_index_repo.py | import shutil
from pathlib import Path
import pytest
from metagpt.const import DEFAULT_WORKSPACE_ROOT, TEST_DATA_PATH
from metagpt.tools.libs.index_repo import (
CHATS_INDEX_ROOT,
UPLOADS_INDEX_ROOT,
IndexRepo,
)
@pytest.mark.skip
@pytest.mark.asyncio
@pytest.mark.parametrize(("path", "query"), [(TEST_DATA_PATH / "requirements", "业务线")])
async def test_index_repo(path, query):
index_path = DEFAULT_WORKSPACE_ROOT / ".index"
repo = IndexRepo(persist_path=str(index_path), root_path=str(path), min_token_count=0)
await repo.add([path])
await repo.add([path])
assert index_path.exists()
rsp = await repo.search(query)
assert rsp
repo2 = IndexRepo(persist_path=str(index_path), root_path=str(path), min_token_count=0)
rsp2 = await repo2.search(query)
assert rsp2
merged_rsp = await repo.merge(query=query, indices_list=[rsp, rsp2])
assert merged_rsp
shutil.rmtree(index_path)
@pytest.mark.parametrize(
("paths", "path_type", "root"),
[
(["/data/uploads"], UPLOADS_INDEX_ROOT, "/data/uploads"),
(["/data/uploads/"], UPLOADS_INDEX_ROOT, "/data/uploads"),
(["/data/chats/1/1.txt"], str(Path(CHATS_INDEX_ROOT) / "1"), "/data/chats/1"),
(["/data/chats/1/2.txt"], str(Path(CHATS_INDEX_ROOT) / "1"), "/data/chats/1"),
(["/data/chats/2/2.txt", "/data/chats/2/2.txt"], str(Path(CHATS_INDEX_ROOT) / "2"), "/data/chats/2"),
(["/data/chats.txt"], "other", ""),
],
)
def test_classify_path(paths, path_type, root):
result, result_root = IndexRepo.find_index_repo_path(paths)
assert path_type in set(result.keys())
assert root == result_root.get(path_type, "")
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/libs/test_cr.py | tests/metagpt/tools/libs/test_cr.py | import json
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
import pytest_asyncio
from metagpt.tools.libs.cr import CodeReview
class MockFile:
def __init__(self, content):
self.content = content
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
pass
async def read(self):
return self.content
@pytest.mark.asyncio
class TestCodeReview:
@pytest_asyncio.fixture(autouse=True)
async def setup(self):
"""Fixture to initialize the CodeReview instance."""
self.cr = CodeReview()
@patch("aiofiles.open", new_callable=MagicMock)
@patch("metagpt.utils.report.EditorReporter.async_report", new_callable=AsyncMock)
@patch("metagpt.ext.cr.actions.code_review.CodeReview.run", new_callable=AsyncMock)
async def test_review(self, mock_run, mock_report, mock_aiofiles_open):
"""Test the review method with a local patch file."""
# mock patch_content
patch_content = """diff --git a/test.py b/test.py
index 1234567..89abcde 100644
--- a/test.py
+++ b/test.py
@@ -1,3 +1,3 @@
def foo():
- print("Hello")
+ print("World")
- print("Another line")
+ print("Another modified line")"""
# mock point file content
point_file_content = json.dumps([{"id": 1, "description": "Test point"}])
mock_patch_file = MockFile(patch_content)
mock_point_file = MockFile(point_file_content)
mock_aiofiles_open.side_effect = [mock_patch_file, mock_point_file]
mock_run.return_value = [{"comment": "Fix this line"}]
# run
result = await self.cr.review(patch_path="test.patch", output_file="output.json")
# assert
assert "The number of defects: 1" in result
mock_run.assert_called_once()
mock_report.assert_called()
@patch("aiofiles.open", new_callable=MagicMock)
@patch("metagpt.ext.cr.actions.modify_code.ModifyCode.run", new_callable=AsyncMock)
async def test_fix(self, mock_run, mock_aiofiles_open):
"""Test the fix method."""
patch_content = """diff --git a/test.py b/test.py
index 1234567..89abcde 100644
--- a/test.py
+++ b/test.py
@@ -1,3 +1,3 @@
def foo():
- print("Hello")
+ print("World")
- print("Another line")
+ print("Another modified line")"""
cr_file_content = json.dumps([{"comment": "Fix this line"}])
# mock file obj
mock_path_file = MockFile(patch_content)
mock_cr_file = MockFile(cr_file_content)
mock_aiofiles_open.side_effect = [mock_path_file, mock_cr_file]
# run fix
result = await self.cr.fix(patch_path="test.patch", cr_file="cr.json", output_dir="output")
# assert
assert "The fixed patch files store in output" in result
mock_run.assert_called_once()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/libs/test_feature_engineering.py | tests/metagpt/tools/libs/test_feature_engineering.py | import numpy as np
import pandas as pd
import pytest
from sklearn.datasets import fetch_california_housing, load_breast_cancer, load_iris
from metagpt.tools.libs.feature_engineering import (
CatCount,
CatCross,
ExtractTimeComps,
GeneralSelection,
GroupStat,
KFoldTargetMeanEncoder,
PolynomialExpansion,
SplitBins,
TargetMeanEncoder,
TreeBasedSelection,
VarianceBasedSelection,
)
@pytest.fixture
def mock_dataset():
return pd.DataFrame(
{
"num1": [1, 2, np.nan, 4, 5, 6, 7, 3],
"num2": [1, 3, 2, 1, np.nan, 5, 6, 4],
"num3": [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
"cat1": ["A", "B", np.nan, "D", "E", "C", "B", "A"],
"cat2": ["A", "A", "A", "A", "A", "A", "A", "A"],
"date1": [
"2020-01-01",
"2020-01-02",
"2020-01-03",
"2020-01-04",
"2020-01-05",
"2020-01-06",
"2020-01-07",
"2020-01-08",
],
"label": [0, 1, 0, 1, 0, 1, 0, 1],
}
)
def load_sklearn_data(data_name):
if data_name == "iris":
data = load_iris()
elif data_name == "breast_cancer":
data = load_breast_cancer()
elif data_name == "housing":
data = fetch_california_housing()
else:
raise ValueError("data_name not supported")
X, y, feature_names = data.data, data.target, data.feature_names
data = pd.DataFrame(X, columns=feature_names)
data["label"] = y
return data
def test_polynomial_expansion(mock_dataset):
pe = PolynomialExpansion(cols=["num1", "num2", "label"], degree=2, label_col="label")
transformed = pe.fit_transform(mock_dataset)
assert len(transformed.columns) == len(mock_dataset.columns) + 3
# when too many columns
data = load_sklearn_data("breast_cancer")
cols = [c for c in data.columns if c != "label"]
pe = PolynomialExpansion(cols=cols, degree=2, label_col="label")
transformed = pe.fit_transform(data)
assert len(transformed.columns) == len(data.columns) + 55
def test_cat_count(mock_dataset):
cc = CatCount(col="cat1")
transformed = cc.fit_transform(mock_dataset)
assert "cat1_cnt" in transformed.columns
assert transformed["cat1_cnt"][0] == 2
def test_target_mean_encoder(mock_dataset):
tme = TargetMeanEncoder(col="cat1", label="label")
transformed = tme.fit_transform(mock_dataset)
assert "cat1_target_mean" in transformed.columns
assert transformed["cat1_target_mean"][0] == 0.5
def test_kfold_target_mean_encoder(mock_dataset):
kfme = KFoldTargetMeanEncoder(col="cat1", label="label")
transformed = kfme.fit_transform(mock_dataset)
assert "cat1_kf_target_mean" in transformed.columns
def test_cat_cross(mock_dataset):
cc = CatCross(cols=["cat1", "cat2"])
transformed = cc.fit_transform(mock_dataset)
assert "cat1_cat2" in transformed.columns
cc = CatCross(cols=["cat1", "cat2"], max_cat_num=3)
transformed = cc.fit_transform(mock_dataset)
assert "cat1_cat2" not in transformed.columns
def test_group_stat(mock_dataset):
gs = GroupStat(group_col="cat1", agg_col="num1", agg_funcs=["mean", "sum"])
transformed = gs.fit_transform(mock_dataset)
assert "num1_mean_by_cat1" in transformed.columns
assert "num1_sum_by_cat1" in transformed.columns
def test_split_bins(mock_dataset):
sb = SplitBins(cols=["num1"])
transformed = sb.fit_transform(mock_dataset)
assert transformed["num1"].nunique() <= 5
assert all(0 <= x < 5 for x in transformed["num1"])
def test_extract_time_comps(mock_dataset):
time_comps = ["year", "month", "day", "hour", "dayofweek", "is_weekend"]
etc = ExtractTimeComps(time_col="date1", time_comps=time_comps)
transformed = etc.fit_transform(mock_dataset.copy())
for comp in time_comps:
assert comp in transformed.columns
assert transformed["year"][0] == 2020
assert transformed["month"][0] == 1
assert transformed["day"][0] == 1
assert transformed["hour"][0] == 0
assert transformed["dayofweek"][0] == 3
assert transformed["is_weekend"][0] == 0
def test_general_selection(mock_dataset):
gs = GeneralSelection(label_col="label")
transformed = gs.fit_transform(mock_dataset.copy())
assert "num3" not in transformed.columns
assert "cat2" not in transformed.columns
@pytest.mark.skip # skip because TreeBasedSelection needs lgb as dependency
def test_tree_based_selection(mock_dataset):
# regression
data = load_sklearn_data("housing")
tbs = TreeBasedSelection(label_col="label", task_type="reg")
transformed = tbs.fit_transform(data)
assert len(transformed.columns) > 1
# classification
data = load_sklearn_data("breast_cancer")
tbs = TreeBasedSelection(label_col="label", task_type="cls")
transformed = tbs.fit_transform(data)
assert len(transformed.columns) > 1
# multi-classification
data = load_sklearn_data("iris")
tbs = TreeBasedSelection(label_col="label", task_type="mcls")
transformed = tbs.fit_transform(data)
assert len(transformed.columns) > 1
def test_variance_based_selection(mock_dataset):
vbs = VarianceBasedSelection(label_col="label")
transformed = vbs.fit_transform(mock_dataset.copy())
assert "num3" not in transformed.columns
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/libs/test_email_login.py | tests/metagpt/tools/libs/test_email_login.py | from metagpt.tools.libs.email_login import email_login_imap
def test_email_login(mocker):
mock_mailbox = mocker.patch("metagpt.tools.libs.email_login.MailBox.login")
mock_mailbox.login.return_value = mocker.Mock()
email_login_imap("test@outlook.com", "test_password")
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/tools/libs/test_image_getter.py | tests/metagpt/tools/libs/test_image_getter.py | from pathlib import Path
from unittest.mock import AsyncMock, patch
import pytest
import pytest_asyncio
from metagpt.tools.libs.image_getter import ImageGetter
@pytest.mark.asyncio
class TestImageGetter:
@pytest_asyncio.fixture(autouse=True)
async def image_getter_client(self):
"""Fixture to initialize the ImageGetter."""
self.image_getter = ImageGetter(headless=True)
await self.image_getter.start()
yield self.image_getter
if self.image_getter.browser_instance:
await self.image_getter.browser_instance.close()
@patch("metagpt.tools.libs.image_getter.decode_image")
async def test_get_image_success(self, mock_decode_image):
"""Test successfully retrieving and saving an image."""
search_term = "nature"
image_save_path = Path.cwd() / "test_image_getter.jpg"
# Mock the decode_image to avoid actual image decoding
mock_image = AsyncMock()
mock_decode_image.return_value = mock_image
# Mock the Playwright page evaluation result to return a dummy base64 image string
self.image_getter.page.goto = AsyncMock()
self.image_getter.page.wait_for_selector = AsyncMock()
self.image_getter.page.evaluate = AsyncMock(return_value="data:image/png;base64,FAKEBASE64STRING")
result = await self.image_getter.get_image(search_term, str(image_save_path))
assert f"{search_term} found." in result
mock_decode_image.assert_called_once()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/tests/metagpt/document_store/test_chromadb_store.py | tests/metagpt/document_store/test_chromadb_store.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/6/6 00:41
@Author : alexanderwu
@File : test_chromadb_store.py
"""
from metagpt.document_store.chromadb_store import ChromaStore
# @pytest.mark.skip()
def test_chroma_store():
"""FIXME:chroma使用感觉很诡异,一用Python就挂,测试用例里也是"""
# 创建 ChromaStore 实例,使用 'sample_collection' 集合
document_store = ChromaStore("sample_collection_1", get_or_create=True)
# 使用 write 方法添加多个文档
document_store.write(
["This is document1", "This is document2"], [{"source": "google-docs"}, {"source": "notion"}], ["doc1", "doc2"]
)
# 使用 add 方法添加一个文档
document_store.add("This is document3", {"source": "notion"}, "doc3")
# 搜索文档
results = document_store.search("This is a query document", n_results=3)
assert len(results) > 0
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.